Merge branch 'master' of github.com:kubernetes/kubernetes into branch1

This commit is contained in:
Yinan Li 2017-08-08 12:13:22 -07:00
commit 2c1ada38d0
249 changed files with 8732 additions and 4682 deletions

13
Godeps/Godeps.json generated
View File

@ -1707,14 +1707,6 @@
"ImportPath": "github.com/gorilla/websocket",
"Rev": "6eb6ad425a89d9da7a5549bc6da8f79ba5c17844"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
"Comment": "v1.1-4-g2500245",
@ -2247,11 +2239,6 @@
"Comment": "v0.3.5-10-g0049ab3",
"Rev": "0049ab3dc4c4c70a9eee23087437b69c0dde2130"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Comment": "v2.0.0-2-g5dfcb07",
"Rev": "5dfcb07a075adbaaa4094cddfd160b1e1c77a043"
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.7.0-13-ga221380",

55
Godeps/LICENSES generated
View File

@ -59441,34 +59441,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/gregjones/httpcache licensed under: =
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= vendor/github.com/gregjones/httpcache/LICENSE.txt 3cfef421226b2dacde78a4871380ac24 -
================================================================================
================================================================================
= vendor/github.com/gregjones/httpcache/diskcache licensed under: =
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= vendor/github.com/gregjones/httpcache/LICENSE.txt 3cfef421226b2dacde78a4871380ac24 -
================================================================================
================================================================================
= vendor/github.com/grpc-ecosystem/go-grpc-prometheus licensed under: =
@ -71602,33 +71574,6 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/peterbourgon/diskv licensed under: =
Copyright (c) 2011-2012 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
= vendor/github.com/peterbourgon/diskv/LICENSE f9f3e815fc84aa04c4f4db33c553eef9 -
================================================================================
================================================================================
= vendor/github.com/pkg/errors licensed under: =

View File

@ -21339,6 +21339,160 @@
}
]
},
"/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale": {
"get": {
"description": "read scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta1"
],
"operationId": "readAppsV1beta1NamespacedStatefulSetScale",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta1"
}
},
"put": {
"description": "replace scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta1"
],
"operationId": "replaceAppsV1beta1NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta1"
}
},
"patch": {
"description": "partially update scale of the specified StatefulSet",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta1"
],
"operationId": "patchAppsV1beta1NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta1"
}
},
"parameters": [
{
"uniqueItems": true,
"type": "string",
"description": "name of the Scale",
"name": "name",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "object name and auth scope, such as for teams and projects",
"name": "namespace",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "If 'true', then the output is pretty printed.",
"name": "pretty",
"in": "query"
}
]
},
"/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status": {
"get": {
"description": "read status of the specified StatefulSet",
@ -25360,6 +25514,160 @@
}
]
},
"/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale": {
"get": {
"description": "read scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "readAppsV1beta2NamespacedStatefulSetScale",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta2"
}
},
"put": {
"description": "replace scale of the specified StatefulSet",
"consumes": [
"*/*"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "replaceAppsV1beta2NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta2"
}
},
"patch": {
"description": "partially update scale of the specified StatefulSet",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"schemes": [
"https"
],
"tags": [
"apps_v1beta2"
],
"operationId": "patchAppsV1beta2NamespacedStatefulSetScale",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.Scale"
}
},
"401": {
"description": "Unauthorized"
}
},
"x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": {
"group": "apps",
"kind": "Scale",
"version": "v1beta2"
}
},
"parameters": [
{
"uniqueItems": true,
"type": "string",
"description": "name of the Scale",
"name": "name",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "object name and auth scope, such as for teams and projects",
"name": "namespace",
"in": "path",
"required": true
},
{
"uniqueItems": true,
"type": "string",
"description": "If 'true', then the output is pretty printed.",
"name": "pretty",
"in": "query"
}
]
},
"/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status": {
"get": {
"description": "read status of the specified StatefulSet",

View File

@ -2982,6 +2982,171 @@
}
]
},
{
"path": "/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale",
"description": "API at /apis/apps/v1beta1",
"operations": [
{
"type": "v1beta1.Scale",
"method": "GET",
"summary": "read scale of the specified StatefulSet",
"nickname": "readNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta1.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta1.Scale",
"method": "PUT",
"summary": "replace scale of the specified StatefulSet",
"nickname": "replaceNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1beta1.Scale",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta1.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified StatefulSet",
"nickname": "patchNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1.Patch",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta1.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
]
}
]
},
{
"path": "/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status",
"description": "API at /apis/apps/v1beta1",

View File

@ -4338,6 +4338,171 @@
}
]
},
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale",
"description": "API at /apis/apps/v1beta2",
"operations": [
{
"type": "v1beta2.Scale",
"method": "GET",
"summary": "read scale of the specified StatefulSet",
"nickname": "readNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta2.Scale",
"method": "PUT",
"summary": "replace scale of the specified StatefulSet",
"nickname": "replaceNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1beta2.Scale",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"*/*"
]
},
{
"type": "v1beta2.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified StatefulSet",
"nickname": "patchNamespacedStatefulSetScale",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "pretty",
"description": "If 'true', then the output is pretty printed.",
"required": false,
"allowMultiple": false
},
{
"type": "v1.Patch",
"paramType": "body",
"name": "body",
"description": "",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Scale",
"required": true,
"allowMultiple": false
}
],
"responseMessages": [
{
"code": 200,
"message": "OK",
"responseModel": "v1beta2.Scale"
}
],
"produces": [
"application/json",
"application/yaml",
"application/vnd.kubernetes.protobuf"
],
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json"
]
}
]
},
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status",
"description": "API at /apis/apps/v1beta2",

View File

@ -342,6 +342,7 @@ func NewControllerInitializers() map[string]InitFunc {
func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
var discoveryClient discovery.DiscoveryInterface
var healthzContent string
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
@ -352,17 +353,19 @@ func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (ma
}
healthStatus := 0
client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
resp := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
glog.Errorf("Server isn't healthy yet. Waiting a little while.")
return false, nil
}
content, _ := resp.Raw()
healthzContent = string(content)
discoveryClient = client.Discovery()
return true, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get api versions from server: %v", err)
return nil, fmt.Errorf("failed to get api versions from server: %v: %v", healthzContent, err)
}
resourceMap, err := discoveryClient.ServerResources()

View File

@ -39,11 +39,6 @@ type MasterConfiguration struct {
Token string
TokenTTL time.Duration
// SelfHosted enables an alpha deployment type where the apiserver, scheduler, and
// controller manager are managed by Kubernetes itself. This option is likely to
// become the default in the future.
SelfHosted bool
APIServerExtraArgs map[string]string
ControllerManagerExtraArgs map[string]string
SchedulerExtraArgs map[string]string

View File

@ -38,11 +38,6 @@ type MasterConfiguration struct {
Token string `json:"token"`
TokenTTL time.Duration `json:"tokenTTL"`
// SelfHosted enables an alpha deployment type where the apiserver, scheduler, and
// controller manager are managed by Kubernetes itself. This option is likely to
// become the default in the future.
SelfHosted bool `json:"selfHosted"`
APIServerExtraArgs map[string]string `json:"apiServerExtraArgs"`
ControllerManagerExtraArgs map[string]string `json:"controllerManagerExtraArgs"`
SchedulerExtraArgs map[string]string `json:"schedulerExtraArgs"`

View File

@ -24,6 +24,7 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/features:go_default_library",
"//cmd/kubeadm/app/cmd/phases:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/discovery:go_default_library",

View File

@ -31,6 +31,12 @@ const (
// FeatureList represents a list of feature gates
type FeatureList map[utilfeature.Feature]utilfeature.FeatureSpec
// Enabled indicates whether a feature name has been enabled
func Enabled(featureList map[string]bool, featureName utilfeature.Feature) bool {
_, ok := featureList[string(featureName)]
return ok
}
// Supports indicates whether a feature name is supported on the given
// feature set
func Supports(featureList FeatureList, featureName string) bool {

View File

@ -31,6 +31,7 @@ import (
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/features"
cmdphases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
addonsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons"
@ -147,10 +148,6 @@ func NewCmdInit(out io.Writer) *cobra.Command {
&skipTokenPrint, "skip-token-print", skipTokenPrint,
"Skip printing of the default bootstrap token generated by 'kubeadm init'",
)
cmd.PersistentFlags().BoolVar(
&cfg.SelfHosted, "self-hosted", cfg.SelfHosted,
"[experimental] If kubeadm should make this control plane self-hosted",
)
cmd.PersistentFlags().StringVar(
&cfg.Token, "token", cfg.Token,
@ -288,7 +285,7 @@ func (i *Init) Run(out io.Writer) error {
}
// Is deployment type self-hosted?
if i.cfg.SelfHosted {
if features.Enabled(i.cfg.FeatureFlags, features.SelfHosting) {
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[self-hosted] Creating self-hosted control plane...")

View File

@ -21,6 +21,7 @@ go_test(
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
],

View File

@ -166,6 +166,9 @@ func componentPod(container v1.Container, volumes []v1.Volume) v1.Pod {
Name: container.Name,
Namespace: metav1.NamespaceSystem,
Annotations: map[string]string{kubetypes.CriticalPodAnnotationKey: ""},
// The component and tier labels are useful for quickly identifying the control plane Pods when doing a .List()
// against Pods in the kube-system namespace. Can for example be used together with the WaitForPodsWithLabel function
Labels: map[string]string{"component": container.Name, "tier": "control-plane"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{container},

View File

@ -26,6 +26,7 @@ import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/yaml"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@ -179,22 +180,43 @@ func TestComponentProbe(t *testing.T) {
func TestComponentPod(t *testing.T) {
var tests = []struct {
n string
name string
expected v1.Pod
}{
{
n: "foo",
name: "foo",
expected: v1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "kube-system",
Annotations: map[string]string{"scheduler.alpha.kubernetes.io/critical-pod": ""},
Labels: map[string]string{"component": "foo", "tier": "control-plane"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
},
},
HostNetwork: true,
Volumes: []v1.Volume{},
},
},
},
}
for _, rt := range tests {
c := v1.Container{Name: rt.n}
v := []v1.Volume{}
actual := componentPod(c, v)
if actual.ObjectMeta.Name != rt.n {
c := v1.Container{Name: rt.name}
actual := componentPod(c, []v1.Volume{})
if !reflect.DeepEqual(rt.expected, actual) {
t.Errorf(
"failed componentPod:\n\texpected: %s\n\t actual: %s",
rt.n,
actual.ObjectMeta.Name,
"failed componentPod:\n\texpected: %v\n\t actual: %v",
rt.expected,
actual,
)
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -24,6 +24,7 @@ go_library(
"//federation/pkg/federatedtypes:go_default_library",
"//federation/pkg/federation-controller/cluster:go_default_library",
"//federation/pkg/federation-controller/ingress:go_default_library",
"//federation/pkg/federation-controller/job:go_default_library",
"//federation/pkg/federation-controller/service:go_default_library",
"//federation/pkg/federation-controller/service/dns:go_default_library",
"//federation/pkg/federation-controller/sync:go_default_library",

View File

@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/federation/pkg/federatedtypes"
clustercontroller "k8s.io/kubernetes/federation/pkg/federation-controller/cluster"
ingresscontroller "k8s.io/kubernetes/federation/pkg/federation-controller/ingress"
jobcontroller "k8s.io/kubernetes/federation/pkg/federation-controller/job"
servicecontroller "k8s.io/kubernetes/federation/pkg/federation-controller/service"
servicednscontroller "k8s.io/kubernetes/federation/pkg/federation-controller/service/dns"
synccontroller "k8s.io/kubernetes/federation/pkg/federation-controller/sync"
@ -155,6 +156,14 @@ func StartControllers(s *options.CMServer, restClientCfg *restclient.Config) err
}
}
if controllerEnabled(s.Controllers, serverResources, jobcontroller.ControllerName, jobcontroller.RequiredResources, true) {
glog.V(3).Infof("Loading client config for job controller %q", jobcontroller.UserAgentName)
jobClientset := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, jobcontroller.UserAgentName))
jobController := jobcontroller.NewJobController(jobClientset)
glog.V(3).Infof("Running job controller")
go jobController.Run(s.ConcurrentJobSyncs, wait.NeverStop)
}
if controllerEnabled(s.Controllers, serverResources, ingresscontroller.ControllerName, ingresscontroller.RequiredResources, true) {
glog.V(3).Infof("Loading client config for ingress controller %q", ingresscontroller.UserAgentName)
ingClientset := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, ingresscontroller.UserAgentName))

View File

@ -56,6 +56,10 @@ type ControllerManagerConfiguration struct {
// allowed to sync concurrently. Larger number = more responsive service
// management, but more CPU (and network) load.
ConcurrentReplicaSetSyncs int `json:"concurrentReplicaSetSyncs"`
// concurrentJobSyncs is the number of Jobs that are
// allowed to sync concurrently. Larger number = more responsive service
// management, but more CPU (and network) load.
ConcurrentJobSyncs int `json:"concurrentJobSyncs"`
// clusterMonitorPeriod is the period for syncing ClusterStatus in cluster controller.
ClusterMonitorPeriod metav1.Duration `json:"clusterMonitorPeriod"`
// APIServerQPS is the QPS to use while talking with federation apiserver.
@ -96,6 +100,7 @@ func NewCMServer() *CMServer {
ConcurrentServiceSyncs: 10,
ConcurrentReplicaSetSyncs: 10,
ClusterMonitorPeriod: metav1.Duration{Duration: 40 * time.Second},
ConcurrentJobSyncs: 10,
APIServerQPS: 20.0,
APIServerBurst: 30,
LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(),
@ -115,6 +120,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.ServiceDnsSuffix, "service-dns-suffix", s.ServiceDnsSuffix, "DNS Suffix to use when publishing federated service names. Defaults to zone-name")
fs.IntVar(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of service syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentReplicaSetSyncs, "concurrent-replicaset-syncs", s.ConcurrentReplicaSetSyncs, "The number of ReplicaSets syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentJobSyncs, "concurrent-job-syncs", s.ConcurrentJobSyncs, "The number of Jobs syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.DurationVar(&s.ClusterMonitorPeriod.Duration, "cluster-monitor-period", s.ClusterMonitorPeriod.Duration, "The period for syncing ClusterStatus in ClusterController.")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled")

View File

@ -10,11 +10,16 @@ load(
go_test(
name = "go_default_test",
srcs = ["scheduling_test.go"],
srcs = [
"hpa_test.go",
"scheduling_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//federation/pkg/federation-controller/util/test:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -29,6 +34,7 @@ go_library(
"configmap.go",
"daemonset.go",
"deployment.go",
"hpa.go",
"namespace.go",
"qualifiedname.go",
"registry.go",
@ -48,12 +54,14 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/controller/namespace/deletion:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",

View File

@ -40,16 +40,16 @@ func init() {
}
type DeploymentAdapter struct {
*schedulingAdapter
*replicaSchedulingAdapter
client federationclientset.Interface
}
func NewDeploymentAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter {
schedulingAdapter := schedulingAdapter{
schedulingAdapter := replicaSchedulingAdapter{
preferencesAnnotationName: FedDeploymentPreferencesAnnotation,
updateStatusFunc: func(obj pkgruntime.Object, status interface{}) error {
updateStatusFunc: func(obj pkgruntime.Object, schedulingInfo interface{}) error {
deployment := obj.(*extensionsv1.Deployment)
typedStatus := status.(ReplicaSchedulingStatus)
typedStatus := schedulingInfo.(*ReplicaSchedulingInfo).Status
if typedStatus.Replicas != deployment.Status.Replicas || typedStatus.UpdatedReplicas != deployment.Status.UpdatedReplicas ||
typedStatus.ReadyReplicas != deployment.Status.ReadyReplicas || typedStatus.AvailableReplicas != deployment.Status.AvailableReplicas {
deployment.Status = extensionsv1.DeploymentStatus{

View File

@ -0,0 +1,927 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package federatedtypes
import (
"time"
"fmt"
autoscalingv1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
kubeclientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
)
const (
	// HpaKind and HpaControllerName identify the federated HPA type for
	// registration with the generic federated-type machinery.
	HpaKind           = "horizontalpodautoscaler"
	HpaControllerName = "horizontalpodautoscalers"
	// scaleForbiddenWindow is a tunable which does not change replica nums
	// on an existing local hpa, before this timeout, if it
	// did scale already (avoids thrashing of replicas around).
	scaleForbiddenWindow = 5 * time.Minute
	// hpaMinReplicaDefault is used as the default min for hpa objects submitted
	// to federation, in a situation where the default is for
	// some reason not present (Spec.MinReplicas == nil).
	hpaMinReplicaDefault = int32(1)
)
// init registers the HPA federated type so the generic sync controller
// can manage autoscaling/v1 HorizontalPodAutoscaler objects.
func init() {
	RegisterFederatedType(HpaKind, HpaControllerName, []schema.GroupVersionResource{autoscalingv1.SchemeGroupVersion.WithResource(HpaControllerName)}, NewHpaAdapter)
}

// HpaAdapter implements the FederatedTypeAdapter interface (plus the
// scheduling-adapter extensions below) for HorizontalPodAutoscaler.
type HpaAdapter struct {
	// client talks to the federation control plane.
	client federationclientset.Interface
}

// NewHpaAdapter returns a FederatedTypeAdapter for HPAs backed by the given
// federation clientset. The rest config parameter is unused by this adapter.
func NewHpaAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter {
	return &HpaAdapter{client: client}
}
// Kind returns the federated type kind handled by this adapter.
func (a *HpaAdapter) Kind() string {
	return HpaKind
}

// ObjectType returns a new empty object of the adapted type.
func (a *HpaAdapter) ObjectType() pkgruntime.Object {
	return &autoscalingv1.HorizontalPodAutoscaler{}
}

// IsExpectedType reports whether obj is an autoscaling/v1 HPA.
func (a *HpaAdapter) IsExpectedType(obj interface{}) bool {
	_, ok := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return ok
}

// Copy returns a fresh HPA carrying only the relevant ObjectMeta fields and
// a deep copy of the Spec; Status is intentionally not copied.
func (a *HpaAdapter) Copy(obj pkgruntime.Object) pkgruntime.Object {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: fedutil.DeepCopyRelevantObjectMeta(hpa.ObjectMeta),
		Spec:       *fedutil.DeepCopyApiTypeOrPanic(&hpa.Spec).(*autoscalingv1.HorizontalPodAutoscalerSpec),
	}
}

// Equivalent reports whether the two HPAs match on ObjectMeta and Spec.
func (a *HpaAdapter) Equivalent(obj1, obj2 pkgruntime.Object) bool {
	return fedutil.ObjectMetaAndSpecEquivalent(obj1, obj2)
}

// QualifiedName returns the namespace/name pair identifying the HPA.
func (a *HpaAdapter) QualifiedName(obj pkgruntime.Object) QualifiedName {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return QualifiedName{Namespace: hpa.Namespace, Name: hpa.Name}
}

// ObjectMeta returns a pointer to the HPA's ObjectMeta for in-place use.
func (a *HpaAdapter) ObjectMeta(obj pkgruntime.Object) *metav1.ObjectMeta {
	return &obj.(*autoscalingv1.HorizontalPodAutoscaler).ObjectMeta
}
// FedCreate creates the HPA in the federation control plane.
func (a *HpaAdapter) FedCreate(obj pkgruntime.Object) (pkgruntime.Object, error) {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return a.client.AutoscalingV1().HorizontalPodAutoscalers(hpa.Namespace).Create(hpa)
}

// FedDelete deletes the named HPA from the federation control plane.
func (a *HpaAdapter) FedDelete(qualifiedName QualifiedName, options *metav1.DeleteOptions) error {
	return a.client.AutoscalingV1().HorizontalPodAutoscalers(qualifiedName.Namespace).Delete(qualifiedName.Name, options)
}

// FedGet fetches the named HPA from the federation control plane.
func (a *HpaAdapter) FedGet(qualifiedName QualifiedName) (pkgruntime.Object, error) {
	return a.client.AutoscalingV1().HorizontalPodAutoscalers(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{})
}

// FedList lists HPAs in the given namespace from the federation control plane.
func (a *HpaAdapter) FedList(namespace string, options metav1.ListOptions) (pkgruntime.Object, error) {
	return a.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options)
}

// FedUpdate updates the HPA in the federation control plane.
func (a *HpaAdapter) FedUpdate(obj pkgruntime.Object) (pkgruntime.Object, error) {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return a.client.AutoscalingV1().HorizontalPodAutoscalers(hpa.Namespace).Update(hpa)
}

// FedWatch opens a watch on HPAs in the federation control plane.
func (a *HpaAdapter) FedWatch(namespace string, options metav1.ListOptions) (watch.Interface, error) {
	return a.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options)
}
// ClusterCreate creates the HPA in a member cluster via its client.
func (a *HpaAdapter) ClusterCreate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return client.AutoscalingV1().HorizontalPodAutoscalers(hpa.Namespace).Create(hpa)
}

// ClusterDelete deletes the named HPA from a member cluster.
func (a *HpaAdapter) ClusterDelete(client kubeclientset.Interface, qualifiedName QualifiedName, options *metav1.DeleteOptions) error {
	return client.AutoscalingV1().HorizontalPodAutoscalers(qualifiedName.Namespace).Delete(qualifiedName.Name, options)
}

// ClusterGet fetches the named HPA from a member cluster.
func (a *HpaAdapter) ClusterGet(client kubeclientset.Interface, qualifiedName QualifiedName) (pkgruntime.Object, error) {
	return client.AutoscalingV1().HorizontalPodAutoscalers(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{})
}

// ClusterList lists HPAs in the given namespace of a member cluster.
func (a *HpaAdapter) ClusterList(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (pkgruntime.Object, error) {
	return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options)
}

// ClusterUpdate updates the HPA in a member cluster.
func (a *HpaAdapter) ClusterUpdate(client kubeclientset.Interface, obj pkgruntime.Object) (pkgruntime.Object, error) {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	return client.AutoscalingV1().HorizontalPodAutoscalers(hpa.Namespace).Update(hpa)
}

// ClusterWatch opens a watch on HPAs in a member cluster.
func (a *HpaAdapter) ClusterWatch(client kubeclientset.Interface, namespace string, options metav1.ListOptions) (watch.Interface, error) {
	return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options)
}
// NewTestObject returns a sample federated HPA (min=4, max=10, 70% CPU
// target) in the given namespace, for use by test helpers.
func (a *HpaAdapter) NewTestObject(namespace string) pkgruntime.Object {
	var min int32 = 4
	var targetCPU int32 = 70
	return &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "test-hpa-",
			Namespace:    namespace,
		},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				// NOTE(review): API kinds are conventionally CamelCase
				// ("ReplicaSet") — confirm the lowercase value is
				// intentional for this test fixture.
				Kind: "replicaset",
				Name: "myrs",
			},
			MinReplicas:                    &min,
			MaxReplicas:                    int32(10),
			TargetCPUUtilizationPercentage: &targetCPU,
		},
	}
}
// IsSchedulingAdapter reports that this adapter participates in
// cross-cluster scheduling (GetSchedule/ScheduleObject below).
func (a *HpaAdapter) IsSchedulingAdapter() bool {
	return true
}
// EquivalentIgnoringSchedule compares two HPAs while disregarding the
// replica bounds that federated scheduling itself manages: obj2's
// min/max replicas are overwritten with obj1's before comparison.
func (a *HpaAdapter) EquivalentIgnoringSchedule(obj1, obj2 pkgruntime.Object) bool {
	first := obj1.(*autoscalingv1.HorizontalPodAutoscaler)
	second := a.Copy(obj2).(*autoscalingv1.HorizontalPodAutoscaler)

	// Force the copy's replica bounds to match first's so the generic
	// ObjectMeta/Spec comparison ignores them.
	switch {
	case first.Spec.MinReplicas == nil:
		second.Spec.MinReplicas = nil
	case second.Spec.MinReplicas == nil:
		min := *first.Spec.MinReplicas
		second.Spec.MinReplicas = &min
	default:
		*second.Spec.MinReplicas = *first.Spec.MinReplicas
	}
	second.Spec.MaxReplicas = first.Spec.MaxReplicas

	return fedutil.ObjectMetaAndSpecEquivalent(first, second)
}
// replicaNums groups a (min, max) replica pair for one cluster.
type replicaNums struct {
	min int32
	max int32
}

// hpaFederatedStatus accumulates cluster-level HPA status during one
// reconcile, to be reflected into the federated HPA's status.
type hpaFederatedStatus struct {
	// lastScaleTime is the most recent scale time seen.
	lastScaleTime *metav1.Time
	// count indicates how many clusters have hpa/replicas.
	// Used to average the cpu utilization which is
	// reflected to the federation user.
	count                             int32
	aggregateCPUUtilizationPercentage *int32
	currentReplicas                   int32
	desiredReplicas                   int32
}

// hpaSchedulingInfo carries the per-cluster desired schedule plus the
// federated status accumulator through one reconcile pass.
type hpaSchedulingInfo struct {
	scheduleState map[string]*replicaNums
	fedStatus     hpaFederatedStatus
}

// hpaLists holds lists of cluster names partitioned by capability.
type hpaLists struct {
	// availableMin stores names of those clusters which can offer min.
	availableMin sets.String
	// availableMax stores names of those clusters which can offer max.
	availableMax sets.String
	// noHpa stores names of those clusters which do not have hpa yet.
	noHpa sets.String
}
// GetSchedule builds the hpaSchedulingInfo for one reconcile pass: the
// desired per-cluster (min, max) schedule for this federated hpa plus a
// fresh federated-status accumulator that ScheduleObject() fills in
// cluster by cluster.
func (a *HpaAdapter) GetSchedule(obj pkgruntime.Object, key string, clusters []*federationapi.Cluster, informer fedutil.FederatedInformer) (interface{}, error) {
	currentClusterObjs, err := getCurrentClusterObjs(informer, key, clusters)
	if err != nil {
		return nil, err
	}

	// Initialise averaged cpu utilisation for this reconcile.
	var ccup int32 = 0
	fedStatus := hpaFederatedStatus{
		aggregateCPUUtilizationPercentage: &ccup,
		count:           int32(0),
		desiredReplicas: int32(0),
		currentReplicas: int32(0),
	}
	fedHpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	// We assign the last known scale time here, which we update with
	// the latest time from among all clusters in ScheduleObject()
	if fedHpa.Status.LastScaleTime != nil {
		t := metav1.NewTime(fedHpa.Status.LastScaleTime.Time)
		fedStatus.lastScaleTime = &t
	}

	return &hpaSchedulingInfo{
		scheduleState: getHpaScheduleState(obj, currentClusterObjs),
		fedStatus:     fedStatus,
	}, nil
}
// getCurrentClusterObjs looks up this hpa (by key) in every given cluster's
// target store. The returned map has an entry for every cluster; the value
// is nil for clusters that do not have the object yet.
func getCurrentClusterObjs(informer fedutil.FederatedInformer, key string, clusters []*federationapi.Cluster) (map[string]pkgruntime.Object, error) {
	objs := make(map[string]pkgruntime.Object, len(clusters))
	for _, c := range clusters {
		stored, found, err := informer.GetTargetStore().GetByKey(c.Name, key)
		if err != nil {
			return nil, err
		}
		if found {
			objs[c.Name] = stored.(pkgruntime.Object)
		} else {
			objs[c.Name] = nil
		}
	}
	return objs, nil
}
// The algorithm used for scheduling is briefed as below:
//
// 1. Find clusters which can offer max and min, if any (lists.availableMax and
// lists.availableMin) in one pass on all clusters.
//
// 2. Reduce the replicas (both min and max) if needed (situation when fedHpa
// has lesser replicas then all cluster local hpa replicas totalled together).
// In this step reduce first from those hpas which already have max (and min)
// reducible. Once such clusters are over and reduction still needed, reduce
// one at a time from all clusters, randomly. This step will ensure that the
// exceeding replicas in local hpas are reduced to match the fedHpa.
// This step would ideally be a noop in most cases because it is rare that fedHpa
// would have fewer replicas than the cluster local total (probably when user
// forces an update of fedHpa).
//
// 3. Distribute the replicas. In this step we have replicas to distribute (which
// are fed replicas exceeding the sum total of local cluster replicas). If clusters
// already have replicas, one replica from each cluster which can offer replicas
// (both for max and min) are also added to this replicas to distribute numbers (min
// and max).
// 3a. We first do a sub-pass to distribute to clusters which need replicas, considering
// those as clusters in crucial need of replicas.
// 3b. After previous sub-pass, if we still have replicas remaining, in the sub-pass
// we distribute to those clusters which do not yet have any hpa.
// 3c. After previous if we still have more to distribute, then we distribute to all
// clusters randomly, giving replica distribution count (rdc=total-fed-replicas/no-of-clusters)
// to each at a time.
//
// The above algorithm is run to first distribute max and then distribute min to those clusters
// which get max.
// getHpaScheduleState computes the desired (min, max) replica pair for each
// cluster from the federated hpa's requested bounds and the clusters'
// current local hpas, following the multi-pass algorithm described above.
// currentObjs must contain one entry per cluster (nil value = no local hpa).
func getHpaScheduleState(fedObj pkgruntime.Object, currentObjs map[string]pkgruntime.Object) map[string]*replicaNums {
	fedHpa := fedObj.(*autoscalingv1.HorizontalPodAutoscaler)
	// Robustness fix: with no clusters there is nothing to schedule, and the
	// per-cluster rdc computation below would divide by zero.
	if len(currentObjs) == 0 {
		return map[string]*replicaNums{}
	}
	requestedMin := hpaMinReplicaDefault
	if fedHpa.Spec.MinReplicas != nil {
		requestedMin = *fedHpa.Spec.MinReplicas
	}
	requestedReplicas := replicaNums{
		min: requestedMin,
		max: fedHpa.Spec.MaxReplicas,
	}
	// replica distribution count (rdc): the default share handed to one
	// cluster at a time during distribution.
	rdc := replicaNums{
		min: requestedReplicas.min / int32(len(currentObjs)),
		max: requestedReplicas.max / int32(len(currentObjs)),
	}
	if rdc.min < 1 {
		rdc.min = 1
	}
	// TODO: Is there a better way?
	// We need to cap the lowest limit of Max to 2, because in a
	// situation like both min and max become 1 (same) for all clusters,
	// no rebalancing would happen.
	if rdc.max < 2 {
		rdc.max = 2
	}

	// Pass 1: Analyse existing local hpa's if any.
	// clusterLists holds the list of those clusters which can offer
	// min and max replicas, to those which want them.
	// For example new clusters joining the federation and/or
	// those clusters which need to increase or reduce replicas
	// beyond min/max limits.
	// scheduleState currently has the status of existing hpas.
	// It will eventually have the desired status for this reconcile.
	clusterLists, currentReplicas, scheduleState := prepareForScheduling(currentObjs)

	remainingReplicas := replicaNums{
		min: requestedReplicas.min - currentReplicas.min,
		max: requestedReplicas.max - currentReplicas.max,
	}

	// Pass 2: reduction of replicas if needed (situation where fedHpa
	// updated replicas to less than existing).
	// In this pass, we remain pessimistic and reduce one replica per
	// cluster at a time.
	if remainingReplicas.min < 0 {
		excessMin := (remainingReplicas.min * int32(-1))
		remainingReplicas.min = reduceMinReplicas(excessMin, clusterLists.availableMin, scheduleState)
	}
	if remainingReplicas.max < 0 {
		excessMax := (remainingReplicas.max * int32(-1))
		remainingReplicas.max = reduceMaxReplicas(excessMax, clusterLists.availableMax, scheduleState)
	}

	toDistribute := replicaNums{
		min: remainingReplicas.min + int32(clusterLists.availableMin.Len()),
		max: remainingReplicas.max + int32(clusterLists.availableMax.Len()),
	}

	// Pass 3: Distribute Max and then Min.
	// Here we first distribute max and then (in the next call)
	// distribute min into those clusters which already get the
	// max fixed.
	// In this process we might not meet the min limit and the total of
	// min limits might remain more than the requested federated min.
	// This is partially because a min per cluster cannot be less
	// than 1, but min could be requested as 1 at federation.
	// Additionally we first increase replicas into those clusters
	// which already have hpa's and are in a condition to increase.
	// This will save cluster related resources for the user, such that
	// if an already existing cluster can satisfy the user's request why send
	// the workload to another.
	// We then go ahead to give the replicas to those which do not
	// have any hpa. In this pass however we try to ensure that all
	// our Max are consumed in this reconcile.
	distributeMaxReplicas(toDistribute.max, clusterLists, rdc, currentObjs, scheduleState)

	// We distribute min to those clusters which:
	// 1 - can adjust min (our increase step would be only 1)
	// 2 - which do not have this hpa and got max (increase step rdc.min)
	// We might exhaust all min replicas here, with
	// some clusters still needing them. We adjust this in finalise by
	// assigning min replicas to 1 into those clusters which got max
	// but min remains 0.
	distributeMinReplicas(toDistribute.min, clusterLists, rdc, currentObjs, scheduleState)

	return finaliseScheduleState(scheduleState)
}
// ScheduleObject folds this cluster's local hpa status into the federated
// status accumulator and returns the desired hpa object for the cluster
// together with the action (add/delete/no-op) the sync controller should
// take there.
func (a *HpaAdapter) ScheduleObject(cluster *federationapi.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo interface{}) (pkgruntime.Object, ScheduleAction, error) {
	// Update federated status info
	typedInfo := schedulingInfo.(*hpaSchedulingInfo)
	if clusterObj != nil {
		clusterHpa := clusterObj.(*autoscalingv1.HorizontalPodAutoscaler)
		if clusterHpa.Status.CurrentCPUUtilizationPercentage != nil {
			// Weight utilisation by replica count; averaged out later in
			// updateStatus().
			*typedInfo.fedStatus.aggregateCPUUtilizationPercentage +=
				(*clusterHpa.Status.CurrentCPUUtilizationPercentage * clusterHpa.Status.CurrentReplicas)
			typedInfo.fedStatus.count += clusterHpa.Status.CurrentReplicas
		}
		if clusterHpa.Status.LastScaleTime != nil {
			t := metav1.NewTime(clusterHpa.Status.LastScaleTime.Time)
			// Keep the most recent scale time seen across all clusters.
			// Fix: previously a nil federated lastScaleTime was never
			// replaced, so cluster scale times were dropped whenever the
			// fed hpa had no LastScaleTime yet — contradicting the intent
			// stated in GetSchedule().
			if typedInfo.fedStatus.lastScaleTime == nil ||
				t.After(typedInfo.fedStatus.lastScaleTime.Time) {
				typedInfo.fedStatus.lastScaleTime = &t
			}
		}

		typedInfo.fedStatus.currentReplicas += clusterHpa.Status.CurrentReplicas
		typedInfo.fedStatus.desiredReplicas += clusterHpa.Status.DesiredReplicas
	}

	// Update the cluster obj and the needed action on the cluster
	clusterHpaState := typedInfo.scheduleState[cluster.Name]
	desiredHpa := federationObjCopy.(*autoscalingv1.HorizontalPodAutoscaler)
	if clusterHpaState != nil {
		desiredHpa.Spec.MaxReplicas = clusterHpaState.max
		if desiredHpa.Spec.MinReplicas == nil {
			min := int32(0)
			desiredHpa.Spec.MinReplicas = &min
		}
		*desiredHpa.Spec.MinReplicas = clusterHpaState.min
	}

	var defaultAction ScheduleAction = ""
	switch {
	case clusterHpaState != nil && clusterObj != nil:
		// Present in both the schedule and the cluster: update in place.
		return desiredHpa, defaultAction, nil
	case clusterHpaState != nil && clusterObj == nil:
		// Scheduled but absent in the cluster: create it there.
		return desiredHpa, ActionAdd, nil
	case clusterHpaState == nil && clusterObj != nil:
		// Present in the cluster but no longer scheduled: remove it.
		return nil, ActionDelete, nil
	}
	return nil, defaultAction, nil
}
// UpdateFederatedStatus writes the accumulated scheduling status back into
// the federated hpa's status subresource, but only when something changed.
func (a *HpaAdapter) UpdateFederatedStatus(obj pkgruntime.Object, schedulingInfo interface{}) error {
	fedHpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	needUpdate, newFedHpaStatus := updateStatus(fedHpa, schedulingInfo.(*hpaSchedulingInfo).fedStatus)
	if needUpdate {
		fedHpa.Status = newFedHpaStatus
		_, err := a.client.AutoscalingV1().HorizontalPodAutoscalers(fedHpa.Namespace).UpdateStatus(fedHpa)
		if err != nil {
			// Error strings are lowercase and un-prefixed per Go convention.
			return fmt.Errorf("updating hpa %s status in federation: %v", fedHpa.Name, err)
		}
	}
	return nil
}
// updateStatus builds the replacement federated hpa status from the
// accumulated per-cluster status and reports whether it differs from the
// current one (i.e. whether an UpdateStatus call is needed).
func updateStatus(fedHpa *autoscalingv1.HorizontalPodAutoscaler, newStatus hpaFederatedStatus) (bool, autoscalingv1.HorizontalPodAutoscalerStatus) {
	averageCPUUtilizationPercentage := int32(0)
	// Average out the available current utilisation
	if *newStatus.aggregateCPUUtilizationPercentage != 0 && newStatus.count != 0 {
		averageCPUUtilizationPercentage = *newStatus.aggregateCPUUtilizationPercentage / newStatus.count
	}
	gen := fedHpa.Generation
	newFedHpaStatus := autoscalingv1.HorizontalPodAutoscalerStatus{ObservedGeneration: &gen}
	needUpdate := false
	if (fedHpa.Status.CurrentCPUUtilizationPercentage == nil &&
		averageCPUUtilizationPercentage != 0) ||
		(fedHpa.Status.CurrentCPUUtilizationPercentage != nil &&
			averageCPUUtilizationPercentage !=
				*fedHpa.Status.CurrentCPUUtilizationPercentage) {
		needUpdate = true
		newFedHpaStatus.CurrentCPUUtilizationPercentage = &averageCPUUtilizationPercentage
	}
	if (fedHpa.Status.LastScaleTime == nil && newStatus.lastScaleTime != nil) ||
		(fedHpa.Status.LastScaleTime != nil && newStatus.lastScaleTime == nil) ||
		((fedHpa.Status.LastScaleTime != nil && newStatus.lastScaleTime != nil) &&
			newStatus.lastScaleTime.After(fedHpa.Status.LastScaleTime.Time)) {
		needUpdate = true
		newFedHpaStatus.LastScaleTime = newStatus.lastScaleTime
	}
	// Bug fix: the replica counts are always carried into the replacement
	// status. The previous code (a) swapped the current/desired
	// assignments and (b) filled each field only when its counterpart
	// changed, which zeroed both counts whenever an update was triggered
	// by another field (e.g. a CPU-utilisation-only change).
	newFedHpaStatus.CurrentReplicas = newStatus.currentReplicas
	newFedHpaStatus.DesiredReplicas = newStatus.desiredReplicas
	if fedHpa.Status.CurrentReplicas != newStatus.currentReplicas ||
		fedHpa.Status.DesiredReplicas != newStatus.desiredReplicas {
		needUpdate = true
	}
	return needUpdate, newFedHpaStatus
}
// prepareForScheduling does the first analysis pass over the clusters'
// current hpas. It returns the capability lists (who can offer min/max,
// who has no hpa), the summed existing (min, max) totals, and the initial
// schedule state seeded from the existing specs.
// currentObjs has one entry per cluster; nil means no local hpa yet.
func prepareForScheduling(currentObjs map[string]pkgruntime.Object) (hpaLists, replicaNums, map[string]*replicaNums) {
	lists := hpaLists{
		availableMax: sets.NewString(),
		availableMin: sets.NewString(),
		noHpa:        sets.NewString(),
	}
	existingTotal := replicaNums{min: 0, max: 0}
	scheduleState := make(map[string]*replicaNums)

	for cluster, obj := range currentObjs {
		if obj == nil {
			// No local hpa yet; leave a nil placeholder in the state.
			lists.noHpa.Insert(cluster)
			scheduleState[cluster] = nil
			continue
		}
		if maxReplicasReducible(obj) {
			lists.availableMax.Insert(cluster)
		}
		if minReplicasReducible(obj) {
			lists.availableMin.Insert(cluster)
		}

		hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
		replicas := replicaNums{min: 0, max: 0}
		if hpa.Spec.MinReplicas != nil {
			replicas.min = *hpa.Spec.MinReplicas
			existingTotal.min += *hpa.Spec.MinReplicas
		}
		replicas.max = hpa.Spec.MaxReplicas
		existingTotal.max += hpa.Spec.MaxReplicas
		scheduleState[cluster] = &replicas
	}

	return lists, existingTotal, scheduleState
}
// Note: reduceMinReplicas and reduceMaxReplicas look quite similar in flow
// and code, however there are subtle differences. They together can be made
// into 1 function with an arg governing the functionality difference and
// additional args (superset of args in both) as needed. Doing so however
// makes the logical flow quite less readable. They are thus left as 2 for
// readability.

// reduceMinReplicas removes excessMin min replicas from the existing
// clusters' schedule state. Both availableMinList and the per-cluster
// scheduled replicas are updated in place. Returns the remaining excess,
// which is normally 0 on return.
func reduceMinReplicas(excessMin int32, availableMinList sets.String, scheduled map[string]*replicaNums) int32 {
	if excessMin > 0 {
		// first we try reducing from those clusters which already offer min
		if availableMinList.Len() > 0 {
			for _, cluster := range availableMinList.List() {
				replicas := scheduled[cluster]
				// Only take from clusters that stay at min >= 1 afterwards.
				if replicas.min > 1 {
					replicas.min--
					availableMinList.Delete(cluster)
					excessMin--
					if excessMin <= 0 {
						break
					}
				}
			}
		}
	}

	// If we could not get needed replicas from already offered min above
	// we abruptly start removing replicas from some/all clusters.
	// Here we might make some min to 0 signalling that this hpa might be a
	// candidate to be removed from this cluster altogether.
	for excessMin > 0 {
		for _, replicas := range scheduled {
			if replicas != nil &&
				replicas.min > 0 {
				replicas.min--
				excessMin--
				// Note: breaks only the inner loop; the outer loop re-checks
				// excessMin and exits once it reaches 0.
				if excessMin <= 0 {
					break
				}
			}
		}
	}
	return excessMin
}
// reduceMaxReplicas removes excessMax max replicas from the existing
// clusters' schedule state. Both availableMaxList and the per-cluster
// scheduled replicas are updated in place. Returns the remaining excess,
// which is normally 0 on return.
func reduceMaxReplicas(excessMax int32, availableMaxList sets.String, scheduled map[string]*replicaNums) int32 {
	if excessMax > 0 {
		// first we try reducing from those clusters which already offer max
		if availableMaxList.Len() > 0 {
			for _, cluster := range availableMaxList.List() {
				replicas := scheduled[cluster]
				// The condition requires max >= min before decrementing.
				// NOTE(review): decrementing when max == min leaves
				// max < min — confirm this edge is intended.
				if replicas != nil && !((replicas.max - replicas.min) < 0) {
					replicas.max--
					availableMaxList.Delete(cluster)
					excessMax--
					if excessMax <= 0 {
						break
					}
				}
			}
		}
	}
	// If we could not get needed replicas to reduce from already offered
	// max above we abruptly start removing replicas from some/all clusters.
	// Here we might make some max and min to 0, signalling that this hpa be
	// removed from this cluster altogether
	for excessMax > 0 {
		for _, replicas := range scheduled {
			if replicas != nil &&
				!((replicas.max - replicas.min) < 0) {
				replicas.max--
				excessMax--
				// Breaks only the inner loop; the outer loop re-checks.
				if excessMax <= 0 {
					break
				}
			}
		}
	}
	return excessMax
}
// distributeMaxReplicas distributes toDistributeMax max replicas across the
// clusters, updating scheduled in place, and returns the undistributed
// remainder (normally 0).
// Takes input:
// toDistributeMax: number of replicas to distribute.
// lists: cluster name lists, which have clusters with available max,
// available min and those with no hpas yet.
// rdc: replica distribution count for max and min.
// currentObjs: list of current cluster hpas.
// scheduled: schedule state which will be updated in place.
func distributeMaxReplicas(toDistributeMax int32, lists hpaLists, rdc replicaNums,
	currentObjs map[string]pkgruntime.Object, scheduled map[string]*replicaNums) int32 {
	// Sub-pass 3a: clusters whose local hpa is pinned at its max get one
	// replica each, preferentially taken from clusters offering max.
	for cluster, replicas := range scheduled {
		if toDistributeMax == 0 {
			break
		}
		if replicas == nil {
			continue
		}
		if maxReplicasNeeded(currentObjs[cluster]) {
			replicas.max++
			if lists.availableMax.Len() > 0 {
				popped, notEmpty := lists.availableMax.PopAny()
				if notEmpty {
					// Boundary checks have happened earlier in
					// minReplicasReducible().
					scheduled[popped].max--
				}
			}
			// Any which ways utilise available max replicas
			toDistributeMax--
		}
	}

	// Sub-pass 3b/3c: if we have new clusters where we can give our replicas,
	// then give away all our replicas to the new clusters first.
	if lists.noHpa.Len() > 0 {
		for toDistributeMax > 0 {
			for _, cluster := range lists.noHpa.UnsortedList() {
				if scheduled[cluster] == nil {
					scheduled[cluster] = &replicaNums{min: 0, max: 0}
				}
				replicas := scheduled[cluster]
				// first give away max from clusters offering them
				// this case especially helps getting hpa into newly joining
				// clusters.
				if lists.availableMax.Len() > 0 {
					popped, notEmpty := lists.availableMax.PopAny()
					if notEmpty {
						// Boundary checks to reduce max have happened earlier in
						// minReplicasReducible().
						replicas.max++
						scheduled[popped].max--
						toDistributeMax--
						continue
					}
				}
				// Otherwise hand out up to rdc.max at a time.
				if toDistributeMax < rdc.max {
					replicas.max += toDistributeMax
					toDistributeMax = 0
					break
				}
				replicas.max += rdc.max
				toDistributeMax -= rdc.max
			}
		}
	} else { // we have no new clusters but still have max replicas to distribute;
		// just distribute all in current clusters.
		for toDistributeMax > 0 {
			for cluster, replicas := range scheduled {
				if replicas == nil {
					replicas = &replicaNums{min: 0, max: 0}
					scheduled[cluster] = replicas
				}
				// First give away max from clusters offering them.
				// This case especially helps getting hpa into newly joining
				// clusters.
				if lists.availableMax.Len() > 0 {
					popped, notEmpty := lists.availableMax.PopAny()
					if notEmpty {
						// Boundary checks have happened earlier in
						// minReplicasReducible().
						replicas.max++
						scheduled[popped].max--
						toDistributeMax--
						continue
					}
				}
				if toDistributeMax < rdc.max {
					replicas.max += toDistributeMax
					toDistributeMax = 0
					break
				}
				replicas.max += rdc.max
				toDistributeMax -= rdc.max
			}
		}
	}
	return toDistributeMax
}
// distributeMinReplicas distributes toDistributeMin min replicas across the
// clusters that received max, updating scheduled in place, and returns the
// undistributed remainder.
// Takes input:
// toDistributeMin: number of replicas to distribute.
// lists: cluster name lists, which have clusters with available max,
// available min and those with no hpas yet.
// rdc: replica distribution count for max and min.
// currentObjs: list of current cluster hpas.
// scheduled: schedule state which will be updated in place.
func distributeMinReplicas(toDistributeMin int32, lists hpaLists, rdc replicaNums,
	currentObjs map[string]pkgruntime.Object, scheduled map[string]*replicaNums) int32 {
	// First favour clusters whose existing local hpa can absorb a higher min.
	for cluster, replicas := range scheduled {
		if toDistributeMin == 0 {
			break
		}
		// We have distributed Max and thus scheduled might not be nil
		// but probably current (what we got originally) is nil (no hpa)
		if replicas == nil || currentObjs[cluster] == nil {
			continue
		}
		if minReplicasIncreasable(currentObjs[cluster]) {
			if lists.availableMin.Len() > 0 {
				popped, notEmpty := lists.availableMin.PopAny()
				if notEmpty {
					// Boundary checks have happened earlier.
					scheduled[popped].min--
					replicas.min++
					toDistributeMin--
				}
			}
		}
	}

	if lists.noHpa.Len() > 0 {
		// TODO: can this become an infinite loop?
		for toDistributeMin > 0 {
			for _, cluster := range lists.noHpa.UnsortedList() {
				replicas := scheduled[cluster]
				if replicas == nil {
					// We did not get max here so this cluster
					// remains without hpa
					continue
				}
				var replicaNum int32 = 0
				if toDistributeMin < rdc.min {
					replicaNum = toDistributeMin
				} else {
					replicaNum = rdc.min
				}
				if (replicas.max - replicaNum) < replicas.min {
					// Cannot increase the min in this cluster
					// as it will go beyond max
					continue
				}
				if lists.availableMin.Len() > 0 {
					popped, notEmpty := lists.availableMin.PopAny()
					if notEmpty {
						// Boundary checks have happened earlier.
						scheduled[popped].min--
						replicas.min++
						toDistributeMin--
						continue
					}
				}
				replicas.min += replicaNum
				toDistributeMin -= replicaNum
			}
		}
	} else { // we have no new clusters but still have min replicas to distribute;
		// just distribute all in current clusters.
		for toDistributeMin > 0 {
			for _, replicas := range scheduled {
				if replicas == nil {
					// We did not get max here so this cluster
					// remains without hpa
					continue
				}
				var replicaNum int32 = 0
				if toDistributeMin < rdc.min {
					replicaNum = toDistributeMin
				} else {
					replicaNum = rdc.min
				}
				if (replicas.max - replicaNum) < replicas.min {
					// Cannot increase the min in this cluster
					// as it will go beyond max
					continue
				}
				if lists.availableMin.Len() > 0 {
					popped, notEmpty := lists.availableMin.PopAny()
					if notEmpty {
						// Boundary checks have happened earlier.
						scheduled[popped].min--
						replicas.min++
						toDistributeMin--
						continue
					}
				}
				replicas.min += replicaNum
				toDistributeMin -= replicaNum
			}
		}
	}
	return toDistributeMin
}
// finaliseScheduleState bumps min to 1 for every cluster that got max
// replicas but no min, since k8s rejects hpas with 0 min replicas. The
// resulting min total can exceed what the fedHpa requested, which is
// preferable to concentrating all replicas in one cluster (the usual
// request being min=1). The map is mutated and returned.
func finaliseScheduleState(scheduled map[string]*replicaNums) map[string]*replicaNums {
	for _, replicas := range scheduled {
		if replicas == nil {
			continue
		}
		if replicas.max > 0 && replicas.min <= 0 {
			// Min total does not necessarily meet the federated min limit.
			replicas.min = 1
		}
	}
	return scheduled
}
// isPristine reports whether the local hpa controller has not yet formed
// any opinion about this hpa (never scaled, no desired replicas). Such an
// object can freely offer its replicas — useful for freshly joined
// clusters.
// TODO: A just-born object would also offer its replicas, which can lead
// to fast thrashing. A creation timestamp (or a mandatory field set on
// creation) that can be used authoritatively would be the better signal;
// abusing annotations for this is the alternative.
func isPristine(hpa *autoscalingv1.HorizontalPodAutoscaler) bool {
	return hpa.Status.LastScaleTime == nil && hpa.Status.DesiredReplicas == 0
}
// isScaleable reports whether a reasonable amount of time
// (scaleForbiddenWindow) has passed since this hpa last scaled; used to
// avoid fast thrashing. An hpa that has never scaled is not scaleable.
func isScaleable(hpa *autoscalingv1.HorizontalPodAutoscaler) bool {
	if hpa.Status.LastScaleTime == nil {
		return false
	}
	deadline := hpa.Status.LastScaleTime.Add(scaleForbiddenWindow)
	return !deadline.After(time.Now())
}
// maxReplicasReducible reports whether this cluster's hpa can give away
// one of its max replicas to another cluster.
func maxReplicasReducible(obj pkgruntime.Object) bool {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	// Giving away one max must not drop it below min.
	if (hpa.Spec.MinReplicas != nil) &&
		(((hpa.Spec.MaxReplicas - 1) - *hpa.Spec.MinReplicas) < 0) {
		return false
	}
	// A pristine hpa (never scaled, nothing desired yet) can always offer.
	if isPristine(hpa) {
		return true
	}
	if !isScaleable(hpa) {
		return false
	}
	// Offer when the hpa is not using all it has: it is shrinking, or it
	// is steady below its max.
	if (hpa.Status.DesiredReplicas < hpa.Status.CurrentReplicas) ||
		((hpa.Status.DesiredReplicas == hpa.Status.CurrentReplicas) &&
			(hpa.Status.DesiredReplicas < hpa.Spec.MaxReplicas)) {
		return true
	}
	return false
}
// minReplicasReducible checks if this cluster (hpa) can offer replicas which are
// stuck here because of the min limit.
// It is noteworthy that min and max are adjusted separately, but if the replicas
// are not being used here, the max adjustment will lead it to become equal to min,
// but will not be able to scale down further and offer max to some other cluster
// which needs replicas.
func minReplicasReducible(obj pkgruntime.Object) bool {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	// A pristine hpa with min in (1, max] can offer min freely.
	if isPristine(hpa) && (hpa.Spec.MinReplicas != nil) &&
		(*hpa.Spec.MinReplicas > 1) &&
		(*hpa.Spec.MinReplicas <= hpa.Spec.MaxReplicas) {
		return true
	}
	if !isScaleable(hpa) {
		return false
	}
	// Steady at min (> 1): the min limit is what is pinning the replicas
	// here, so one can be offered elsewhere.
	if (hpa.Spec.MinReplicas != nil) &&
		(*hpa.Spec.MinReplicas > 1) &&
		(hpa.Status.DesiredReplicas == hpa.Status.CurrentReplicas) &&
		(hpa.Status.CurrentReplicas == *hpa.Spec.MinReplicas) {
		return true
	}
	return false
}
// maxReplicasNeeded reports whether this cluster's hpa is pinned at its max
// (steady, and current == spec max), i.e. it could use more max replicas.
// An hpa still inside its scale-forbidden window never needs more.
func maxReplicasNeeded(obj pkgruntime.Object) bool {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	if !isScaleable(hpa) {
		return false
	}
	return hpa.Status.CurrentReplicas == hpa.Status.DesiredReplicas &&
		hpa.Status.CurrentReplicas == hpa.Spec.MaxReplicas
}
// minReplicasIncreasable reports whether this cluster's hpa can absorb a
// higher min: it must be scaleable, have headroom below max, and currently
// desire more replicas than its min.
func minReplicasIncreasable(obj pkgruntime.Object) bool {
	hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
	if !isScaleable(hpa) {
		return false
	}
	if hpa.Spec.MinReplicas == nil {
		// No min set: nothing to increase against.
		return false
	}
	if *hpa.Spec.MinReplicas >= hpa.Spec.MaxReplicas {
		return false
	}
	return hpa.Status.DesiredReplicas > *hpa.Spec.MinReplicas
}

View File

@ -0,0 +1,262 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package federatedtypes
import (
"testing"
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"github.com/stretchr/testify/assert"
)
// replicas holds the expected (min, max) replica bounds scheduled onto a
// single cluster, used by the test table below to pin exact outcomes.
type replicas struct {
	min int32 // expected Spec.MinReplicas of the cluster-local hpa
	max int32 // expected Spec.MaxReplicas of the cluster-local hpa
}
// TestGetHpaScheduleState drives getHpaScheduleState through a table of
// federated/local hpa layouts and checks the per-cluster (min, max) replica
// bounds it produces. When expectedReplicas is nil only the aggregate
// invariants are verified (via checkClusterConditions), because the exact
// per-cluster split is not deterministic in those cases.
func TestGetHpaScheduleState(t *testing.T) {
	defaultFedHpa := newHpaWithReplicas(NewInt32(1), NewInt32(70), 10)
	testCases := map[string]struct {
		fedHpa           *autoscalingv1.HorizontalPodAutoscaler // nil means: use defaultFedHpa
		localHpas        map[string]pkgruntime.Object           // nil map value = cluster with no hpa yet
		expectedReplicas map[string]*replicas                   // nil = only check aggregate invariants
	}{
		"Distribiutes replicas randomly if no existing hpa in any local cluster": {
			localHpas: func() map[string]pkgruntime.Object {
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = nil
				hpas["c2"] = nil
				return hpas
			}(),
		},
		"Cluster with no hpa gets replicas if other clusters have replicas": {
			localHpas: func() map[string]pkgruntime.Object {
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = newHpaWithReplicas(NewInt32(1), NewInt32(70), 10)
				hpas["c2"] = nil
				return hpas
			}(),
			expectedReplicas: map[string]*replicas{
				"c1": {
					min: int32(1),
					max: int32(9),
				},
				"c2": {
					min: int32(1),
					max: int32(1),
				},
			},
		},
		"Cluster needing max replicas gets it if there is another cluster to offer max": {
			localHpas: func() map[string]pkgruntime.Object {
				// c1 is under-utilised, so it can offer a max replica to c2.
				hpa1 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 7)
				hpa1 = updateHpaStatus(hpa1, NewInt32(50), 5, 5, true)
				hpa2 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 1)
				hpa2 = updateHpaStatus(hpa2, NewInt32(70), 1, 1, true)
				// include third object to ensure, it does not break the test
				hpa3 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 2)
				hpa3 = updateHpaStatus(hpa3, NewInt32(70), 1, 1, false)
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = hpa1
				hpas["c2"] = hpa2
				hpas["c3"] = hpa3
				return hpas
			}(),
			expectedReplicas: map[string]*replicas{
				"c1": {
					min: int32(1),
					max: int32(6),
				},
				"c2": {
					min: int32(1),
					max: int32(2),
				},
				"c3": {
					min: int32(1),
					max: int32(2),
				},
			},
		},
		"Cluster needing max replicas does not get it if there is no cluster offerring max": {
			localHpas: func() map[string]pkgruntime.Object {
				// c1 is saturated (and not scaleable), so nothing can move.
				hpa1 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 9)
				hpa1 = updateHpaStatus(hpa1, NewInt32(70), 9, 9, false)
				hpa2 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 1)
				hpa2 = updateHpaStatus(hpa2, NewInt32(70), 1, 1, true)
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = hpa1
				hpas["c2"] = hpa2
				return hpas
			}(),
			expectedReplicas: map[string]*replicas{
				"c1": {
					min: int32(1),
					max: int32(9),
				},
				"c2": {
					min: int32(1),
					max: int32(1),
				},
			},
		},
		"Cluster which can increase min replicas gets to increase min if there is a cluster offering min": {
			fedHpa: newHpaWithReplicas(NewInt32(4), NewInt32(70), 10),
			localHpas: func() map[string]pkgruntime.Object {
				hpa1 := newHpaWithReplicas(NewInt32(3), NewInt32(70), 6)
				hpa1 = updateHpaStatus(hpa1, NewInt32(50), 3, 3, true)
				hpa2 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 4)
				hpa2 = updateHpaStatus(hpa2, NewInt32(50), 3, 3, true)
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = hpa1
				hpas["c2"] = hpa2
				return hpas
			}(),
			expectedReplicas: map[string]*replicas{
				"c1": {
					min: int32(2),
					max: int32(6),
				},
				"c2": {
					min: int32(2),
					max: int32(4),
				},
			},
		},
		"Cluster which can increase min replicas does not increase if there are no clusters offering min": {
			fedHpa: newHpaWithReplicas(NewInt32(4), NewInt32(70), 10),
			localHpas: func() map[string]pkgruntime.Object {
				hpa1 := newHpaWithReplicas(NewInt32(3), NewInt32(70), 6)
				hpa1 = updateHpaStatus(hpa1, NewInt32(50), 4, 4, true)
				hpa2 := newHpaWithReplicas(NewInt32(1), NewInt32(70), 4)
				hpa2 = updateHpaStatus(hpa2, NewInt32(50), 3, 3, true)
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = hpa1
				hpas["c2"] = hpa2
				return hpas
			}(),
			expectedReplicas: map[string]*replicas{
				"c1": {
					min: int32(3),
					max: int32(6),
				},
				"c2": {
					min: int32(1),
					max: int32(4),
				},
			},
		},
		"Increasing replicas on fed object increases the same on clusters": {
			// Existing total of local min, max = 1+1, 5+5 increasing to the fed values below
			fedHpa: newHpaWithReplicas(NewInt32(4), NewInt32(70), 14),
			localHpas: func() map[string]pkgruntime.Object {
				// does not matter if scaleability is true
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = newHpaWithReplicas(NewInt32(1), NewInt32(70), 5)
				hpas["c2"] = newHpaWithReplicas(NewInt32(1), NewInt32(70), 5)
				return hpas
			}(),
			// We dont know which cluster gets how many, but the resultant total should match
		},
		"Decreasing replicas on fed object decreases the same on clusters": {
			// Existing total of local min, max = 2+2, 8+8 decreasing to below
			fedHpa: newHpaWithReplicas(NewInt32(3), NewInt32(70), 8),
			localHpas: func() map[string]pkgruntime.Object {
				// does not matter if scaleability is true
				hpas := make(map[string]pkgruntime.Object)
				hpas["c1"] = newHpaWithReplicas(NewInt32(2), NewInt32(70), 8)
				hpas["c2"] = newHpaWithReplicas(NewInt32(2), NewInt32(70), 8)
				return hpas
			}(),
			// We dont know which cluster gets how many, but the resultant total should match
		},
	}
	for testName, testCase := range testCases {
		// Subtests run synchronously, so capturing the range variable in the
		// closure below is safe here.
		t.Run(testName, func(t *testing.T) {
			if testCase.fedHpa == nil {
				testCase.fedHpa = defaultFedHpa
			}
			scheduledState := getHpaScheduleState(testCase.fedHpa, testCase.localHpas)
			// Aggregate invariants hold for every case, pinned or not.
			checkClusterConditions(t, testCase.fedHpa, scheduledState)
			if testCase.expectedReplicas != nil {
				for cluster, replicas := range testCase.expectedReplicas {
					scheduledReplicas := scheduledState[cluster]
					assert.Equal(t, replicas.min, scheduledReplicas.min)
					assert.Equal(t, replicas.max, scheduledReplicas.max)
				}
			}
		})
	}
}
// updateHpaStatus stamps the given hpa's status with the supplied current CPU
// utilisation and current/desired replica counts, and sets LastScaleTime so
// that the hpa either does (scaleable=true) or does not (scaleable=false)
// qualify for further scaling. Returns the same object for chaining.
func updateHpaStatus(hpa *autoscalingv1.HorizontalPodAutoscaler, currentUtilisation *int32, current, desired int32, scaleable bool) *autoscalingv1.HorizontalPodAutoscaler {
	hpa.Status.CurrentReplicas = current
	hpa.Status.DesiredReplicas = desired
	hpa.Status.CurrentCPUUtilizationPercentage = currentUtilisation
	now := metav1.Now()
	scaledTime := now
	if scaleable {
		// definitely more than 5 minutes ago
		scaledTime = metav1.NewTime(now.Time.Add(-6 * time.Minute))
	}
	hpa.Status.LastScaleTime = &scaledTime
	return hpa
}
// checkClusterConditions asserts the schedule-wide invariants that every
// result of getHpaScheduleState must satisfy: the per-cluster max replicas
// sum to exactly the federated max, and the per-cluster min replicas sum to
// at least the federated min.
func checkClusterConditions(t *testing.T, fedHpa *autoscalingv1.HorizontalPodAutoscaler, scheduled map[string]*replicaNums) {
	minTotal := int32(0)
	maxTotal := int32(0)
	for _, replicas := range scheduled {
		minTotal += replicas.min
		maxTotal += replicas.max
	}
	// - Total of max matches the fed max
	assert.Equal(t, fedHpa.Spec.MaxReplicas, maxTotal)
	// - Total of min is not less then fed min
	// (return the condition directly instead of if/return true/return false)
	assert.Condition(t, func() bool {
		return *fedHpa.Spec.MinReplicas <= minTotal
	})
}
// newHpaWithReplicas builds a minimal hpa test fixture named "myhpa" in the
// default namespace, with the given min/max replica bounds and target CPU
// utilisation percentage.
func newHpaWithReplicas(min, targetUtilisation *int32, max int32) *autoscalingv1.HorizontalPodAutoscaler {
	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "myhpa",
			Namespace: apiv1.NamespaceDefault,
			SelfLink:  "/api/mylink",
		},
	}
	hpa.Spec = autoscalingv1.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
			Kind: "HorizontalPodAutoscaler",
			Name: "target-",
		},
		MinReplicas:                    min,
		MaxReplicas:                    max,
		TargetCPUUtilizationPercentage: targetUtilisation,
	}
	return hpa
}

View File

@ -40,16 +40,16 @@ func init() {
}
type ReplicaSetAdapter struct {
*schedulingAdapter
*replicaSchedulingAdapter
client federationclientset.Interface
}
func NewReplicaSetAdapter(client federationclientset.Interface, config *restclient.Config) FederatedTypeAdapter {
schedulingAdapter := schedulingAdapter{
replicaSchedulingAdapter := replicaSchedulingAdapter{
preferencesAnnotationName: FedReplicaSetPreferencesAnnotation,
updateStatusFunc: func(obj pkgruntime.Object, status interface{}) error {
updateStatusFunc: func(obj pkgruntime.Object, schedulingInfo interface{}) error {
rs := obj.(*extensionsv1.ReplicaSet)
typedStatus := status.(ReplicaSchedulingStatus)
typedStatus := schedulingInfo.(*ReplicaSchedulingInfo).Status
if typedStatus.Replicas != rs.Status.Replicas || typedStatus.FullyLabeledReplicas != rs.Status.FullyLabeledReplicas ||
typedStatus.ReadyReplicas != rs.Status.ReadyReplicas || typedStatus.AvailableReplicas != rs.Status.AvailableReplicas {
rs.Status = extensionsv1.ReplicaSetStatus{
@ -64,7 +64,7 @@ func NewReplicaSetAdapter(client federationclientset.Interface, config *restclie
return nil
},
}
return &ReplicaSetAdapter{&schedulingAdapter, client}
return &ReplicaSetAdapter{&replicaSchedulingAdapter, client}
}
func (a *ReplicaSetAdapter) Kind() string {

View File

@ -37,6 +37,16 @@ import (
"github.com/golang/glog"
)
// ScheduleAction is used by the interface ScheduleObject of SchedulingAdapter
// to sync controller reconcile to convey the action type needed for the
// particular cluster local object in ScheduleObject
type ScheduleAction string
const (
ActionAdd = "add"
ActionDelete = "delete"
)
// ReplicaSchedulingStatus contains the status of the replica type objects (rs or deployment)
// that are being scheduled into joined clusters.
type ReplicaSchedulingStatus struct {
@ -58,26 +68,26 @@ type ReplicaSchedulingInfo struct {
// federated type that requires more complex synchronization logic.
type SchedulingAdapter interface {
GetSchedule(obj pkgruntime.Object, key string, clusters []*federationapi.Cluster, informer fedutil.FederatedInformer) (interface{}, error)
ScheduleObject(cluster *federationapi.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo interface{}) (pkgruntime.Object, bool, error)
UpdateFederatedStatus(obj pkgruntime.Object, status interface{}) error
ScheduleObject(cluster *federationapi.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo interface{}) (pkgruntime.Object, ScheduleAction, error)
UpdateFederatedStatus(obj pkgruntime.Object, schedulingInfo interface{}) error
// EquivalentIgnoringSchedule returns whether obj1 and obj2 are
// equivalent ignoring differences due to scheduling.
EquivalentIgnoringSchedule(obj1, obj2 pkgruntime.Object) bool
}
// schedulingAdapter is meant to be embedded in other type adapters that require
// workload scheduling.
type schedulingAdapter struct {
// replicaSchedulingAdapter is meant to be embedded in other type adapters that require
// workload scheduling with actual pod replicas.
type replicaSchedulingAdapter struct {
preferencesAnnotationName string
updateStatusFunc func(pkgruntime.Object, interface{}) error
}
func (a *schedulingAdapter) IsSchedulingAdapter() bool {
func (a *replicaSchedulingAdapter) IsSchedulingAdapter() bool {
return true
}
func (a *schedulingAdapter) GetSchedule(obj pkgruntime.Object, key string, clusters []*federationapi.Cluster, informer fedutil.FederatedInformer) (interface{}, error) {
func (a *replicaSchedulingAdapter) GetSchedule(obj pkgruntime.Object, key string, clusters []*federationapi.Cluster, informer fedutil.FederatedInformer) (interface{}, error) {
var clusterNames []string
for _, cluster := range clusters {
clusterNames = append(clusterNames, cluster.Name)
@ -128,7 +138,7 @@ func (a *schedulingAdapter) GetSchedule(obj pkgruntime.Object, key string, clust
}, nil
}
func (a *schedulingAdapter) ScheduleObject(cluster *federationapi.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo interface{}) (pkgruntime.Object, bool, error) {
func (a *replicaSchedulingAdapter) ScheduleObject(cluster *federationapi.Cluster, clusterObj pkgruntime.Object, federationObjCopy pkgruntime.Object, schedulingInfo interface{}) (pkgruntime.Object, ScheduleAction, error) {
typedSchedulingInfo := schedulingInfo.(*ReplicaSchedulingInfo)
replicas, ok := typedSchedulingInfo.Schedule[cluster.Name]
if !ok {
@ -152,11 +162,15 @@ func (a *schedulingAdapter) ScheduleObject(cluster *federationapi.Cluster, clust
}
}
}
return federationObjCopy, replicas > 0, nil
var action ScheduleAction = ""
if replicas > 0 {
action = ActionAdd
}
return federationObjCopy, action, nil
}
func (a *schedulingAdapter) UpdateFederatedStatus(obj pkgruntime.Object, status interface{}) error {
return a.updateStatusFunc(obj, status)
func (a *replicaSchedulingAdapter) UpdateFederatedStatus(obj pkgruntime.Object, schedulingInfo interface{}) error {
return a.updateStatusFunc(obj, schedulingInfo)
}
func schedule(planner *planner.Planner, obj pkgruntime.Object, key string, clusterNames []string, currentReplicasPerCluster map[string]int64, estimatedCapacity map[string]int64) map[string]int64 {

View File

@ -26,6 +26,7 @@ filegroup(
":package-srcs",
"//federation/pkg/federation-controller/cluster:all-srcs",
"//federation/pkg/federation-controller/ingress:all-srcs",
"//federation/pkg/federation-controller/job:all-srcs",
"//federation/pkg/federation-controller/service:all-srcs",
"//federation/pkg/federation-controller/sync:all-srcs",
"//federation/pkg/federation-controller/util:all-srcs",

View File

@ -0,0 +1,80 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["jobcontroller.go"],
tags = ["automanaged"],
deps = [
"//federation/apis/federation:go_default_library",
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_clientset:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/deletionhelper:go_default_library",
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//federation/pkg/federation-controller/util/planner:go_default_library",
"//federation/pkg/federation-controller/util/replicapreferences:go_default_library",
"//pkg/api:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["jobcontroller_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_clientset/fake:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//federation/pkg/federation-controller/util/finalizers:go_default_library",
"//federation/pkg/federation-controller/util/test:go_default_library",
"//pkg/apis/batch/v1:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,561 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"fmt"
"reflect"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
batchv1 "k8s.io/api/batch/v1"
clientv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/workqueue"
fed "k8s.io/kubernetes/federation/apis/federation"
fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/planner"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/controller"
)
const (
	// fedJobPreferencesAnnotation is the annotation on a federated job that
	// carries user-supplied per-cluster allocation preferences.
	fedJobPreferencesAnnotation = "federation.kubernetes.io/job-preferences"
	// allClustersKey is the delivery key used to trigger a reconcile of all
	// jobs (e.g. when cluster availability changes).
	allClustersKey = "THE_ALL_CLUSTER_KEY"
	// UserAgentName is the user agent used in the federation client
	UserAgentName = "Federation-Job-Controller"
	// ControllerName is name of this controller
	ControllerName = "jobs"
)
var (
	// RequiredResources is the resource group version of the type this controller manages
	RequiredResources = []schema.GroupVersionResource{batchv1.SchemeGroupVersion.WithResource("jobs")}

	// Delays are vars (not consts) so tests can shorten them.
	jobReviewDelay          = 10 * time.Second // redelivery delay for local job events and rechecks
	clusterAvailableDelay   = 20 * time.Second // delay before reacting to a cluster becoming ready
	clusterUnavailableDelay = 60 * time.Second // delay before reacting to a cluster going away
	updateTimeout           = 30 * time.Second // timeout for a batch of federated operations
	backoffInitial          = 5 * time.Second  // initial per-key failure backoff
	backoffMax              = 1 * time.Minute  // cap on the per-key failure backoff
)
// FederationJobController synchronizes the state of a federated job object
// to clusters that are members of the federation.
type FederationJobController struct {
	fedClient fedclientset.Interface // client for the federation apiserver

	// Informer (store + controller) for federated job objects.
	jobController cache.Controller
	jobStore      cache.Store

	// Federated informer watching jobs in each member cluster.
	fedJobInformer fedutil.FederatedInformer

	jobDeliverer     *fedutil.DelayingDeliverer // delayed delivery of job keys
	clusterDeliverer *fedutil.DelayingDeliverer // delayed delivery of cluster-change events
	jobWorkQueue     workqueue.Interface        // keys consumed by worker()
	// For updating members of federation.
	fedUpdater fedutil.FederatedUpdater
	jobBackoff *flowcontrol.Backoff // per-key failure backoff
	// For events
	eventRecorder record.EventRecorder

	defaultPlanner *planner.Planner // used when a job carries no preference annotation

	deletionHelper *deletionhelper.DeletionHelper // cascading-delete bookkeeping
}
// NewJobController creates a new federation job controller wired to the given
// federation client: it sets up the federated informer over member-cluster
// jobs, the informer over federated jobs, the federated updater used to
// create/update/delete jobs in member clusters, and the deletion helper for
// cascading deletion. Call Run to start it.
func NewJobController(fedClient fedclientset.Interface) *FederationJobController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(fedClient))
	recorder := broadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "federated-job-controller"})
	fjc := &FederationJobController{
		fedClient:        fedClient,
		jobDeliverer:     fedutil.NewDelayingDeliverer(),
		clusterDeliverer: fedutil.NewDelayingDeliverer(),
		jobWorkQueue:     workqueue.New(),
		jobBackoff:       flowcontrol.NewBackOff(backoffInitial, backoffMax),
		// Default: spread replicas evenly over all clusters (weight 1 each).
		defaultPlanner: planner.NewPlanner(&fed.ReplicaAllocationPreferences{
			Clusters: map[string]fed.ClusterPreferences{
				"*": {Weight: 1},
			},
		}),
		eventRecorder: recorder,
	}
	// Factory producing a jobs informer for each member cluster; any local
	// change redelivers the job after jobReviewDelay.
	jobFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) {
		return cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
					return clientset.BatchV1().Jobs(metav1.NamespaceAll).List(options)
				},
				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
					return clientset.BatchV1().Jobs(metav1.NamespaceAll).Watch(options)
				},
			},
			&batchv1.Job{},
			controller.NoResyncPeriodFunc(),
			fedutil.NewTriggerOnAllChanges(
				func(obj runtime.Object) { fjc.deliverLocalJob(obj, jobReviewDelay) },
			),
		)
	}
	// Cluster membership changes trigger a full re-reconcile after a delay.
	clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
		ClusterAvailable: func(cluster *fedv1.Cluster) {
			fjc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay)
		},
		ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
			fjc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
		},
	}
	fjc.fedJobInformer = fedutil.NewFederatedInformer(fedClient, jobFedInformerFactory, &clusterLifecycle)
	// Informer over federated jobs; meta/spec changes are delivered immediately.
	fjc.jobStore, fjc.jobController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return fjc.fedClient.BatchV1().Jobs(metav1.NamespaceAll).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return fjc.fedClient.BatchV1().Jobs(metav1.NamespaceAll).Watch(options)
			},
		},
		&batchv1.Job{},
		controller.NoResyncPeriodFunc(),
		fedutil.NewTriggerOnMetaAndSpecChanges(
			func(obj runtime.Object) { fjc.deliverFedJobObj(obj, 0) },
		),
	)
	// Create/update/delete callbacks used to push jobs into member clusters.
	fjc.fedUpdater = fedutil.NewFederatedUpdater(fjc.fedJobInformer, "job", updateTimeout, fjc.eventRecorder,
		func(client kubeclientset.Interface, obj runtime.Object) error {
			rs := obj.(*batchv1.Job)
			_, err := client.BatchV1().Jobs(rs.Namespace).Create(rs)
			return err
		},
		func(client kubeclientset.Interface, obj runtime.Object) error {
			rs := obj.(*batchv1.Job)
			_, err := client.BatchV1().Jobs(rs.Namespace).Update(rs)
			return err
		},
		func(client kubeclientset.Interface, obj runtime.Object) error {
			rs := obj.(*batchv1.Job)
			err := client.BatchV1().Jobs(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
			return err
		})
	fjc.deletionHelper = deletionhelper.NewDeletionHelper(
		fjc.updateJob,
		// objNameFunc
		func(obj runtime.Object) string {
			job := obj.(*batchv1.Job)
			return job.Name
		},
		fjc.fedJobInformer,
		fjc.fedUpdater,
	)
	return fjc
}
// Sends the given updated object to apiserver.
// Assumes that the given object is a job.
// Used as the update callback for the deletion helper.
func (fjc *FederationJobController) updateJob(obj runtime.Object) (runtime.Object, error) {
	job := obj.(*batchv1.Job)
	return fjc.fedClient.BatchV1().Jobs(job.Namespace).Update(job)
}
// Run starts the syncing of federation jobs to the clusters.
// It starts the informers and deliverers, waits for all caches to sync, then
// launches the requested number of workers and blocks until stopCh closes,
// at which point it tears everything down.
func (fjc *FederationJobController) Run(workers int, stopCh <-chan struct{}) {
	go fjc.jobController.Run(stopCh)
	fjc.fedJobInformer.Start()
	fjc.jobDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) {
		fjc.jobWorkQueue.Add(item.Key)
	})
	fjc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) {
		fjc.reconcileJobsOnClusterChange()
	})
	// Busy-wait (5ms poll) until all informers have synced before starting
	// workers, so reconciles never see partially filled caches.
	for !fjc.isSynced() {
		time.Sleep(5 * time.Millisecond)
	}
	for i := 0; i < workers; i++ {
		go wait.Until(fjc.worker, time.Second, stopCh)
	}
	fedutil.StartBackoffGC(fjc.jobBackoff, stopCh)
	// Block until asked to stop, then shut everything down.
	<-stopCh
	glog.Infof("Shutting down FederationJobController")
	fjc.jobDeliverer.Stop()
	fjc.clusterDeliverer.Stop()
	fjc.jobWorkQueue.ShutDown()
	fjc.fedJobInformer.Stop()
}
// isSynced reports whether the cluster list, the per-cluster job caches and
// the federated job cache have all completed their initial sync. Reconciles
// are skipped (statusNotSynced) until this returns true.
func (fjc *FederationJobController) isSynced() bool {
	if !fjc.fedJobInformer.ClustersSynced() {
		glog.V(3).Infof("Cluster list not synced")
		return false
	}
	clusters, err := fjc.fedJobInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get ready clusters: %v", err)
		return false
	}
	if !fjc.fedJobInformer.GetTargetStore().ClustersSynced(clusters) {
		glog.V(2).Infof("cluster job list not synced")
		return false
	}
	if !fjc.jobController.HasSynced() {
		glog.V(2).Infof("federation job list not synced")
		return false
	}
	return true
}
// deliverLocalJob queues the key of a member-cluster job for reconciliation
// after the given duration, but only when a federated job with the same key
// exists — jobs that live only in a local cluster are ignored.
func (fjc *FederationJobController) deliverLocalJob(obj interface{}, duration time.Duration) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %v: %v", obj, err)
		return
	}
	_, exists, err := fjc.jobStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Couldn't get federated job %v: %v", key, err)
		return
	}
	if !exists {
		// No federated counterpart: the job exists only in the local cluster.
		return
	}
	fjc.deliverJobByKey(key, duration, false)
}
// deliverFedJobObj queues the key of a federated job object for
// reconciliation after the given delay (0 means immediately).
func (fjc *FederationJobController) deliverFedJobObj(obj interface{}, delay time.Duration) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}
	fjc.deliverJobByKey(key, delay, false)
}
// deliverJobByKey schedules the job key for reconciliation after delay.
// When failed is true the per-key backoff is advanced and added to the delay;
// otherwise the backoff for the key is reset.
func (fjc *FederationJobController) deliverJobByKey(key string, delay time.Duration, failed bool) {
	if !failed {
		fjc.jobBackoff.Reset(key)
	} else {
		fjc.jobBackoff.Next(key, time.Now())
		delay += fjc.jobBackoff.Get(key)
	}
	fjc.jobDeliverer.DeliverAfter(key, nil, delay)
}
// reconciliationStatus describes the outcome of one reconcileJob call and
// drives how (and whether) worker redelivers the key.
type reconciliationStatus string

const (
	statusAllOk       = reconciliationStatus("ALL_OK") // in sync; wait for next event
	statusNeedRecheck = reconciliationStatus("RECHECK") // operations sent; verify later
	statusError       = reconciliationStatus("ERROR")  // transient error; retry with backoff
	statusNotSynced   = reconciliationStatus("NOSYNC") // caches not synced; retry later
)
// worker pops job keys off the work queue and reconciles them until the queue
// is shut down. The returned reconciliation status decides whether and with
// what delay the key is redelivered.
func (fjc *FederationJobController) worker() {
	for {
		item, quit := fjc.jobWorkQueue.Get()
		if quit {
			return
		}
		key := item.(string)
		status, err := fjc.reconcileJob(key)
		fjc.jobWorkQueue.Done(item)
		if err != nil {
			glog.Errorf("Error syncing job controller: %v", err)
			fjc.deliverJobByKey(key, 0, true)
		} else {
			switch status {
			case statusAllOk:
				// In sync; nothing to redeliver. (The previous explicit
				// `break` was redundant — Go switch cases never fall through.)
			case statusError:
				fjc.deliverJobByKey(key, 0, true)
			case statusNeedRecheck:
				fjc.deliverJobByKey(key, jobReviewDelay, false)
			case statusNotSynced:
				fjc.deliverJobByKey(key, clusterAvailableDelay, false)
			default:
				glog.Errorf("Unhandled reconciliation status: %s", status)
				fjc.deliverJobByKey(key, jobReviewDelay, false)
			}
		}
	}
}
// scheduleResult is one cluster's share of a federated job: how many pods may
// run in parallel there and, when the federated job sets Spec.Completions,
// how many completions the cluster is responsible for (nil otherwise).
type scheduleResult struct {
	Parallelism *int32
	Completions *int32
}
// schedule splits the federated job's parallelism and completions across the
// given ready clusters. Parallelism is planned with the user's allocation
// preferences (or the default even-spread planner); completions are then
// planned over the clusters that received parallelism, with the per-cluster
// min/max bounds stripped so completions follow the weights alone.
//
// NOTE(review): *fjob.Spec.Parallelism is dereferenced without a nil check —
// presumably the field is always defaulted by the apiserver; confirm.
func (fjc *FederationJobController) schedule(fjob *batchv1.Job, clusters []*fedv1.Cluster) map[string]scheduleResult {
	plnr := fjc.defaultPlanner
	frsPref, err := replicapreferences.GetAllocationPreferences(fjob, fedJobPreferencesAnnotation)
	if err != nil {
		glog.Warningf("Invalid job specific preference, use default. rs: %v, err: %v", fjob, err)
	}
	if frsPref != nil { // create a new planner if user specified a preference
		plnr = planner.NewPlanner(frsPref)
	}

	parallelism := int64(*fjob.Spec.Parallelism)
	var clusterNames []string
	for _, cluster := range clusters {
		clusterNames = append(clusterNames, cluster.Name)
	}
	parallelismResult, _ := plnr.Plan(parallelism, clusterNames, nil, nil, fjob.Namespace+"/"+fjob.Name)

	if frsPref != nil {
		// Strip the min/max bounds so the completions plan follows weights
		// only. BUG FIX: the range variable is a copy of the map value, so
		// the cleared preference must be written back into the map —
		// previously these mutations were silently discarded.
		for name, clusterPref := range frsPref.Clusters {
			clusterPref.MinReplicas = 0
			clusterPref.MaxReplicas = nil
			frsPref.Clusters[name] = clusterPref
		}
		plnr = planner.NewPlanner(frsPref)
	}

	// Completions are only planned over clusters that got parallelism.
	clusterNames = nil
	for clusterName := range parallelismResult {
		clusterNames = append(clusterNames, clusterName)
	}
	completionsResult := make(map[string]int64)
	if fjob.Spec.Completions != nil {
		completionsResult, _ = plnr.Plan(int64(*fjob.Spec.Completions), clusterNames, nil, nil, fjob.Namespace+"/"+fjob.Name)
	}

	results := make(map[string]scheduleResult)
	for _, clusterName := range clusterNames {
		// Fresh per-iteration locals so the pointers stored below are stable.
		parallelism32 := int32(parallelismResult[clusterName])
		completions32 := int32(completionsResult[clusterName])
		result := scheduleResult{
			Parallelism: &parallelism32,
		}
		if fjob.Spec.Completions != nil {
			result.Completions = &completions32
		}
		results[clusterName] = result
	}
	return results
}
// reconcileJob brings the member-cluster jobs for the federated job at key in
// line with its spec: it schedules parallelism/completions per cluster,
// creates/updates local jobs as needed, aggregates local statuses back onto
// the federated object, and handles cascading deletion.
func (fjc *FederationJobController) reconcileJob(key string) (reconciliationStatus, error) {
	if !fjc.isSynced() {
		return statusNotSynced, nil
	}

	glog.V(4).Infof("Start reconcile job %q", key)
	startTime := time.Now()
	// BUG FIX: the elapsed time must be computed when the deferred call runs,
	// not when defer registers it — `defer glog.V(4).Infof(..., time.Now().Sub(startTime))`
	// evaluated its arguments immediately and always logged ~0s.
	defer func() {
		glog.V(4).Infof("Finished reconcile job %q (%v)", key, time.Since(startTime))
	}()

	objFromStore, exists, err := fjc.jobStore.GetByKey(key)
	if err != nil {
		return statusError, err
	}
	if !exists {
		// deleted federated job, nothing need to do
		return statusAllOk, nil
	}

	// Create a copy before modifying the obj to prevent race condition with other readers of obj from store.
	obj, err := api.Scheme.DeepCopy(objFromStore)
	fjob, ok := obj.(*batchv1.Job)
	if err != nil || !ok {
		return statusError, err
	}

	// delete job
	if fjob.DeletionTimestamp != nil {
		if err := fjc.delete(fjob); err != nil {
			fjc.eventRecorder.Eventf(fjob, api.EventTypeNormal, "DeleteFailed", "Job delete failed: %v", err)
			return statusError, err
		}
		return statusAllOk, nil
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for job: %s\n", key)
	// Add the required finalizers before creating a job in underlying clusters.
	updatedJobObj, err := fjc.deletionHelper.EnsureFinalizers(fjob)
	if err != nil {
		return statusError, err
	}
	fjob = updatedJobObj.(*batchv1.Job)

	clusters, err := fjc.fedJobInformer.GetReadyClusters()
	if err != nil {
		return statusError, err
	}

	scheduleResult := fjc.schedule(fjob, clusters)
	glog.V(3).Infof("Start syncing local job %s: %s\n", key, spew.Sprintf("%v", scheduleResult))

	fedStatus := batchv1.JobStatus{}
	var fedStatusFailedCondition *batchv1.JobCondition
	var fedStatusCompleteCondition *batchv1.JobCondition
	var operations []fedutil.FederatedOperation
	for clusterName, result := range scheduleResult {
		ljobObj, exists, err := fjc.fedJobInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return statusError, err
		}
		// Desired cluster-local job: federated spec with this cluster's share.
		ljob := &batchv1.Job{
			ObjectMeta: fedutil.DeepCopyRelevantObjectMeta(fjob.ObjectMeta),
			Spec:       *fedutil.DeepCopyApiTypeOrPanic(&fjob.Spec).(*batchv1.JobSpec),
		}
		// use selector generated at federation level, or user specified value
		manualSelector := true
		ljob.Spec.ManualSelector = &manualSelector
		ljob.Spec.Parallelism = result.Parallelism
		ljob.Spec.Completions = result.Completions

		if !exists {
			// Only create a local job when the cluster was given any pods.
			if *ljob.Spec.Parallelism > 0 {
				fjc.eventRecorder.Eventf(fjob, api.EventTypeNormal, "CreateInCluster", "Creating job in cluster %s", clusterName)
				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeAdd,
					Obj:         ljob,
					ClusterName: clusterName,
				})
			}
		} else {
			currentLjob := ljobObj.(*batchv1.Job)

			// Update existing job, if needed.
			if !fedutil.ObjectMetaAndSpecEquivalent(ljob, currentLjob) {
				fjc.eventRecorder.Eventf(fjob, api.EventTypeNormal, "UpdateInCluster", "Updating job in cluster %s", clusterName)
				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeUpdate,
					Obj:         ljob,
					ClusterName: clusterName,
				})
			}

			// collect local job status: keep the latest Complete/Failed
			// condition seen across all clusters.
			for _, condition := range currentLjob.Status.Conditions {
				// BUG FIX: shadow the range variable before taking its
				// address — previously &condition aliased the single loop
				// variable, so the stored pointer ended up referring to
				// whatever condition the loop visited last.
				condition := condition
				if condition.Type == batchv1.JobComplete {
					if fedStatusCompleteCondition == nil ||
						fedStatusCompleteCondition.LastTransitionTime.Before(condition.LastTransitionTime) {
						fedStatusCompleteCondition = &condition
					}
				} else if condition.Type == batchv1.JobFailed {
					if fedStatusFailedCondition == nil ||
						fedStatusFailedCondition.LastTransitionTime.Before(condition.LastTransitionTime) {
						fedStatusFailedCondition = &condition
					}
				}
			}
			// Earliest start time and latest completion time win.
			if currentLjob.Status.StartTime != nil {
				if fedStatus.StartTime == nil || fedStatus.StartTime.After(currentLjob.Status.StartTime.Time) {
					fedStatus.StartTime = currentLjob.Status.StartTime
				}
			}
			if currentLjob.Status.CompletionTime != nil {
				if fedStatus.CompletionTime == nil || fedStatus.CompletionTime.Before(*currentLjob.Status.CompletionTime) {
					fedStatus.CompletionTime = currentLjob.Status.CompletionTime
				}
			}
			fedStatus.Active += currentLjob.Status.Active
			fedStatus.Succeeded += currentLjob.Status.Succeeded
			fedStatus.Failed += currentLjob.Status.Failed
		}
	}

	// federated job fails if any local job fails
	if fedStatusFailedCondition != nil {
		fedStatus.Conditions = append(fedStatus.Conditions, *fedStatusFailedCondition)
	} else if fedStatusCompleteCondition != nil {
		fedStatus.Conditions = append(fedStatus.Conditions, *fedStatusCompleteCondition)
	}
	if !reflect.DeepEqual(fedStatus, fjob.Status) {
		fjob.Status = fedStatus
		_, err = fjc.fedClient.BatchV1().Jobs(fjob.Namespace).UpdateStatus(fjob)
		if err != nil {
			return statusError, err
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return statusAllOk, nil
	}

	if glog.V(4) {
		for i, op := range operations {
			job := op.Obj.(*batchv1.Job)
			glog.V(4).Infof("operation[%d]: %s, %s/%s/%s, %d", i, op.Type, op.ClusterName, job.Namespace, job.Name, *job.Spec.Parallelism)
		}
	}
	err = fjc.fedUpdater.Update(operations)
	if err != nil {
		return statusError, err
	}

	// Some operations were made, reconcile after a while.
	return statusNeedRecheck, nil
}
// reconcileJobsOnClusterChange re-delivers every federated job currently in
// the store so the controller reconsiders their placement after cluster
// membership changes.
func (fjc *FederationJobController) reconcileJobsOnClusterChange() {
	// Caches not synced yet: schedule another all-clusters pass for later.
	if !fjc.isSynced() {
		fjc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay)
	}
	for _, obj := range fjc.jobStore.List() {
		key, _ := controller.KeyFunc(obj)
		fjc.deliverJobByKey(key, 0, false)
	}
}
// delete removes the given federated job from the underlying clusters and then
// from the federation control plane, returning an error if the deletion was
// not complete.
func (fjc *FederationJobController) delete(job *batchv1.Job) error {
	glog.V(3).Infof("Handling deletion of job: %s/%s\n", job.Namespace, job.Name)
	if _, err := fjc.deletionHelper.HandleObjectInUnderlyingClusters(job); err != nil {
		return err
	}
	err := fjc.fedClient.BatchV1().Jobs(job.Namespace).Delete(job.Name, nil)
	if err != nil && !errors.IsNotFound(err) {
		// A NotFound error is fine: the job is already gone. This is expected
		// when processing an update caused by finalizer removal — whatever
		// removed the last finalizer also deletes the job, so there is
		// nothing left for us to do.
		return fmt.Errorf("failed to delete job: %s/%s, %v", job.Namespace, job.Name, err)
	}
	return nil
}

View File

@ -0,0 +1,282 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"flag"
"fmt"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
kubeclientset "k8s.io/client-go/kubernetes"
kubeclientfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientfake "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/fake"
fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
finalizersutil "k8s.io/kubernetes/federation/pkg/federation-controller/util/finalizers"
testutil "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
batchv1internal "k8s.io/kubernetes/pkg/apis/batch/v1"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
"reflect"
"strings"
)
// installWatchReactor attaches a race-free fake watch for the named resource
// to the given fake client and registers create/update/delete reactors that
// mirror each action both into the watch and onto the returned channel, so
// tests can observe every object the controller writes.
//
// Each reactor returns handled=false so the fake client's default object
// tracker still processes the action. (Fix: parameter was misspelled
// "fakeClien".)
func installWatchReactor(fakeClient *core.Fake, resource string) chan runtime.Object {
	objChan := make(chan runtime.Object, 100)

	fakeWatch := watch.NewRaceFreeFake()
	fakeClient.PrependWatchReactor(resource, core.DefaultWatchReactor(fakeWatch, nil))
	fakeClient.PrependReactor("create", resource, func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := action.(core.CreateAction).GetObject()
		// Apply Job defaulting so watchers see a defaulted object, as a real
		// apiserver would deliver.
		batchv1internal.SetDefaults_Job(obj.(*batchv1.Job))
		fakeWatch.Add(obj)
		objChan <- obj
		return false, nil, nil
	})
	fakeClient.PrependReactor("update", resource, func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := action.(core.UpdateAction).GetObject()
		fakeWatch.Modify(obj)
		objChan <- obj
		return false, nil, nil
	})
	fakeClient.PrependReactor("delete", resource, func(action core.Action) (handled bool, ret runtime.Object, err error) {
		// Delete actions carry only name/namespace, so synthesize a skeleton
		// Job for the watch event and the observation channel.
		obj := &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      action.(core.DeleteAction).GetName(),
				Namespace: action.GetNamespace(),
			},
		}
		fakeWatch.Delete(obj)
		objChan <- obj
		return false, nil, nil
	})
	return objChan
}
// TestJobController exercises the federated job controller end to end against
// fake federation and member-cluster clients: it verifies that creating a
// federated job splits parallelism/completions across the two clusters, that
// local job completion is aggregated back into the federated status, and that
// deleting the federated job cascades to the per-cluster jobs.
func TestJobController(t *testing.T) {
	flag.Set("logtostderr", "true")
	flag.Set("v", "5")
	flag.Parse()

	// Shorten controller delays so the test converges quickly.
	jobReviewDelay = 50 * time.Millisecond
	clusterAvailableDelay = 200 * time.Millisecond
	clusterUnavailableDelay = 200 * time.Millisecond

	// Federation control plane with two healthy member clusters.
	fedclientset := fedclientfake.NewSimpleClientset()
	fedChan := installWatchReactor(&fedclientset.Fake, "jobs")
	fedclientset.Federation().Clusters().Create(testutil.NewCluster("k8s-1", apiv1.ConditionTrue))
	fedclientset.Federation().Clusters().Create(testutil.NewCluster("k8s-2", apiv1.ConditionTrue))

	// One fake clientset (plus observation channel) per member cluster.
	kube1clientset := kubeclientfake.NewSimpleClientset()
	kube1Chan := installWatchReactor(&kube1clientset.Fake, "jobs")
	kube2clientset := kubeclientfake.NewSimpleClientset()
	kube2Chan := installWatchReactor(&kube2clientset.Fake, "jobs")

	// Route the federated informer to the matching fake client by cluster name.
	fedInformerClientFactory := func(cluster *fedv1.Cluster) (kubeclientset.Interface, error) {
		switch cluster.Name {
		case "k8s-1":
			return kube1clientset, nil
		case "k8s-2":
			return kube2clientset, nil
		default:
			return nil, fmt.Errorf("Unknown cluster: %v", cluster.Name)
		}
	}
	jobController := NewJobController(fedclientset)
	fedjobinformer := testutil.ToFederatedInformerForTestOnly(jobController.fedJobInformer)
	fedjobinformer.SetClientFactory(fedInformerClientFactory)

	stopChan := make(chan struct{})
	defer close(stopChan)
	go jobController.Run(5, stopChan)

	// test creates one federated job and asserts on the expected per-cluster
	// split (parallelism1/2, completions1/2), the aggregated federated status,
	// and cascading deletion.
	test := func(job *batchv1.Job, parallelism1, parallelism2, completions1, completions2 int32) {
		job, _ = fedclientset.Batch().Jobs(metav1.NamespaceDefault).Create(job)

		// joinErrors flattens a slice of errors into one error (nil if empty).
		joinErrors := func(errors []error) error {
			if len(errors) == 0 {
				return nil
			}
			errorStrings := []string{}
			for _, err := range errors {
				errorStrings = append(errorStrings, err.Error())
			}
			return fmt.Errorf("%s", strings.Join(errorStrings, "\n"))
		}

		// check local jobs are created with correct spec
		checkLocalJob := func(parallelism, completions int32) testutil.CheckingFunction {
			return func(obj runtime.Object) error {
				errors := []error{}
				ljob := obj.(*batchv1.Job)
				if !fedutil.ObjectMetaEquivalent(job.ObjectMeta, ljob.ObjectMeta) {
					errors = append(errors, fmt.Errorf("Job meta un-equivalent: %#v (expected) != %#v (actual)", job.ObjectMeta, ljob.ObjectMeta))
				}
				if err := checkEqual(t, *ljob.Spec.Parallelism, parallelism, "Spec.Parallelism"); err != nil {
					errors = append(errors, err)
				}
				if ljob.Spec.Completions != nil {
					if err := checkEqual(t, *ljob.Spec.Completions, completions, "Spec.Completions"); err != nil {
						errors = append(errors, err)
					}
				}
				return joinErrors(errors)
			}
		}
		// NOTE(review): checkFedJob asserts nothing — it only drains one
		// federated update event; consider tightening it.
		checkFedJob := func(obj runtime.Object) error {
			errors := []error{}
			return joinErrors(errors)
		}
		assert.NoError(t, testutil.CheckObjectFromChan(kube1Chan, checkLocalJob(parallelism1, completions1)))
		assert.NoError(t, testutil.CheckObjectFromChan(kube2Chan, checkLocalJob(parallelism2, completions2)))
		assert.NoError(t, testutil.CheckObjectFromChan(fedChan, checkFedJob))

		// finish local jobs
		job1, _ := kube1clientset.Batch().Jobs(metav1.NamespaceDefault).Get(job.Name, metav1.GetOptions{})
		finishJob(job1, 100*time.Millisecond)
		job1, _ = kube1clientset.Batch().Jobs(metav1.NamespaceDefault).UpdateStatus(job1)
		job2, _ := kube2clientset.Batch().Jobs(metav1.NamespaceDefault).Get(job.Name, metav1.GetOptions{})
		finishJob(job2, 100*time.Millisecond)
		job2, _ = kube2clientset.Batch().Jobs(metav1.NamespaceDefault).UpdateStatus(job2)

		// check fed job status updated: federated spec/status should be the
		// sum of the two local jobs.
		assert.NoError(t, testutil.CheckObjectFromChan(fedChan, func(obj runtime.Object) error {
			errors := []error{}
			job := obj.(*batchv1.Job)
			if err := checkEqual(t, *job.Spec.Parallelism, *job1.Spec.Parallelism+*job2.Spec.Parallelism, "Spec.Parallelism"); err != nil {
				errors = append(errors, err)
			}
			if job.Spec.Completions != nil {
				if err := checkEqual(t, *job.Spec.Completions, *job1.Spec.Completions+*job2.Spec.Completions, "Spec.Completions"); err != nil {
					errors = append(errors, err)
				}
			}
			if err := checkEqual(t, job.Status.Succeeded, job1.Status.Succeeded+job2.Status.Succeeded, "Status.Succeeded"); err != nil {
				errors = append(errors, err)
			}
			return joinErrors(errors)
		}))

		// delete fed job by set deletion time, and remove orphan finalizer
		job, _ = fedclientset.Batch().Jobs(metav1.NamespaceDefault).Get(job.Name, metav1.GetOptions{})
		deletionTimestamp := metav1.Now()
		job.DeletionTimestamp = &deletionTimestamp
		finalizersutil.RemoveFinalizers(job, sets.NewString(metav1.FinalizerOrphanDependents))
		fedclientset.Batch().Jobs(metav1.NamespaceDefault).Update(job)

		// check jobs are deleted: the delete reactor emits a skeleton object
		// carrying only name/namespace, which is what we expect to see.
		checkDeleted := func(obj runtime.Object) error {
			djob := obj.(*batchv1.Job)
			deletedJob := &batchv1.Job{
				ObjectMeta: metav1.ObjectMeta{
					Name:      djob.Name,
					Namespace: djob.Namespace,
				},
			}
			if !reflect.DeepEqual(djob, deletedJob) {
				return fmt.Errorf("%s/%s should be deleted", djob.Namespace, djob.Name)
			}
			return nil
		}
		assert.NoError(t, testutil.CheckObjectFromChan(kube1Chan, checkDeleted))
		assert.NoError(t, testutil.CheckObjectFromChan(kube2Chan, checkDeleted))
		assert.NoError(t, testutil.CheckObjectFromChan(fedChan, checkDeleted))
	}

	// Negative parallelism/completions in newJob mean "field unset".
	test(newJob("job1", 2, 7), 1, 1, 4, 3)
	test(newJob("job2", 2, -1), 1, 1, -1, -1)
	test(newJob("job3", 7, 2), 4, 3, 1, 1)
	test(newJob("job4", 7, 1), 4, 3, 1, 0)
}
// checkEqual returns nil when expected and actual are equal (per testify's
// ObjectsAreEqual semantics) and a descriptive error mentioning msg otherwise.
func checkEqual(_ *testing.T, expected, actual interface{}, msg string) error {
	if assert.ObjectsAreEqual(expected, actual) {
		return nil
	}
	return fmt.Errorf("%s not equal: %#v (expected) != %#v (actual)", msg, expected, actual)
}
// newJob builds a defaulted Job fixture named name in the default namespace.
// A negative parallelism or completions value leaves that field unset (nil).
func newJob(name string, parallelism int32, completions int32) *batchv1.Job {
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: metav1.NamespaceDefault,
			SelfLink:  "/api/v1/namespaces/default/jobs/name",
		},
		Spec: batchv1.JobSpec{
			Template: apiv1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"foo": name},
				},
				Spec: apiv1.PodSpec{
					Containers:    []apiv1.Container{{Image: "foo/bar"}},
					RestartPolicy: apiv1.RestartPolicyNever,
				},
			},
		},
	}
	// Only point at the arguments when they encode a real value.
	if parallelism >= 0 {
		job.Spec.Parallelism = &parallelism
	}
	if completions >= 0 {
		job.Spec.Completions = &completions
	}
	batchv1internal.SetDefaults_Job(&job)
	return &job
}
// newCondition constructs a JobCondition of the given type with status True,
// both timestamps set to the current time, and the supplied reason/message.
func newCondition(conditionType batchv1.JobConditionType, reason, message string) batchv1.JobCondition {
	return batchv1.JobCondition{
		Type:               conditionType,
		Status:             apiv1.ConditionTrue,
		LastProbeTime:      metav1.Now(),
		LastTransitionTime: metav1.Now(),
		Reason:             reason,
		Message:            message,
	}
}
// finishJob marks job as successfully completed: it appends a JobComplete
// condition, sets Succeeded to the expected completion count (1 when
// Completions is unset), and records start and completion times separated by
// roughly the given duration.
func finishJob(job *batchv1.Job, duration time.Duration) {
	job.Status.Conditions = append(job.Status.Conditions, newCondition(batchv1.JobComplete, "", ""))
	if job.Spec.Completions == nil {
		job.Status.Succeeded = 1
	} else {
		job.Status.Succeeded = *job.Spec.Completions
	}
	start := metav1.Now()
	job.Status.StartTime = &start
	time.Sleep(duration)
	// Fix: use a distinct variable for the completion timestamp. The original
	// reused one `now` variable and took its address twice, which made
	// StartTime and CompletionTime alias the same storage — reassigning it
	// clobbered the recorded start time.
	end := metav1.Now()
	job.Status.CompletionTime = &end
}

View File

@ -490,8 +490,7 @@ func syncToClusters(clustersAccessor clustersAccessorFunc, operationsAccessor op
if !ok {
glog.Fatalf("Adapter for kind %q does not properly implement SchedulingAdapter.", kind)
}
typedScheduleInfo := schedulingInfo.(*federatedtypes.ReplicaSchedulingInfo)
err = schedulingAdapter.UpdateFederatedStatus(obj, typedScheduleInfo.Status)
err = schedulingAdapter.UpdateFederatedStatus(obj, schedulingInfo)
if err != nil {
runtime.HandleError(fmt.Errorf("adapter.UpdateFinished() failed on adapter for %s %q: %v", kind, key, err))
return statusError
@ -548,7 +547,7 @@ func clusterOperations(adapter federatedtypes.FederatedTypeAdapter, selectedClus
return nil, wrappedErr
}
shouldCreateIfNeeded := true
var scheduleAction federatedtypes.ScheduleAction = federatedtypes.ActionAdd
if adapter.IsSchedulingAdapter() {
schedulingAdapter, ok := adapter.(federatedtypes.SchedulingAdapter)
if !ok {
@ -559,7 +558,7 @@ func clusterOperations(adapter federatedtypes.FederatedTypeAdapter, selectedClus
if clusterObj != nil {
clusterTypedObj = clusterObj.(pkgruntime.Object)
}
desiredObj, shouldCreateIfNeeded, err = schedulingAdapter.ScheduleObject(cluster, clusterTypedObj, desiredObj, schedulingInfo)
desiredObj, scheduleAction, err = schedulingAdapter.ScheduleObject(cluster, clusterTypedObj, desiredObj, schedulingInfo)
if err != nil {
runtime.HandleError(err)
return nil, err
@ -568,11 +567,15 @@ func clusterOperations(adapter federatedtypes.FederatedTypeAdapter, selectedClus
var operationType util.FederatedOperationType = ""
if found {
clusterObj := clusterObj.(pkgruntime.Object)
if !adapter.Equivalent(desiredObj, clusterObj) {
operationType = util.OperationTypeUpdate
if scheduleAction == federatedtypes.ActionDelete {
operationType = util.OperationTypeDelete
} else {
clusterObj := clusterObj.(pkgruntime.Object)
if !adapter.Equivalent(desiredObj, clusterObj) {
operationType = util.OperationTypeUpdate
}
}
} else if shouldCreateIfNeeded {
} else if scheduleAction == federatedtypes.ActionAdd {
operationType = util.OperationTypeAdd
}

View File

@ -71,6 +71,7 @@ func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkgruntime.Object)) *cache.
oldMeta := getFieldOrPanic(old, "ObjectMeta").(metav1.ObjectMeta)
curMeta := getFieldOrPanic(cur, "ObjectMeta").(metav1.ObjectMeta)
if !ObjectMetaEquivalent(oldMeta, curMeta) ||
!reflect.DeepEqual(oldMeta.DeletionTimestamp, curMeta.DeletionTimestamp) ||
!reflect.DeepEqual(getFieldOrPanic(old, "Spec"), getFieldOrPanic(cur, "Spec")) {
triggerFunc(curObj)
}

View File

@ -446,3 +446,9 @@ func AssertHasFinalizer(t *testing.T, obj runtime.Object, finalizer string) {
require.Nil(t, err)
assert.True(t, hasFinalizer)
}
func NewInt32(val int32) *int32 {
p := new(int32)
*p = val
return p
}

View File

@ -182,7 +182,6 @@ pkg/controller/job
pkg/controller/namespace
pkg/controller/namespace/deletion
pkg/controller/node
pkg/controller/node/testutil
pkg/controller/podautoscaler
pkg/controller/podautoscaler/metrics
pkg/controller/podgc
@ -309,12 +308,9 @@ pkg/proxy/util
pkg/proxy/winuserspace
pkg/quota/evaluator/core
pkg/quota/generic
pkg/registry/admissionregistration/externaladmissionhookconfiguration
pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage
pkg/registry/admissionregistration/initializerconfiguration
pkg/registry/admissionregistration/initializerconfiguration/storage
pkg/registry/admissionregistration/rest
pkg/registry/apps/controllerrevision
pkg/registry/apps/rest
pkg/registry/apps/statefulset
pkg/registry/apps/statefulset/storage
@ -337,11 +333,9 @@ pkg/registry/certificates/certificates
pkg/registry/certificates/certificates/storage
pkg/registry/certificates/rest
pkg/registry/core/componentstatus
pkg/registry/core/configmap
pkg/registry/core/endpoint/storage
pkg/registry/core/event
pkg/registry/core/event/storage
pkg/registry/core/limitrange
pkg/registry/core/limitrange/storage
pkg/registry/core/namespace
pkg/registry/core/namespace/storage
@ -353,7 +347,6 @@ pkg/registry/core/persistentvolumeclaim
pkg/registry/core/persistentvolumeclaim/storage
pkg/registry/core/pod
pkg/registry/core/pod/rest
pkg/registry/core/podtemplate
pkg/registry/core/podtemplate/storage
pkg/registry/core/replicationcontroller
pkg/registry/core/replicationcontroller/storage
@ -377,13 +370,10 @@ pkg/registry/extensions/deployment
pkg/registry/extensions/deployment/storage
pkg/registry/extensions/ingress
pkg/registry/extensions/ingress/storage
pkg/registry/extensions/networkpolicy
pkg/registry/extensions/networkpolicy/storage
pkg/registry/extensions/podsecuritypolicy
pkg/registry/extensions/replicaset
pkg/registry/extensions/replicaset/storage
pkg/registry/extensions/rest
pkg/registry/networking/networkpolicy
pkg/registry/networking/networkpolicy/storage
pkg/registry/networking/rest
pkg/registry/policy/poddisruptionbudget
@ -404,7 +394,6 @@ pkg/registry/rbac/validation
pkg/registry/registrytest
pkg/registry/scheduling/priorityclass/storage
pkg/registry/scheduling/rest
pkg/registry/settings/podpreset
pkg/registry/settings/podpreset/storage
pkg/registry/settings/rest
pkg/registry/storage/rest

View File

@ -119,6 +119,7 @@ cni-conf-dir
concurrent-deployment-syncs
concurrent-endpoint-syncs
concurrent-gc-syncs
concurrent-job-syncs
concurrent-namespace-syncs
concurrent-replicaset-syncs
concurrent-resource-quota-syncs
@ -425,6 +426,7 @@ kube-reserved
kube-reserved-cgroup
kube-master-url
kube-reserved
kubemark-external-kubeconfig
kubernetes-anywhere-cluster
kubernetes-anywhere-path
kubernetes-anywhere-phase2-provider

View File

@ -1440,7 +1440,7 @@ type SecretKeySelector struct {
// EnvFromSource represents the source of a set of ConfigMaps
type EnvFromSource struct {
// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
// An optional identifier to prepend to each key in the ConfigMap.
// +optional
Prefix string
// The ConfigMap to select from.

View File

@ -1548,7 +1548,7 @@ func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList {
if len(ev.Name) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
} else {
for _, msg := range validation.IsCIdentifier(ev.Name) {
for _, msg := range validation.IsEnvVarName(ev.Name) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
}
}
@ -1637,7 +1637,7 @@ func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorL
for i, ev := range vars {
idxPath := fldPath.Index(i)
if len(ev.Prefix) > 0 {
for _, msg := range validation.IsCIdentifier(ev.Prefix) {
for _, msg := range validation.IsEnvVarName(ev.Prefix) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg))
}
}

View File

@ -43,7 +43,7 @@ const (
maxLengthErrMsg = "must be no more than"
namePartErrMsg = "name part must consist of"
nameErrMsg = "a qualified name must consist of"
idErrMsg = "a valid C identifier must"
envVarNameErrMsg = "a valid environment variable name must consist of"
)
func testVolume(name string, namespace string, spec api.PersistentVolumeSpec) *api.PersistentVolume {
@ -2575,6 +2575,8 @@ func TestValidateEnv(t *testing.T) {
{Name: "ABC", Value: "value"},
{Name: "AbC_123", Value: "value"},
{Name: "abc", Value: ""},
{Name: "a.b.c", Value: "value"},
{Name: "a-b-c", Value: "value"},
{
Name: "abc",
ValueFrom: &api.EnvVarSource{
@ -2676,9 +2678,24 @@ func TestValidateEnv(t *testing.T) {
expectedError: "[0].name: Required value",
},
{
name: "name not a C identifier",
envs: []api.EnvVar{{Name: "a.b.c"}},
expectedError: `[0].name: Invalid value: "a.b.c": ` + idErrMsg,
name: "illegal character",
envs: []api.EnvVar{{Name: "a!b"}},
expectedError: `[0].name: Invalid value: "a!b": ` + envVarNameErrMsg,
},
{
name: "dot only",
envs: []api.EnvVar{{Name: "."}},
expectedError: `[0].name: Invalid value: ".": must not be`,
},
{
name: "double dots only",
envs: []api.EnvVar{{Name: ".."}},
expectedError: `[0].name: Invalid value: "..": must not be`,
},
{
name: "leading double dots",
envs: []api.EnvVar{{Name: "..abc"}},
expectedError: `[0].name: Invalid value: "..abc": must not start with`,
},
{
name: "value and valueFrom specified",
@ -2897,6 +2914,12 @@ func TestValidateEnvFrom(t *testing.T) {
LocalObjectReference: api.LocalObjectReference{Name: "abc"},
},
},
{
Prefix: "a.b",
ConfigMapRef: &api.ConfigMapEnvSource{
LocalObjectReference: api.LocalObjectReference{Name: "abc"},
},
},
{
SecretRef: &api.SecretEnvSource{
LocalObjectReference: api.LocalObjectReference{Name: "abc"},
@ -2908,6 +2931,12 @@ func TestValidateEnvFrom(t *testing.T) {
LocalObjectReference: api.LocalObjectReference{Name: "abc"},
},
},
{
Prefix: "a.b",
SecretRef: &api.SecretEnvSource{
LocalObjectReference: api.LocalObjectReference{Name: "abc"},
},
},
}
if errs := ValidateEnvFrom(successCase, field.NewPath("field")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
@ -2942,12 +2971,12 @@ func TestValidateEnvFrom(t *testing.T) {
name: "invalid prefix",
envs: []api.EnvFromSource{
{
Prefix: "a.b",
Prefix: "a!b",
ConfigMapRef: &api.ConfigMapEnvSource{
LocalObjectReference: api.LocalObjectReference{Name: "abc"}},
},
},
expectedError: `field[0].prefix: Invalid value: "a.b": ` + idErrMsg,
expectedError: `field[0].prefix: Invalid value: "a!b": ` + envVarNameErrMsg,
},
{
name: "zero-length name",
@ -2973,12 +3002,12 @@ func TestValidateEnvFrom(t *testing.T) {
name: "invalid prefix",
envs: []api.EnvFromSource{
{
Prefix: "a.b",
Prefix: "a!b",
SecretRef: &api.SecretEnvSource{
LocalObjectReference: api.LocalObjectReference{Name: "abc"}},
},
},
expectedError: `field[0].prefix: Invalid value: "a.b": ` + idErrMsg,
expectedError: `field[0].prefix: Invalid value: "a!b": ` + envVarNameErrMsg,
},
{
name: "no refs",
@ -3374,7 +3403,7 @@ func TestValidateContainers(t *testing.T) {
ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"},
},
"invalid env var name": {
{Name: "abc", Image: "image", Env: []api.EnvVar{{Name: "ev.1"}}, ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"},
{Name: "abc", Image: "image", Env: []api.EnvVar{{Name: "ev!1"}}, ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"},
},
"unknown volume name": {
{Name: "abc", Image: "image", VolumeMounts: []api.VolumeMount{{Name: "anything", MountPath: "/foo"}},

View File

@ -15,6 +15,7 @@ limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions
// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/apps/v1beta1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta1

View File

@ -22,13 +22,14 @@ package v1beta1
import (
v1beta1 "k8s.io/api/apps/v1beta1"
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps"
extensions "k8s.io/kubernetes/pkg/apis/extensions"
unsafe "unsafe"
)
@ -44,8 +45,32 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_apps_ControllerRevision_To_v1beta1_ControllerRevision,
Convert_v1beta1_ControllerRevisionList_To_apps_ControllerRevisionList,
Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList,
Convert_v1beta1_Deployment_To_extensions_Deployment,
Convert_extensions_Deployment_To_v1beta1_Deployment,
Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition,
Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition,
Convert_v1beta1_DeploymentList_To_extensions_DeploymentList,
Convert_extensions_DeploymentList_To_v1beta1_DeploymentList,
Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback,
Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback,
Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec,
Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec,
Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus,
Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus,
Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy,
Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy,
Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig,
Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig,
Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy,
Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy,
Convert_v1beta1_Scale_To_extensions_Scale,
Convert_extensions_Scale_To_v1beta1_Scale,
Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec,
Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec,
Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus,
Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus,
Convert_v1beta1_StatefulSet_To_apps_StatefulSet,
Convert_apps_StatefulSet_To_v1beta1_StatefulSet,
Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList,
@ -129,8 +154,272 @@ func Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in *a
return autoConvert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in, out, s)
}
// autoConvert_v1beta1_Deployment_To_extensions_Deployment copies ObjectMeta
// and converts Spec/Status from the versioned to the internal Deployment type.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta1_Deployment_To_extensions_Deployment is an autogenerated conversion function.
func Convert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error {
	return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s)
}
// autoConvert_extensions_Deployment_To_v1beta1_Deployment copies ObjectMeta
// and converts Spec/Status from the internal to the versioned Deployment type.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_Deployment_To_v1beta1_Deployment is an autogenerated conversion function.
func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error {
	return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s)
}
// autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition
// converts field by field, re-typing Type and Status into the internal enums.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
	out.Type = extensions.DeploymentConditionType(in.Type)
	out.Status = api.ConditionStatus(in.Status)
	out.LastUpdateTime = in.LastUpdateTime
	out.LastTransitionTime = in.LastTransitionTime
	out.Reason = in.Reason
	out.Message = in.Message
	return nil
}

// Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function.
func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
	return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s)
}
// autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition
// converts field by field, re-typing Type and Status into the versioned enums.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error {
	out.Type = v1beta1.DeploymentConditionType(in.Type)
	out.Status = v1.ConditionStatus(in.Status)
	out.LastUpdateTime = in.LastUpdateTime
	out.LastTransitionTime = in.LastTransitionTime
	out.Reason = in.Reason
	out.Message = in.Message
	return nil
}

// Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function.
func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s)
}
// autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList converts
// the list metadata and each item via the per-item conversion; a nil Items
// slice stays nil. NOTE(review): generated by conversion-gen — do not edit.
func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]extensions.Deployment, len(*in))
		for i := range *in {
			if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1beta1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function.
func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
	return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s)
}
// autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList converts
// the list metadata and each item via the per-item conversion. Unlike the
// reverse direction, a nil internal Items becomes an empty (non-nil) slice —
// presumably so the versioned list serializes as [] rather than null; TODO
// confirm against conversion-gen docs. NOTE(review): generated code — do not
// edit by hand.
func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]v1beta1.Deployment, len(*in))
		for i := range *in {
			if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = make([]v1beta1.Deployment, 0)
	}
	return nil
}

// Convert_extensions_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function.
func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s)
}
// autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback
// copies the name, reinterprets the annotations map in place via unsafe (the
// two map types are layout-identical), and converts the RollbackTo config.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
	out.Name = in.Name
	out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
	if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function.
func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
	return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s)
}
// autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback is
// the mirror of the v1beta1→extensions rollback conversion.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error {
	out.Name = in.Name
	out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
	if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function.
func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
}
// autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec converts the
// versioned spec to the internal one: *int32 Replicas is flattened to int32,
// the pod template is converted via the api_v1 helpers, and pointer fields of
// identical layout are reinterpreted with unsafe casts.
// NOTE(review): generated by conversion-gen — do not edit by hand.
func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
	if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
		return err
	}
	out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector))
	if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
	out.Paused = in.Paused
	out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
	out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
	return nil
}
func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error {
if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*v1beta1.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus copies all status
// counters and conditions from the v1beta1 type into the internal type.
func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
	out.ObservedGeneration = in.ObservedGeneration
	out.Replicas = in.Replicas
	out.UpdatedReplicas = in.UpdatedReplicas
	out.ReadyReplicas = in.ReadyReplicas
	out.AvailableReplicas = in.AvailableReplicas
	out.UnavailableReplicas = in.UnavailableReplicas
	// Slice header reinterpreted in place — assumes DeploymentCondition has identical
	// layout in both packages, so no element-wise conversion is needed.
	out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
	out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
	return nil
}

// Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function.
func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
	return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s)
}

// autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is the reverse
// direction: internal DeploymentStatus to v1beta1.
func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error {
	out.ObservedGeneration = in.ObservedGeneration
	out.Replicas = in.Replicas
	out.UpdatedReplicas = in.UpdatedReplicas
	out.ReadyReplicas = in.ReadyReplicas
	out.AvailableReplicas = in.AvailableReplicas
	out.UnavailableReplicas = in.UnavailableReplicas
	out.Conditions = *(*[]v1beta1.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
	out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
	return nil
}

// Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function.
func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
}
// autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy converts the
// strategy type string and, when set, the RollingUpdate parameters to the internal form.
// RollingUpdate needs an element-wise conversion (not an unsafe cast) because the two
// RollingUpdateDeployment types are not layout-identical.
func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
	out.Type = extensions.DeploymentStrategyType(in.Type)
	if in.RollingUpdate != nil {
		// Shadow in/out with the nested pointers for the scoped conversion below.
		in, out := &in.RollingUpdate, &out.RollingUpdate
		*out = new(extensions.RollingUpdateDeployment)
		if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RollingUpdate = nil
	}
	return nil
}

// autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy is the reverse
// direction: internal DeploymentStrategy to v1beta1.
func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error {
	out.Type = v1beta1.DeploymentStrategyType(in.Type)
	if in.RollingUpdate != nil {
		in, out := &in.RollingUpdate, &out.RollingUpdate
		*out = new(v1beta1.RollingUpdateDeployment)
		if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RollingUpdate = nil
	}
	return nil
}
// autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig copies the single
// Revision field from the v1beta1 type to the internal type.
func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
	out.Revision = in.Revision
	return nil
}

// Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function.
func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
	return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s)
}

// autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is the reverse direction:
// internal RollbackConfig to v1beta1.
func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error {
	out.Revision = in.Revision
	return nil
}

// Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function.
func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error {
	return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
}
// autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment is a
// generator stub: MaxUnavailable/MaxSurge are *IntOrString in v1beta1 but IntOrString
// internally, so the generator cannot convert them automatically. A hand-written
// Convert_ wrapper elsewhere must perform those conversions; this stub converts nothing.
func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

// autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment is the
// reverse-direction stub; the same two fields require manual conversion.
func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}
func autoConvert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1beta1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := v1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil {
if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
return nil
@ -142,7 +431,7 @@ func Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateState
}
func autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *v1beta1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := v1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil {
if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
return nil
@ -153,6 +442,71 @@ func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateState
return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(in, out, s)
}
// autoConvert_v1beta1_Scale_To_extensions_Scale converts a v1beta1 Scale to the internal
// type: ObjectMeta is copied directly, Spec and Status via their own converters.
func autoConvert_v1beta1_Scale_To_extensions_Scale(in *v1beta1.Scale, out *extensions.Scale, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta1_Scale_To_extensions_Scale is an autogenerated conversion function.
func Convert_v1beta1_Scale_To_extensions_Scale(in *v1beta1.Scale, out *extensions.Scale, s conversion.Scope) error {
	return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s)
}

// autoConvert_extensions_Scale_To_v1beta1_Scale is the reverse direction: internal Scale
// to v1beta1.
func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *v1beta1.Scale, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_Scale_To_v1beta1_Scale is an autogenerated conversion function.
func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *v1beta1.Scale, s conversion.Scope) error {
	return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s)
}
// autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec copies the single Replicas field
// from the v1beta1 type to the internal type.
func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *v1beta1.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
	out.Replicas = in.Replicas
	return nil
}

// Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec is an autogenerated conversion function.
func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *v1beta1.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
	return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s)
}

// autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec is the reverse direction: internal
// ScaleSpec to v1beta1.
func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
	out.Replicas = in.Replicas
	return nil
}

// Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
	return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
}
// autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus is a generator stub: only
// Replicas converts automatically. Selector (map[string]string vs *LabelSelector) and
// TargetSelector (no internal counterpart) must be handled by a hand-written Convert_
// wrapper elsewhere.
func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *v1beta1.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error {
	out.Replicas = in.Replicas
	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
	return nil
}

// autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus is the reverse-direction stub;
// Selector again requires manual conversion.
func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
	out.Replicas = in.Replicas
	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
	return nil
}
func autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in *v1beta1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
@ -228,10 +582,10 @@ func Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSe
}
func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
@ -246,14 +600,14 @@ func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.Sta
}
func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta1.StatefulSetSpec, s conversion.Scope) error {
if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]core_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.VolumeClaimTemplates = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = v1beta1.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {

View File

@ -15,6 +15,7 @@ limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions
// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/apps/v1beta2
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta2

View File

@ -29,6 +29,7 @@ import (
api "k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps"
extensions "k8s.io/kubernetes/pkg/apis/extensions"
unsafe "unsafe"
)
@ -40,8 +41,54 @@ func init() {
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_v1beta2_DaemonSet_To_extensions_DaemonSet,
Convert_extensions_DaemonSet_To_v1beta2_DaemonSet,
Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList,
Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList,
Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec,
Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec,
Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus,
Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus,
Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy,
Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy,
Convert_v1beta2_Deployment_To_extensions_Deployment,
Convert_extensions_Deployment_To_v1beta2_Deployment,
Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition,
Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition,
Convert_v1beta2_DeploymentList_To_extensions_DeploymentList,
Convert_extensions_DeploymentList_To_v1beta2_DeploymentList,
Convert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback,
Convert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback,
Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec,
Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec,
Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus,
Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus,
Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy,
Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy,
Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet,
Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet,
Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition,
Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition,
Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList,
Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList,
Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec,
Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec,
Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus,
Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus,
Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig,
Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig,
Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet,
Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet,
Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment,
Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy,
Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy,
Convert_v1beta2_Scale_To_extensions_Scale,
Convert_extensions_Scale_To_v1beta2_Scale,
Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec,
Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec,
Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus,
Convert_extensions_ScaleStatus_To_v1beta2_ScaleStatus,
Convert_v1beta2_StatefulSet_To_apps_StatefulSet,
Convert_apps_StatefulSet_To_v1beta2_StatefulSet,
Convert_v1beta2_StatefulSetList_To_apps_StatefulSetList,
@ -55,6 +102,622 @@ func RegisterConversions(scheme *runtime.Scheme) error {
)
}
// autoConvert_v1beta2_DaemonSet_To_extensions_DaemonSet converts a v1beta2 DaemonSet to
// the internal type: ObjectMeta copied directly, Spec and Status via their converters.
func autoConvert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *v1beta2.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta2_DaemonSet_To_extensions_DaemonSet is an autogenerated conversion function.
func Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *v1beta2.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error {
	return autoConvert_v1beta2_DaemonSet_To_extensions_DaemonSet(in, out, s)
}

// autoConvert_extensions_DaemonSet_To_v1beta2_DaemonSet is the reverse direction: internal
// DaemonSet to v1beta2.
func autoConvert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, out *v1beta2.DaemonSet, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_DaemonSet_To_v1beta2_DaemonSet is an autogenerated conversion function.
func Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, out *v1beta2.DaemonSet, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSet_To_v1beta2_DaemonSet(in, out, s)
}
// autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList converts a v1beta2
// DaemonSetList to the internal type, converting each item individually.
func autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]extensions.DaemonSet, len(*in))
		for i := range *in {
			if err := Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error {
	return autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in, out, s)
}

// autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList is the reverse direction:
// internal DaemonSetList to v1beta2.
func autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]v1beta2.DaemonSet, len(*in))
		for i := range *in {
			if err := Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		// Note the asymmetry with the other direction: the versioned output gets a
		// non-nil empty slice — presumably so it serializes as [] rather than null
		// (generator convention); confirm against conversion-gen behavior.
		out.Items = make([]v1beta2.DaemonSet, 0)
	}
	return nil
}

// Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList is an autogenerated conversion function.
func Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in, out, s)
}
// autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec converts a v1beta2
// DaemonSetSpec to the internal representation field by field.
func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error {
	// Both sides use the same meta/v1 LabelSelector layout, so the pointer is cast directly.
	out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
	if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
		return err
	}
	if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	out.TemplateGeneration = in.TemplateGeneration
	out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
	return nil
}

// Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error {
	return autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s)
}

// autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec is the reverse direction:
// internal DaemonSetSpec to v1beta2.
func autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error {
	out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
	if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
		return err
	}
	if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	out.TemplateGeneration = in.TemplateGeneration
	out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
	return nil
}

// Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec is an autogenerated conversion function.
func Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in, out, s)
}
// autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus copies all status
// counters from the v1beta2 type into the internal type.
func autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error {
	out.CurrentNumberScheduled = in.CurrentNumberScheduled
	out.NumberMisscheduled = in.NumberMisscheduled
	out.DesiredNumberScheduled = in.DesiredNumberScheduled
	out.NumberReady = in.NumberReady
	out.ObservedGeneration = in.ObservedGeneration
	out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
	out.NumberAvailable = in.NumberAvailable
	out.NumberUnavailable = in.NumberUnavailable
	// Pointer reinterpreted directly; both sides are *int64.
	out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
	return nil
}

// Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error {
	return autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s)
}

// autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus is the reverse
// direction: internal DaemonSetStatus to v1beta2.
func autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error {
	out.CurrentNumberScheduled = in.CurrentNumberScheduled
	out.NumberMisscheduled = in.NumberMisscheduled
	out.DesiredNumberScheduled = in.DesiredNumberScheduled
	out.NumberReady = in.NumberReady
	out.ObservedGeneration = in.ObservedGeneration
	out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
	out.NumberAvailable = in.NumberAvailable
	out.NumberUnavailable = in.NumberUnavailable
	out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
	return nil
}

// Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus is an autogenerated conversion function.
func Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in, out, s)
}
// autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy
// converts the strategy type string and, when set, the RollingUpdate parameters via an
// element-wise conversion (the nested types are not layout-identical, so no unsafe cast).
func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error {
	out.Type = extensions.DaemonSetUpdateStrategyType(in.Type)
	if in.RollingUpdate != nil {
		// Shadow in/out with the nested pointers for the scoped conversion below.
		in, out := &in.RollingUpdate, &out.RollingUpdate
		*out = new(extensions.RollingUpdateDaemonSet)
		if err := Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RollingUpdate = nil
	}
	return nil
}

// Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error {
	return autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s)
}

// autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy is the
// reverse direction: internal DaemonSetUpdateStrategy to v1beta2.
func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error {
	out.Type = v1beta2.DaemonSetUpdateStrategyType(in.Type)
	if in.RollingUpdate != nil {
		in, out := &in.RollingUpdate, &out.RollingUpdate
		*out = new(v1beta2.RollingUpdateDaemonSet)
		if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RollingUpdate = nil
	}
	return nil
}

// Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in, out, s)
}
// autoConvert_v1beta2_Deployment_To_extensions_Deployment converts a v1beta2 Deployment to
// the internal type: ObjectMeta copied directly, Spec and Status via their converters.
func autoConvert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta2_Deployment_To_extensions_Deployment is an autogenerated conversion function.
func Convert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error {
	return autoConvert_v1beta2_Deployment_To_extensions_Deployment(in, out, s)
}

// autoConvert_extensions_Deployment_To_v1beta2_Deployment is the reverse direction:
// internal Deployment to v1beta2.
func autoConvert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_Deployment_To_v1beta2_Deployment is an autogenerated conversion function.
func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error {
	return autoConvert_extensions_Deployment_To_v1beta2_Deployment(in, out, s)
}
// autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition converts a
// v1beta2 DeploymentCondition to the internal type; the string-based enum fields are
// converted with plain type casts.
func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
	out.Type = extensions.DeploymentConditionType(in.Type)
	// Internal type uses the internal api package's ConditionStatus.
	out.Status = api.ConditionStatus(in.Status)
	out.LastUpdateTime = in.LastUpdateTime
	out.LastTransitionTime = in.LastTransitionTime
	out.Reason = in.Reason
	out.Message = in.Message
	return nil
}

// Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function.
func Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
	return autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s)
}

// autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition is the reverse
// direction: internal DeploymentCondition to v1beta2.
func autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error {
	out.Type = v1beta2.DeploymentConditionType(in.Type)
	// Versioned type uses core/v1's ConditionStatus.
	out.Status = core_v1.ConditionStatus(in.Status)
	out.LastUpdateTime = in.LastUpdateTime
	out.LastTransitionTime = in.LastTransitionTime
	out.Reason = in.Reason
	out.Message = in.Message
	return nil
}

// Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition is an autogenerated conversion function.
func Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in, out, s)
}
// autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList converts a v1beta2
// DeploymentList to the internal type, converting each item individually.
func autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]extensions.Deployment, len(*in))
		for i := range *in {
			if err := Convert_v1beta2_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1beta2_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function.
func Convert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
	return autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in, out, s)
}

// autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList is the reverse direction:
// internal DeploymentList to v1beta2.
func autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]v1beta2.Deployment, len(*in))
		for i := range *in {
			if err := Convert_extensions_Deployment_To_v1beta2_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		// The versioned output gets a non-nil empty slice — presumably so it serializes
		// as [] rather than null (generator convention); confirm against conversion-gen.
		out.Items = make([]v1beta2.Deployment, 0)
	}
	return nil
}

// Convert_extensions_DeploymentList_To_v1beta2_DeploymentList is an autogenerated conversion function.
func Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in, out, s)
}
// autoConvert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback converts a
// v1beta2 DeploymentRollback to the internal type field by field.
func autoConvert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta2.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
	out.Name = in.Name
	// Map header reinterpreted in place — both sides are map[string]string.
	out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
	if err := Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function.
func Convert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta2.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
	return autoConvert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s)
}

// autoConvert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback is the reverse
// direction: internal DeploymentRollback to v1beta2.
func autoConvert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta2.DeploymentRollback, s conversion.Scope) error {
	out.Name = in.Name
	out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
	if err := Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback is an autogenerated conversion function.
func Convert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta2.DeploymentRollback, s conversion.Scope) error {
	return autoConvert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback(in, out, s)
}
// autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec converts a versioned
// DeploymentSpec to the internal type. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
// *int32 -> int32: the versioned API uses a pointer for Replicas, the internal type does not
if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
// unsafe casts below reinterpret pointers between layout-identical types without copying
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec is the reverse direction
// of the conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta2.DeploymentSpec, s conversion.Scope) error {
if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*v1beta2.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus copies every status field
// from the versioned type to the internal type. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta2.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
// unsafe slice reinterpretation: the two condition slice types are layout-identical per the generator
out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function.
func Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta2.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
return autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s)
}
// autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus is the reverse direction
// of the conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.Conditions = *(*[]v1beta2.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus is an autogenerated conversion function.
func Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error {
return autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in, out, s)
}
// autoConvert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy converts the strategy,
// deep-converting the optional RollingUpdate sub-struct. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta2.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
out.Type = extensions.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
// RollingUpdate needs an explicit converter (its field types differ), so allocate and recurse
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(extensions.RollingUpdateDeployment)
if err := Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// autoConvert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy is the reverse direction
// of the conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error {
out.Type = v1beta2.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(v1beta2.RollingUpdateDeployment)
if err := Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet converts a versioned ReplicaSet to the
// internal type via its Spec and Status converters. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in *v1beta2.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in *v1beta2.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in, out, s)
}
// autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet is the reverse direction of the
// conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet is an autogenerated conversion function.
func Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error {
return autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in, out, s)
}
// autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition converts a condition
// field-by-field, casting the enum-like string types. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error {
out.Type = extensions.ReplicaSetConditionType(in.Type)
out.Status = api.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s)
}
// autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is the reverse
// direction of the conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error {
out.Type = v1beta2.ReplicaSetConditionType(in.Type)
out.Status = core_v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is an autogenerated conversion function.
func Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in, out, s)
}
// autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList deep-converts each list item.
// Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]extensions.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
// internal direction keeps nil as nil
out.Items = nil
}
return nil
}
// Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s)
}
// autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList is the reverse direction of the
// conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]v1beta2.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
// versioned direction replaces nil with an empty slice — presumably so the list serializes as [] rather than null; note the asymmetry with the other direction
out.Items = make([]v1beta2.ReplicaSet, 0)
}
return nil
}
// Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList is an autogenerated conversion function.
func Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error {
return autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in, out, s)
}
// autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec converts a versioned
// ReplicaSetSpec to the internal type. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
// *int32 -> int32: the versioned API uses a pointer for Replicas, the internal type does not
if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
// unsafe cast: selector types are layout-identical, so the pointer is reinterpreted without copying
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec is the reverse direction of the
// conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error {
if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus copies every status field
// from the versioned type to the internal type. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
// unsafe slice reinterpretation: the two condition slice types are layout-identical per the generator
out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s)
}
// autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is the reverse direction of
// the conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]v1beta2.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is an autogenerated conversion function.
func Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s)
}
// autoConvert_v1beta2_RollbackConfig_To_extensions_RollbackConfig copies the single Revision field.
// Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(in *v1beta2.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
out.Revision = in.Revision
return nil
}
// Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function.
func Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(in *v1beta2.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
return autoConvert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(in, out, s)
}
// autoConvert_extensions_RollbackConfig_To_v1beta2_RollbackConfig is the reverse direction of the
// conversion above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(in *extensions.RollbackConfig, out *v1beta2.RollbackConfig, s conversion.Scope) error {
out.Revision = in.Revision
return nil
}
// Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig is an autogenerated conversion function.
func Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(in *extensions.RollbackConfig, out *v1beta2.RollbackConfig, s conversion.Scope) error {
return autoConvert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(in, out, s)
}
// autoConvert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet is generated by
// conversion-gen. It intentionally converts nothing: MaxUnavailable differs in pointer-ness between
// the two types, so a hand-written Convert_* function (defined elsewhere) must handle it.
func autoConvert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
return nil
}
// autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet is the reverse
// direction and is likewise a no-op pending manual conversion of MaxUnavailable.
func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error {
// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
return nil
}
// autoConvert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment is generated by
// conversion-gen. It converts nothing: both fields differ in pointer-ness between the two types,
// so a hand-written Convert_* function (defined elsewhere) must handle them.
func autoConvert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
// WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
return nil
}
// autoConvert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment is the reverse
// direction and is likewise a no-op pending manual conversion of both fields.
func autoConvert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error {
// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
// WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
return nil
}
func autoConvert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1beta2.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := v1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil {
return err
@ -79,6 +742,71 @@ func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateState
return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(in, out, s)
}
// autoConvert_v1beta2_Scale_To_extensions_Scale converts a versioned Scale to the internal type
// via its Spec and Status converters. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_Scale_To_extensions_Scale(in *v1beta2.Scale, out *extensions.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_Scale_To_extensions_Scale is an autogenerated conversion function.
func Convert_v1beta2_Scale_To_extensions_Scale(in *v1beta2.Scale, out *extensions.Scale, s conversion.Scope) error {
return autoConvert_v1beta2_Scale_To_extensions_Scale(in, out, s)
}
// autoConvert_extensions_Scale_To_v1beta2_Scale is the reverse direction of the conversion above.
// Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_Scale_To_v1beta2_Scale(in *extensions.Scale, out *v1beta2.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_extensions_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_extensions_Scale_To_v1beta2_Scale is an autogenerated conversion function.
func Convert_extensions_Scale_To_v1beta2_Scale(in *extensions.Scale, out *v1beta2.Scale, s conversion.Scope) error {
return autoConvert_extensions_Scale_To_v1beta2_Scale(in, out, s)
}
// autoConvert_v1beta2_ScaleSpec_To_extensions_ScaleSpec copies the single Replicas field.
// Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(in *v1beta2.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec is an autogenerated conversion function.
func Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(in *v1beta2.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
return autoConvert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(in, out, s)
}
// autoConvert_extensions_ScaleSpec_To_v1beta2_ScaleSpec is the reverse direction of the conversion
// above. Generated by conversion-gen; do not edit by hand.
func autoConvert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(in *extensions.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function.
func Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(in *extensions.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error {
return autoConvert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s)
}
// autoConvert_v1beta2_ScaleStatus_To_extensions_ScaleStatus copies Replicas only; the selector
// fields differ structurally between the two types and must be handled by a hand-written
// Convert_* function defined elsewhere. Generated by conversion-gen; do not edit by hand.
func autoConvert_v1beta2_ScaleStatus_To_extensions_ScaleStatus(in *v1beta2.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
return nil
}
// autoConvert_extensions_ScaleStatus_To_v1beta2_ScaleStatus is the reverse direction; it likewise
// copies Replicas only and leaves Selector to a manual converter.
func autoConvert_extensions_ScaleStatus_To_v1beta2_ScaleStatus(in *extensions.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
return nil
}
func autoConvert_v1beta2_StatefulSet_To_apps_StatefulSet(in *v1beta2.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {

View File

@ -28,7 +28,7 @@ func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) {
Region: os.region,
})
if err != nil {
glog.Warningf("Failed to find network v2 endpoint: %v", err)
glog.Warningf("Failed to find network v2 endpoint for region %s: %v", os.region, err)
return nil, err
}
return network, nil
@ -39,7 +39,7 @@ func (os *OpenStack) NewComputeV2() (*gophercloud.ServiceClient, error) {
Region: os.region,
})
if err != nil {
glog.Warningf("Failed to find compute v2 endpoint: %v", err)
glog.Warningf("Failed to find compute v2 endpoint for region %s: %v", os.region, err)
return nil, err
}
return compute, nil
@ -50,7 +50,7 @@ func (os *OpenStack) NewBlockStorageV1() (*gophercloud.ServiceClient, error) {
Region: os.region,
})
if err != nil {
glog.Errorf("Unable to initialize cinder v1 client for region: %s", os.region)
glog.Errorf("Unable to initialize cinder v1 client for region %s: %v", os.region, err)
return nil, err
}
return storage, nil
@ -61,7 +61,7 @@ func (os *OpenStack) NewBlockStorageV2() (*gophercloud.ServiceClient, error) {
Region: os.region,
})
if err != nil {
glog.Errorf("Unable to initialize cinder v2 client for region: %s", os.region)
glog.Errorf("Unable to initialize cinder v2 client for region %s: %v", os.region, err)
return nil, err
}
return storage, nil

View File

@ -215,11 +215,7 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
if err != nil {
return "", err
}
if volume.Status != VolumeAvailableStatus {
errmsg := fmt.Sprintf("volume %s status is %s, not %s, can not be attached to instance %s.", volume.Name, volume.Status, VolumeAvailableStatus, instanceID)
glog.Errorf(errmsg)
return "", errors.New(errmsg)
}
cClient, err := os.NewComputeV2()
if err != nil {
return "", err
@ -230,11 +226,9 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
return volume.ID, nil
}
glog.V(2).Infof("Disk %s is attached to a different instance (%s), detaching", volumeID, volume.AttachedServerId)
err = os.DetachDisk(volume.AttachedServerId, volumeID)
if err != nil {
return "", err
}
errmsg := fmt.Sprintf("Disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId)
glog.V(2).Infof(errmsg)
return "", errors.New(errmsg)
}
startTime := time.Now()
@ -258,6 +252,12 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
if err != nil {
return err
}
if volume.Status == VolumeAvailableStatus {
// "available" is fine since that means the volume is detached from instance already.
glog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
return nil
}
if volume.Status != VolumeInUseStatus {
errmsg := fmt.Sprintf("can not detach volume %s, its status is %s.", volume.Name, volume.Status)
glog.Errorf(errmsg)

View File

@ -18,7 +18,9 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/install:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/securitycontext:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
@ -32,6 +34,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
@ -125,6 +128,7 @@ filegroup(
"//pkg/controller/service:all-srcs",
"//pkg/controller/serviceaccount:all-srcs",
"//pkg/controller/statefulset:all-srcs",
"//pkg/controller/testutil:all-srcs",
"//pkg/controller/ttl:all-srcs",
"//pkg/controller/volume/attachdetach:all-srcs",
"//pkg/controller/volume/events:all-srcs",

View File

@ -43,7 +43,7 @@ go_test(
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/node/testutil:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",

View File

@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/controller/testutil"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

View File

@ -885,50 +885,11 @@ func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
}
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint *v1.Taint) error {
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
newNode, ok, err := taintutils.AddOrUpdateTaint(oldNode, taint)
if err != nil {
return fmt.Errorf("Failed to update taint annotation!")
}
if !ok {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint, node *v1.Node) error {
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for i := range node.Spec.Taints {
if node.Spec.Taints[i].MatchTaint(taint) {
match = true
break
}
}
if !match {
return nil
}
// AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls
// to update nodes; otherwise, no API calls. Return error if any.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
@ -945,11 +906,77 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint,
if err != nil {
return err
}
newNode, ok, err := taintutils.RemoveTaint(oldNode, taint)
if err != nil {
return fmt.Errorf("Failed to update taint annotation!")
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := taintutils.AddOrUpdateTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("Failed to update taint of node!")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !ok {
if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for _, taint := range taints {
if taintutils.TaintExists(node.Spec.Taints, taint) {
match = true
break
}
}
if !match {
return nil
}
}
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := taintutils.RemoveTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("Failed to remove taint of node!")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)

View File

@ -37,12 +37,15 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/api"
_ "k8s.io/kubernetes/pkg/api/install"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/controller/testutil"
"k8s.io/kubernetes/pkg/securitycontext"
)
@ -479,3 +482,358 @@ func TestComputeHash(t *testing.T) {
}
}
}
// TestRemoveTaintOffNode is a table-driven test for RemoveTaintOffNode. For each case it
// removes the given taints from a node backed by a FakeNodeHandler, then checks both the
// node's resulting taint set and the number of requests issued to the fake handler
// (verifying the short-circuit paths that avoid patch calls when nothing changes).
func TestRemoveTaintOffNode(t *testing.T) {
tests := []struct {
name string // case label used in failure messages
nodeHandler *testutil.FakeNodeHandler // fake node store/clientset recording request counts
nodeName string // node to operate on
taintsToRemove []*v1.Taint // taints passed to RemoveTaintOffNode
expectedTaints []v1.Taint // taints expected to remain on the node afterwards
requestCount int // expected total requests against the fake handler
}{
{
name: "remove one taint from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
},
requestCount: 4,
},
{
name: "remove multiple taints from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
{Key: "key4", Value: "value4", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key2", Value: "value2", Effect: "NoExecute"},
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key4", Value: "value4", Effect: "NoExecute"},
},
requestCount: 4,
},
{
// taints absent from the node: no patch should be issued (lower requestCount)
name: "remove no-exist taints from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 2,
},
{
name: "remove taint from node without taints",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: nil,
requestCount: 2,
},
{
// empty taint list: RemoveTaintOffNode should return early with no mutation
name: "remove empty taint list from node without taints",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{},
expectedTaints: nil,
requestCount: 2,
},
{
name: "remove empty taint list from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 2,
},
}
for _, test := range tests {
// fetch the node first so RemoveTaintOffNode can exercise its node-provided fast path
node, _ := test.nodeHandler.Get(test.nodeName, metav1.GetOptions{})
if err := RemoveTaintOffNode(test.nodeHandler, test.nodeName, node, test.taintsToRemove...); err != nil {
t.Errorf("%s: RemoveTaintOffNode() error = %v", test.name, err)
}
// re-fetch and compare the surviving taints against expectations
node, _ = test.nodeHandler.Get(test.nodeName, metav1.GetOptions{})
if !reflect.DeepEqual(node.Spec.Taints, test.expectedTaints) {
t.Errorf("%s: failed to remove taint off node: expected %+v, got %+v",
test.name, test.expectedTaints, node.Spec.Taints)
}
if test.nodeHandler.RequestCount != test.requestCount {
t.Errorf("%s: unexpected request count: expected %+v, got %+v",
test.name, test.requestCount, test.nodeHandler.RequestCount)
}
}
}
// TestAddOrUpdateTaintOnNode verifies that AddOrUpdateTaintOnNode merges the
// requested taints into the node's spec: new taints are appended, taints that
// already exist are not duplicated, and an empty taint list leaves the node
// untouched (only the initial read request is issued).
func TestAddOrUpdateTaintOnNode(t *testing.T) {
	tests := []struct {
		name           string                    // test case description
		nodeHandler    *testutil.FakeNodeHandler // fake node store seeded with the initial node state
		nodeName       string                    // name of the node to mutate
		taintsToAdd    []*v1.Taint               // taints passed to AddOrUpdateTaintOnNode
		expectedTaints []v1.Taint                // taints expected on the node after the call
		requestCount   int                       // expected number of API requests recorded by the fake
	}{
		{
			name: "add one taint on node",
			nodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
						Spec: v1.NodeSpec{
							Taints: []v1.Taint{
								{Key: "key1", Value: "value1", Effect: "NoSchedule"},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
			},
			nodeName: "node1",
			taintsToAdd: []*v1.Taint{
				{Key: "key2", Value: "value2", Effect: "NoExecute"},
			},
			expectedTaints: []v1.Taint{
				{Key: "key1", Value: "value1", Effect: "NoSchedule"},
				{Key: "key2", Value: "value2", Effect: "NoExecute"},
			},
			requestCount: 3,
		},
		{
			name: "add multiple taints to node",
			nodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
						Spec: v1.NodeSpec{
							Taints: []v1.Taint{
								{Key: "key1", Value: "value1", Effect: "NoSchedule"},
								{Key: "key2", Value: "value2", Effect: "NoExecute"},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
			},
			nodeName: "node1",
			taintsToAdd: []*v1.Taint{
				{Key: "key3", Value: "value3", Effect: "NoSchedule"},
				{Key: "key4", Value: "value4", Effect: "NoExecute"},
			},
			expectedTaints: []v1.Taint{
				{Key: "key1", Value: "value1", Effect: "NoSchedule"},
				{Key: "key2", Value: "value2", Effect: "NoExecute"},
				{Key: "key3", Value: "value3", Effect: "NoSchedule"},
				{Key: "key4", Value: "value4", Effect: "NoExecute"},
			},
			requestCount: 3,
		},
		{
			name: "add exist taints to node",
			nodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
						Spec: v1.NodeSpec{
							Taints: []v1.Taint{
								{Key: "key1", Value: "value1", Effect: "NoSchedule"},
								{Key: "key2", Value: "value2", Effect: "NoExecute"},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
			},
			nodeName: "node1",
			taintsToAdd: []*v1.Taint{
				{Key: "key2", Value: "value2", Effect: "NoExecute"},
			},
			expectedTaints: []v1.Taint{
				{Key: "key1", Value: "value1", Effect: "NoSchedule"},
				{Key: "key2", Value: "value2", Effect: "NoExecute"},
			},
			requestCount: 3,
		},
		{
			name: "add taint to node without taints",
			nodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
			},
			nodeName: "node1",
			taintsToAdd: []*v1.Taint{
				{Key: "key3", Value: "value3", Effect: "NoSchedule"},
			},
			expectedTaints: []v1.Taint{
				{Key: "key3", Value: "value3", Effect: "NoSchedule"},
			},
			requestCount: 3,
		},
		{
			name: "add empty taint list to node without taints",
			nodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
			},
			nodeName:       "node1",
			taintsToAdd:    []*v1.Taint{},
			expectedTaints: nil,
			requestCount:   1,
		},
		{
			name: "add empty taint list to node",
			nodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
						Spec: v1.NodeSpec{
							Taints: []v1.Taint{
								{Key: "key1", Value: "value1", Effect: "NoSchedule"},
								{Key: "key2", Value: "value2", Effect: "NoExecute"},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
			},
			nodeName:    "node1",
			taintsToAdd: []*v1.Taint{},
			expectedTaints: []v1.Taint{
				{Key: "key1", Value: "value1", Effect: "NoSchedule"},
				{Key: "key2", Value: "value2", Effect: "NoExecute"},
			},
			requestCount: 1,
		},
	}
	for _, test := range tests {
		if err := AddOrUpdateTaintOnNode(test.nodeHandler, test.nodeName, test.taintsToAdd...); err != nil {
			t.Errorf("%s: AddOrUpdateTaintOnNode() error = %v", test.name, err)
		}
		// Check the Get error instead of discarding it: a failed read would
		// otherwise nil-dereference node.Spec below and panic the test binary.
		node, err := test.nodeHandler.Get(test.nodeName, metav1.GetOptions{})
		if err != nil {
			t.Errorf("%s: failed to get node: %v", test.name, err)
			continue
		}
		if !reflect.DeepEqual(node.Spec.Taints, test.expectedTaints) {
			t.Errorf("%s: failed to add taint to node: expected %+v, got %+v",
				test.name, test.expectedTaints, node.Spec.Taints)
		}
		if test.nodeHandler.RequestCount != test.requestCount {
			t.Errorf("%s: unexpected request count: expected %+v, got %+v",
				test.name, test.requestCount, test.nodeHandler.RequestCount)
		}
	}
}

View File

@ -1514,7 +1514,6 @@ func TestUpdateNode(t *testing.T) {
{Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse},
{Type: v1.NodeDiskPressure, Status: v1.ConditionFalse},
{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse},
{Type: v1.NodeInodePressure, Status: v1.ConditionFalse},
}
return node
}(),
@ -1522,7 +1521,6 @@ func TestUpdateNode(t *testing.T) {
node := newNode("node1", nil)
node.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
{Type: v1.NodeInodePressure, Status: v1.ConditionFalse},
}
return node
}(),

View File

@ -328,12 +328,6 @@ func (e *EndpointController) syncService(key string) error {
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err))
// Don't retry, as the key isn't going to magically become understandable.
return nil
}
err = e.client.Core().Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
return err

View File

@ -24,7 +24,7 @@ go_test(
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/node/testutil:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/taints:go_default_library",
@ -115,9 +115,6 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/node/testutil:all-srcs",
],
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/controller/testutil"
)
const (

View File

@ -303,7 +303,7 @@ func swapNodeControllerTaint(kubeClient clientset.Interface, taintToAdd, taintTo
}
glog.V(4).Infof("Added %v Taint to Node %v", taintToAdd, node.Name)
err = controller.RemoveTaintOffNode(kubeClient, node.Name, taintToRemove, node)
err = controller.RemoveTaintOffNode(kubeClient, node.Name, node, taintToRemove)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(

View File

@ -1072,12 +1072,12 @@ func (nc *NodeController) markNodeForTainting(node *v1.Node) bool {
func (nc *NodeController) markNodeAsHealthy(node *v1.Node) (bool, error) {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, UnreachableTaintTemplate, node)
err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, UnreachableTaintTemplate)
if err != nil {
glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
return false, err
}
err = controller.RemoveTaintOffNode(nc.kubeClient, node.Name, NotReadyTaintTemplate, node)
err = controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, NotReadyTaintTemplate)
if err != nil {
glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
return false, err

View File

@ -39,7 +39,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/controller/testutil"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/node"
taintutils "k8s.io/kubernetes/pkg/util/taints"

View File

@ -24,7 +24,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/controller/testutil"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clienttesting "k8s.io/client-go/testing"

View File

@ -39,7 +39,7 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/node/testutil:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",

View File

@ -30,7 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/controller/testutil"
)
type FakeController struct{}

View File

@ -68,30 +68,31 @@ type FakeNodeHandler struct {
DeleteWaitChan chan struct{}
}
// FakeLegacyHandler is a fake implemtation of CoreV1Interface.
type FakeLegacyHandler struct {
v1core.CoreV1Interface
n *FakeNodeHandler
}
// GetUpdatedNodesCopy returns a slice of Nodes with updates applied.
func (c *FakeNodeHandler) GetUpdatedNodesCopy() []*v1.Node {
c.lock.Lock()
defer c.lock.Unlock()
updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
for i, ptr := range c.UpdatedNodes {
func (m *FakeNodeHandler) GetUpdatedNodesCopy() []*v1.Node {
m.lock.Lock()
defer m.lock.Unlock()
updatedNodesCopy := make([]*v1.Node, len(m.UpdatedNodes), len(m.UpdatedNodes))
for i, ptr := range m.UpdatedNodes {
updatedNodesCopy[i] = ptr
}
return updatedNodesCopy
}
// Core returns fake CoreInterface.
func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c}
func (m *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{m.Clientset.Core(), m}
}
// CoreV1 returns fake CoreV1Interface
func (c *FakeNodeHandler) CoreV1() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.CoreV1(), c}
func (m *FakeNodeHandler) CoreV1() v1core.CoreV1Interface {
return &FakeLegacyHandler{m.Clientset.CoreV1(), m}
}
// Nodes return fake NodeInterfaces.
@ -115,9 +116,8 @@ func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
nodeCopy := *node
m.CreatedNodes = append(m.CreatedNodes, &nodeCopy)
return node, nil
} else {
return nil, errors.New("Create error.")
}
return nil, errors.New("create error")
}
// Get returns a Node from the fake store.

View File

@ -156,6 +156,18 @@ func TestParseAnnotations(t *testing.T) {
scenario: "incorrect annotation input (missing =value)",
expectErr: true,
},
{
annotations: []string{"-"},
expectedErr: "invalid annotation format: -",
scenario: "incorrect annotation input (missing key)",
expectErr: true,
},
{
annotations: []string{"=bar"},
expectedErr: "invalid annotation format: =bar",
scenario: "incorrect annotation input (missing key)",
expectErr: true,
},
}
for _, test := range tests {
annotations, remove, err := parseAnnotations(test.annotations)
@ -380,6 +392,18 @@ func TestAnnotateErrors(t *testing.T) {
return strings.Contains(err.Error(), "at least one annotation update is required")
},
},
"wrong annotations": {
args: []string{"pods", "-"},
errFn: func(err error) bool {
return strings.Contains(err.Error(), "at least one annotation update is required")
},
},
"wrong annotations 2": {
args: []string{"pods", "=bar"},
errFn: func(err error) bool {
return strings.Contains(err.Error(), "at least one annotation update is required")
},
},
"no resources remove annotations": {
args: []string{"pods-"},
errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") },

View File

@ -631,6 +631,9 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error {
return err
}
oldData, err := json.Marshal(obj)
if err != nil {
return err
}
node, ok := obj.(*corev1.Node)
if !ok {
return fmt.Errorf("unexpected Type%T, expected Node", obj)
@ -642,6 +645,9 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error {
helper := resource.NewHelper(o.restClient, o.nodeInfo.Mapping)
node.Spec.Unschedulable = desired
newData, err := json.Marshal(obj)
if err != nil {
return err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
if err != nil {
return err

View File

@ -138,6 +138,7 @@ func NewCmdGet(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Comman
usage := "identifying the resource to get from a server."
cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
cmdutil.AddInclude3rdPartyFlags(cmd)
cmdutil.AddOpenAPIFlags(cmd)
cmd.Flags().StringVar(&options.Raw, "raw", options.Raw, "Raw URI to request from the server. Uses the transport specified by the kubeconfig file.")
return cmd
}
@ -456,7 +457,7 @@ func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args [
// if cmd does not specify output format and useOpenAPIPrintColumnFlagLabel flag is true,
// then get the default output options for this mapping from OpenAPI schema.
if !cmdSpecifiesOutputFmt(cmd) && useOpenAPIPrintColumns {
outputOpts, _ = outputOptsForMappingFromOpenAPI(f, mapping)
outputOpts, _ = outputOptsForMappingFromOpenAPI(f, cmdutil.GetOpenAPICacheDir(cmd), mapping)
}
printer, err = f.PrinterForMapping(cmd, false, outputOpts, mapping, allNamespaces)
@ -555,11 +556,11 @@ func cmdSpecifiesOutputFmt(cmd *cobra.Command) bool {
// outputOptsForMappingFromOpenAPI looks for the output format metatadata in the
// openapi schema and returns the output options for the mapping if found.
func outputOptsForMappingFromOpenAPI(f cmdutil.Factory, mapping *meta.RESTMapping) (*printers.OutputOptions, bool) {
func outputOptsForMappingFromOpenAPI(f cmdutil.Factory, openAPIcacheDir string, mapping *meta.RESTMapping) (*printers.OutputOptions, bool) {
// user has not specified any output format, check if OpenAPI has
// default specification to print this resource type
api, err := f.OpenAPISchema()
api, err := f.OpenAPISchema(openAPIcacheDir)
if err != nil {
// Error getting schema
return nil, false

View File

@ -140,7 +140,7 @@ func TestGetUnknownSchemaObject(t *testing.T) {
expected := []runtime.Object{cmdtesting.NewInternalType("", "", "foo")}
actual := tf.Printer.(*testPrinter).Objects
if len(actual) != len(expected) {
t.Fatal(actual)
t.Fatalf("expected: %#v, but actual: %#v", expected, actual)
}
for i, obj := range actual {
expectedJSON := runtime.EncodeOrDie(codec, expected[i])
@ -156,7 +156,7 @@ func TestGetUnknownSchemaObject(t *testing.T) {
}
if !reflect.DeepEqual(expectedMap, actualMap) {
t.Errorf("unexpected object: \n%#v\n%#v", expectedMap, actualMap)
t.Errorf("expectedMap: %#v, but actualMap: %#v", expectedMap, actualMap)
}
}
}
@ -212,7 +212,7 @@ func TestGetObjectsWithOpenAPIOutputFormatPresent(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -265,7 +265,7 @@ func TestGetObjects(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -417,7 +417,7 @@ func TestGetSortedObjects(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -471,7 +471,7 @@ func TestGetObjectsIdentifiedByFile(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -500,7 +500,7 @@ func TestGetListObjects(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -542,7 +542,7 @@ func TestGetAllListObjects(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -571,7 +571,7 @@ func TestGetListComponentStatus(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -610,7 +610,7 @@ func TestGetMultipleTypeObjects(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -719,7 +719,7 @@ func TestGetMultipleTypeObjectsWithSelector(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -764,7 +764,7 @@ func TestGetMultipleTypeObjectsWithDirectReference(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -789,7 +789,7 @@ func TestGetByFormatForcesFlag(t *testing.T) {
showAllFlag, _ := cmd.Flags().GetBool("show-all")
if showAllFlag {
t.Errorf("expected showAll to not be true when getting resource")
t.Error("expected showAll to not be true when getting resource")
}
}
@ -909,7 +909,7 @@ func TestWatchSelector(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -951,7 +951,7 @@ func TestWatchResource(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -994,7 +994,7 @@ func TestWatchResourceIdentifiedByFile(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -1036,7 +1036,7 @@ func TestWatchOnlyResource(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}
@ -1082,7 +1082,7 @@ func TestWatchOnlyList(t *testing.T) {
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
t.Error("unexpected empty output")
}
}

View File

@ -289,6 +289,14 @@ func TestLabelErrors(t *testing.T) {
args: []string{"pods"},
errFn: func(err error) bool { return strings.Contains(err.Error(), "at least one label update is required") },
},
"wrong labels": {
args: []string{"pods", "-"},
errFn: func(err error) bool { return strings.Contains(err.Error(), "at least one label update is required") },
},
"wrong labels 2": {
args: []string{"pods", "=bar"},
errFn: func(err error) bool { return strings.Contains(err.Error(), "at least one label update is required") },
},
"no resources": {
args: []string{"pods-"},
errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") },

View File

@ -418,7 +418,7 @@ func (f *FakeFactory) SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDeclar
return nil, nil
}
func (f *FakeFactory) OpenAPISchema() (openapi.Resources, error) {
func (f *FakeFactory) OpenAPISchema(cacheDir string) (openapi.Resources, error) {
return nil, nil
}
@ -756,7 +756,7 @@ func (f *fakeAPIFactory) SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDec
return nil, nil
}
func (f *fakeAPIFactory) OpenAPISchema() (openapi.Resources, error) {
func (f *fakeAPIFactory) OpenAPISchema(cacheDir string) (openapi.Resources, error) {
if f.tf.OpenAPISchemaFunc != nil {
return f.tf.OpenAPISchemaFunc()
}

View File

@ -224,7 +224,7 @@ type ObjectMappingFactory interface {
// SwaggerSchema returns the schema declaration for the provided group version kind.
SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDeclaration, error)
// OpenAPISchema returns the schema openapi schema definiton
OpenAPISchema() (openapi.Resources, error)
OpenAPISchema(cacheDir string) (openapi.Resources, error)
}
// BuilderFactory holds the second level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods.

View File

@ -439,7 +439,13 @@ func (f *ring1Factory) SwaggerSchema(gvk schema.GroupVersionKind) (*swagger.ApiD
}
// OpenAPISchema returns metadata and structural information about Kubernetes object definitions.
func (f *ring1Factory) OpenAPISchema() (openapi.Resources, error) {
// Will try to cache the data to a local file. Cache is written and read from a
// file created with ioutil.TempFile and obeys the expiration semantics of that file.
// The cache location is a function of the client and server versions so that the open API
// schema will be cached separately for different client / server combinations.
// Note, the cache will not be invalidated if the server changes its open API schema without
// changing the server version.
func (f *ring1Factory) OpenAPISchema(cacheDir string) (openapi.Resources, error) {
discovery, err := f.clientAccessFactory.DiscoveryClient()
if err != nil {
return nil, err
@ -447,8 +453,23 @@ func (f *ring1Factory) OpenAPISchema() (openapi.Resources, error) {
// Lazily initialize the OpenAPIGetter once
f.openAPIGetter.once.Do(func() {
// Get the server version for caching the openapi spec
versionString := ""
version, err := discovery.ServerVersion()
if err != nil {
// Cache the result under the server version
versionString = version.String()
}
// Get the cache directory for caching the openapi spec
cacheDir, err = substituteUserHome(cacheDir)
if err != nil {
// Don't cache the result if we couldn't substitute the home directory
cacheDir = ""
}
// Create the caching OpenAPIGetter
f.openAPIGetter.getter = openapi.NewOpenAPIGetter(discovery)
f.openAPIGetter.getter = openapi.NewOpenAPIGetter(cacheDir, versionString, discovery)
})
// Delegate to the OpenAPIGetter

View File

@ -404,6 +404,19 @@ func AddValidateOptionFlags(cmd *cobra.Command, options *ValidateOptions) {
cmd.MarkFlagFilename("schema-cache-dir")
}
// AddOpenAPIFlags registers the --schema-cache-dir flag on cmd, which
// controls where downloaded API schemas are cached on local disk.
// Defaults to '$HOME/<RecommendedHomeDir>/<RecommendedSchemaName>'.
func AddOpenAPIFlags(cmd *cobra.Command) {
	cmd.Flags().String("schema-cache-dir",
		fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName),
		fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'",
			clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName),
	)
	// Enable shell filename completion for the flag's value.
	cmd.MarkFlagFilename("schema-cache-dir")
}
// GetOpenAPICacheDir returns the value of the --schema-cache-dir flag,
// i.e. the directory under which the openapi schema cache is written.
func GetOpenAPICacheDir(cmd *cobra.Command) string {
	return GetFlagString(cmd, "schema-cache-dir")
}
func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) {
kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, "Filename, directory, or URL to files "+usage)
cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")
@ -606,7 +619,7 @@ func AddInclude3rdPartyVarFlags(cmd *cobra.Command, include3rdParty *bool) {
func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) {
foundPair := false
for _, s := range args {
nonResource := strings.Contains(s, "=") || strings.HasSuffix(s, "-")
nonResource := (strings.Contains(s, "=") && s[0] != '=') || (strings.HasSuffix(s, "-") && s != "-")
switch {
case !foundPair && nonResource:
foundPair = true
@ -632,7 +645,7 @@ func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPair
var invalidBuf bytes.Buffer
var invalidBufNonEmpty bool
for _, pairArg := range pairArgs {
if strings.Contains(pairArg, "=") {
if strings.Contains(pairArg, "=") && pairArg[0] != '=' {
parts := strings.SplitN(pairArg, "=", 2)
if len(parts) != 2 {
if invalidBufNonEmpty {
@ -643,7 +656,7 @@ func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPair
} else {
newPairs[parts[0]] = parts[1]
}
} else if supportRemove && strings.HasSuffix(pairArg, "-") {
} else if supportRemove && strings.HasSuffix(pairArg, "-") && pairArg != "-" {
removePairs = append(removePairs, pairArg[:len(pairArg)-1])
} else {
if invalidBufNonEmpty {

View File

@ -15,11 +15,15 @@ go_library(
"document.go",
"extensions.go",
"openapi.go",
"openapi_cache.go",
"openapi_getter.go",
],
tags = ["automanaged"],
deps = [
"//pkg/version:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library",
"//vendor/gopkg.in/yaml.v2:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
@ -31,6 +35,7 @@ go_test(
name = "go_default_xtest",
size = "small",
srcs = [
"openapi_cache_test.go",
"openapi_getter_test.go",
"openapi_suite_test.go",
"openapi_test.go",

View File

@ -0,0 +1,163 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openapi
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/golang/glog"
"github.com/golang/protobuf/proto"
openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/version"
)
// openapiFileName is the basename of the cache file written under
// <cacheDir>/<server version>/<client version>/.
const openapiFileName = "openapi_cache"

// CachingOpenAPIClient fetches the openapi spec through an
// OpenAPISchemaInterface and keeps a copy on local disk so that later
// invocations can skip the server round trip.
type CachingOpenAPIClient struct {
	// version is the server version string; it is part of the cache file path.
	version string
	// client fetches the spec when no readable cache entry exists.
	client discovery.OpenAPISchemaInterface
	// cacheDirName is the root of the on-disk cache; empty disables caching.
	cacheDirName string
}
// NewCachingOpenAPIClient returns a new discovery.OpenAPISchemaInterface
// wrapper that serves the openapi spec from a local disk cache when one
// exists and otherwise fetches it with the supplied client.
//   client:   used to fetch a fresh openapi spec when there is no cache hit
//   version:  the server version; becomes part of the cache file location
//   cacheDir: the directory under which the cache file will be written
func NewCachingOpenAPIClient(client discovery.OpenAPISchemaInterface, version, cacheDir string) *CachingOpenAPIClient {
	caching := new(CachingOpenAPIClient)
	caching.client = client
	caching.version = version
	caching.cacheDirName = cacheDir
	return caching
}
// OpenAPIData returns the parsed openapi spec, preferring a previously
// cached copy on disk. On a cache miss it downloads the spec through the
// wrapped client, parses it, and then best-effort writes it back to the
// cache for future calls.
func (c *CachingOpenAPIClient) OpenAPIData() (Resources, error) {
	cachingEnabled := c.useCache()

	// Serve straight from the local cache file when it can be read and decoded.
	if cachingEnabled {
		if doc, err := c.readOpenAPICache(); err == nil {
			return NewOpenAPIData(doc)
		}
	}

	// Cache miss (or caching disabled): fetch the raw spec from the server.
	spec, err := c.client.OpenAPISchema()
	if err != nil {
		glog.V(2).Infof("Failed to download openapi data %v", err)
		return nil, err
	}

	parsed, err := NewOpenAPIData(spec)
	if err != nil {
		glog.V(2).Infof("Failed to parse openapi data %v", err)
		return nil, err
	}

	// Best-effort cache write: a failure is only logged, never returned,
	// since the caller already has the data it asked for.
	if cachingEnabled {
		if err := c.writeToCache(spec); err != nil {
			glog.V(2).Infof("Unable to cache openapi spec %v", err)
		}
	}

	return parsed, nil
}
// useCache reports whether a cache file may be read or written; caching is
// enabled only when both a server version and a cache directory were given.
func (c *CachingOpenAPIClient) useCache() bool {
	return c.version != "" && c.cacheDirName != ""
}
// readOpenAPICache loads the binary-encoded openapi spec from the local
// cache file and decodes it, returning an error when the file is missing
// or its contents cannot be unmarshaled.
func (c *CachingOpenAPIClient) readOpenAPICache() (*openapi_v2.Document, error) {
	contents, err := ioutil.ReadFile(c.openAPICacheFilename())
	if err != nil {
		return nil, err
	}
	doc := &openapi_v2.Document{}
	if err := proto.Unmarshal(contents, doc); err != nil {
		return nil, err
	}
	return doc, nil
}
// writeToCache tries to write the openapi spec to the local file cache.
// The data is written to a fresh temp file which is then hard-linked to the
// constant cache path, so concurrent readers never observe a partially
// written cache file. Returns an error when the spec cannot be encoded or
// persisted.
func (c *CachingOpenAPIClient) writeToCache(doc *openapi_v2.Document) error {
	// Get the constant filename used to read the cache.
	cacheFile := c.openAPICacheFilename()

	// Binary encode the spec. This is 10x as fast as using json encoding. (60ms vs 600ms)
	b, err := proto.Marshal(doc)
	if err != nil {
		return fmt.Errorf("Could not binary encode openapi spec: %v", err)
	}

	// Create a new temp file for the cached openapi spec.
	cacheDir := filepath.Dir(cacheFile)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return fmt.Errorf("Could not create directory: %v %v", cacheDir, err)
	}
	tmpFile, err := ioutil.TempFile(cacheDir, "openapi")
	if err != nil {
		return fmt.Errorf("Could not create temp cache file: %v %v", cacheFile, err)
	}
	// The temp name is only needed until it has been hard-linked to
	// cacheFile; remove it afterwards so repeated or failed writes do not
	// accumulate stray temp files in the cache directory.
	defer os.Remove(tmpFile.Name())

	// Write the binary encoded openapi spec to the temp file.
	if _, err := io.Copy(tmpFile, bytes.NewBuffer(b)); err != nil {
		tmpFile.Close()
		return fmt.Errorf("Could not write temp cache file: %v", err)
	}
	// Close before linking so the descriptor is not leaked and any buffered
	// write error is surfaced.
	if err := tmpFile.Close(); err != nil {
		return fmt.Errorf("Could not close temp cache file: %v", err)
	}

	// Link the temp cache file to the constant cache filepath
	return linkFiles(tmpFile.Name(), cacheFile)
}
// openAPICacheFilename returns the filename to read the cache from.
// The path embeds both the server version (c.version) and this client's
// build version, so different client/server pairs cache independently.
func (c *CachingOpenAPIClient) openAPICacheFilename() string {
	// Cache using the client and server versions
	return filepath.Join(c.cacheDirName, c.version, version.Get().GitVersion, openapiFileName)
}
// linkFiles links the old file to the new file
func linkFiles(old, new string) error {
if err := os.Link(old, new); err != nil {
// If we can't write due to file existing, or permission problems, keep going.
if os.IsExist(err) || os.IsPermission(err) {
return nil
}
return err
}
return nil
}

View File

@ -0,0 +1,268 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openapi_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"gopkg.in/yaml.v2"
"github.com/googleapis/gnostic/OpenAPIv2"
"github.com/googleapis/gnostic/compiler"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi"
)
// Exercises the cache-hit / cache-miss behavior of CachingOpenAPIClient
// against a real temp directory on disk.
var _ = Describe("When reading openAPIData", func() {
	var tmpDir string
	var err error
	var client *fakeOpenAPIClient
	var instance *openapi.CachingOpenAPIClient
	var expectedData openapi.Resources

	// Each spec gets a fresh temp cache dir and a fresh fake client so
	// specs cannot observe one another's cache files or call counts.
	BeforeEach(func() {
		tmpDir, err = ioutil.TempDir("", "openapi_cache_test")
		Expect(err).To(BeNil())
		client = &fakeOpenAPIClient{}
		instance = openapi.NewCachingOpenAPIClient(client, "v1.6", tmpDir)

		// Parse the shared test fixture `data` (defined elsewhere in this
		// package) to know what the instance is expected to return.
		d, err := data.OpenAPISchema()
		Expect(err).To(BeNil())
		expectedData, err = openapi.NewOpenAPIData(d)
		Expect(err).To(BeNil())
	})

	AfterEach(func() {
		os.RemoveAll(tmpDir)
	})

	It("should write to the cache", func() {
		By("getting the live openapi spec from the server")
		result, err := instance.OpenAPIData()
		Expect(err).To(BeNil())
		Expect(result).To(Equal(expectedData))
		Expect(client.calls).To(Equal(1))

		By("writing the live openapi spec to a local cache file")
		// Expected on-disk layout: <tmpDir>/<server version>/<client version>/openapi_cache
		names, err := getFilenames(tmpDir)
		Expect(err).To(BeNil())
		Expect(names).To(ConsistOf("v1.6"))

		names, err = getFilenames(filepath.Join(tmpDir, "v1.6"))
		Expect(err).To(BeNil())
		Expect(names).To(HaveLen(1))
		clientVersion := names[0]

		names, err = getFilenames(filepath.Join(tmpDir, "v1.6", clientVersion))
		Expect(err).To(BeNil())
		Expect(names).To(ContainElement("openapi_cache"))
	})

	It("should read from the cache", func() {
		// First call should use the client
		result, err := instance.OpenAPIData()
		Expect(err).To(BeNil())
		Expect(result).To(Equal(expectedData))
		Expect(client.calls).To(Equal(1))

		// Second call shouldn't use the client
		result, err = instance.OpenAPIData()
		Expect(err).To(BeNil())
		Expect(result).To(Equal(expectedData))
		Expect(client.calls).To(Equal(1))

		names, err := getFilenames(tmpDir)
		Expect(err).To(BeNil())
		Expect(names).To(ConsistOf("v1.6"))
	})

	It("propagate errors that are encountered", func() {
		// Expect an error
		client.err = fmt.Errorf("expected error")
		result, err := instance.OpenAPIData()
		Expect(err.Error()).To(Equal(client.err.Error()))
		Expect(result).To(BeNil())
		Expect(client.calls).To(Equal(1))

		// No cache file is written
		files, err := ioutil.ReadDir(tmpDir)
		Expect(err).To(BeNil())
		Expect(files).To(HaveLen(0))

		// Client error is not cached
		result, err = instance.OpenAPIData()
		Expect(err.Error()).To(Equal(client.err.Error()))
		Expect(result).To(BeNil())
		Expect(client.calls).To(Equal(2))
	})
})
// Spec for the two conditions under which CachingOpenAPIClient must skip the
// disk cache entirely: an empty server version, or an empty cache directory.
// In both cases the live schema is returned and nothing is written to disk.
var _ = Describe("Reading openAPIData", func() {
	var tmpDir string
	var serverVersion string
	var cacheDir string

	BeforeEach(func() {
		var err error
		tmpDir, err = ioutil.TempDir("", "openapi_cache_test")
		Expect(err).To(BeNil())
	})

	AfterEach(func() {
		os.RemoveAll(tmpDir)
	})

	// Set the serverVersion to empty
	Context("when the server version is empty", func() {
		BeforeEach(func() {
			serverVersion = ""
			cacheDir = tmpDir
		})
		It("should not cache the result", func() {
			client := &fakeOpenAPIClient{}
			instance := openapi.NewCachingOpenAPIClient(client, serverVersion, cacheDir)

			d, err := data.OpenAPISchema()
			Expect(err).To(BeNil())
			expectedData, err := openapi.NewOpenAPIData(d)
			Expect(err).To(BeNil())

			By("getting the live openapi schema")
			result, err := instance.OpenAPIData()
			Expect(err).To(BeNil())
			Expect(result).To(Equal(expectedData))
			Expect(client.calls).To(Equal(1))

			// The cache directory must stay empty.
			files, err := ioutil.ReadDir(tmpDir)
			Expect(err).To(BeNil())
			Expect(files).To(HaveLen(0))
		})
	})

	Context("when the cache directory is empty", func() {
		BeforeEach(func() {
			serverVersion = "v1.6"
			cacheDir = ""
		})
		It("should not cache the result", func() {
			client := &fakeOpenAPIClient{}
			instance := openapi.NewCachingOpenAPIClient(client, serverVersion, cacheDir)

			d, err := data.OpenAPISchema()
			Expect(err).To(BeNil())
			expectedData, err := openapi.NewOpenAPIData(d)
			Expect(err).To(BeNil())

			By("getting the live openapi schema")
			result, err := instance.OpenAPIData()
			Expect(err).To(BeNil())
			Expect(result).To(Equal(expectedData))
			Expect(client.calls).To(Equal(1))

			// The cache directory must stay empty.
			files, err := ioutil.ReadDir(tmpDir)
			Expect(err).To(BeNil())
			Expect(files).To(HaveLen(0))
		})
	})
})
// Test Utils
// getFilenames returns the names of all entries directly under path.
// It fails with the underlying I/O error if the directory cannot be read.
func getFilenames(path string) ([]string, error) {
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(entries))
	for _, entry := range entries {
		names = append(names, entry.Name())
	}
	return names, nil
}
// fakeOpenAPIClient is a test double for the OpenAPI schema client. It counts
// how many times the schema is fetched and can be forced to fail via err.
type fakeOpenAPIClient struct {
	calls int   // number of OpenAPISchema invocations so far
	err   error // when non-nil, returned instead of the real schema
}

// OpenAPISchema records the call, then either returns the configured error or
// delegates to the shared swagger.json-backed test fixture.
func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) {
	f.calls = f.calls + 1
	if f.err != nil {
		return nil, f.err
	}
	return data.OpenAPISchema()
}
// Test utils

// data lazily loads and parses the repository's swagger.json exactly once,
// shared by all specs in this package.
var data apiData

// apiData memoizes the parsed OpenAPI document (or the first error hit while
// loading it) behind a sync.Once.
type apiData struct {
	sync.Once
	data *openapi_v2.Document // parsed document; nil if loading failed
	err  error                // first error encountered while loading
}

// OpenAPISchema returns the parsed swagger.json document, loading it on the
// first call. The spec is located relative to this test's working directory:
// five levels up from here, then api/openapi-spec/swagger.json.
func (d *apiData) OpenAPISchema() (*openapi_v2.Document, error) {
	d.Do(func() {
		// Get the path to the swagger.json file
		wd, err := os.Getwd()
		if err != nil {
			d.err = err
			return
		}
		abs, err := filepath.Abs(wd)
		if err != nil {
			d.err = err
			return
		}
		// Repository root is five directory levels above this package.
		root := filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(abs)))))
		specpath := filepath.Join(root, "api", "openapi-spec", "swagger.json")
		// Stat first so a missing spec yields a clear os.Stat error.
		_, err = os.Stat(specpath)
		if err != nil {
			d.err = err
			return
		}
		spec, err := ioutil.ReadFile(specpath)
		if err != nil {
			d.err = err
			return
		}
		// swagger.json is valid YAML; unmarshal into an order-preserving
		// MapSlice as required by the gnostic document builder.
		var info yaml.MapSlice
		err = yaml.Unmarshal(spec, &info)
		if err != nil {
			d.err = err
			return
		}
		d.data, d.err = openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
	})
	return d.data, d.err
}

View File

@ -29,6 +29,8 @@ type synchronizedOpenAPIGetter struct {
openAPISchema Resources
err error
serverVersion string
cacheDir string
openAPIClient discovery.OpenAPISchemaInterface
}
@ -40,10 +42,12 @@ type Getter interface {
Get() (Resources, error)
}
// NewOpenAPIGetter returns an object to return OpenAPIDatas which reads
// from a server, and then stores in memory for subsequent invocations
func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) Getter {
// NewOpenAPIGetter returns an object to return OpenAPIDatas which either read from a
// local file cache or read from a server, and then stored in memory for subsequent invocations
func NewOpenAPIGetter(cacheDir, serverVersion string, openAPIClient discovery.OpenAPISchemaInterface) Getter {
return &synchronizedOpenAPIGetter{
serverVersion: serverVersion,
cacheDir: cacheDir,
openAPIClient: openAPIClient,
}
}
@ -51,13 +55,15 @@ func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) Getter {
// Resources implements Getter
func (g *synchronizedOpenAPIGetter) Get() (Resources, error) {
g.Do(func() {
s, err := g.openAPIClient.OpenAPISchema()
client := NewCachingOpenAPIClient(g.openAPIClient, g.serverVersion, g.cacheDir)
result, err := client.OpenAPIData()
if err != nil {
g.err = err
return
}
g.openAPISchema, g.err = NewOpenAPIData(s)
// Save the result
g.openAPISchema = result
})
// Return the save result

View File

@ -18,83 +18,13 @@ package openapi_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"gopkg.in/yaml.v2"
"github.com/googleapis/gnostic/OpenAPIv2"
"github.com/googleapis/gnostic/compiler"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi"
)
// Test utils
var data apiData
type apiData struct {
sync.Once
data *openapi_v2.Document
err error
}
func (d *apiData) OpenAPISchema() (*openapi_v2.Document, error) {
d.Do(func() {
// Get the path to the swagger.json file
wd, err := os.Getwd()
if err != nil {
d.err = err
return
}
abs, err := filepath.Abs(wd)
if err != nil {
d.err = err
return
}
root := filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(abs)))))
specpath := filepath.Join(root, "api", "openapi-spec", "swagger.json")
_, err = os.Stat(specpath)
if err != nil {
d.err = err
return
}
spec, err := ioutil.ReadFile(specpath)
if err != nil {
d.err = err
return
}
var info yaml.MapSlice
err = yaml.Unmarshal(spec, &info)
if err != nil {
d.err = err
return
}
d.data, d.err = openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
})
return d.data, d.err
}
type fakeOpenAPIClient struct {
calls int
err error
}
func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) {
f.calls = f.calls + 1
if f.err != nil {
return nil, f.err
}
return data.OpenAPISchema()
}
var _ = Describe("Getting the Resources", func() {
var client *fakeOpenAPIClient
var expectedData openapi.Resources
@ -108,7 +38,7 @@ var _ = Describe("Getting the Resources", func() {
expectedData, err = openapi.NewOpenAPIData(d)
Expect(err).To(BeNil())
instance = openapi.NewOpenAPIGetter(client)
instance = openapi.NewOpenAPIGetter("", "", client)
})
Context("when the server returns a successful result", func() {

View File

@ -157,7 +157,7 @@ func TestConfigMapGenerate(t *testing.T) {
expectErr: true,
},
{
setup: setupEnvFile("key.1=value1"),
setup: setupEnvFile("key#1=value1"),
params: map[string]interface{}{
"name": "invalid_key",
"from-env-file": "file.env",

View File

@ -55,7 +55,7 @@ func proccessEnvFileLine(line []byte, filePath string,
data := strings.SplitN(string(line), "=", 2)
key = data[0]
if errs := validation.IsCIdentifier(key); len(errs) != 0 {
if errs := validation.IsEnvVarName(key); len(errs) != 0 {
return ``, ``, fmt.Errorf("%q is not a valid key name: %s", key, strings.Join(errs, ";"))
}

View File

@ -868,7 +868,7 @@ func parseEnvs(envArray []string) ([]v1.EnvVar, error) {
if len(name) == 0 {
return nil, fmt.Errorf("invalid env: %v", env)
}
if len(validation.IsCIdentifier(name)) != 0 {
if len(validation.IsEnvVarName(name)) != 0 {
return nil, fmt.Errorf("invalid env: %v", env)
}
envVar := v1.EnvVar{Name: name, Value: value}

View File

@ -1030,6 +1030,7 @@ func TestParseEnv(t *testing.T) {
{
envArray: []string{
"THIS_ENV=isOK",
"this.dotted.env=isOKToo",
"HAS_COMMAS=foo,bar",
"HAS_EQUALS=jJnro54iUu75xNy==",
},
@ -1038,6 +1039,10 @@ func TestParseEnv(t *testing.T) {
Name: "THIS_ENV",
Value: "isOK",
},
{
Name: "this.dotted.env",
Value: "isOKToo",
},
{
Name: "HAS_COMMAS",
Value: "foo,bar",

View File

@ -157,7 +157,7 @@ func TestSecretGenerate(t *testing.T) {
expectErr: true,
},
{
setup: setupEnvFile("key.1=value1"),
setup: setupEnvFile("key#1=value1"),
params: map[string]interface{}{
"name": "invalid_key",
"from-env-file": "file.env",

View File

@ -101,7 +101,7 @@ func New(address string, port uint, runtime string, rootPath string) (Interface,
sysFs := sysfs.NewRealSysFs()
// Create and start the cAdvisor container manager.
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisormetrics.MetricSet{cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}}, http.DefaultClient)
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisormetrics.MetricSet{cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}, cadvisormetrics.NetworkUdpUsageMetrics: struct{}{}}, http.DefaultClient)
if err != nil {
return nil, err
}

View File

@ -472,7 +472,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
if len(envFrom.Prefix) > 0 {
k = envFrom.Prefix + k
}
if errMsgs := utilvalidation.IsCIdentifier(k); len(errMsgs) != 0 {
if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
invalidKeys = append(invalidKeys, k)
continue
}
@ -507,7 +507,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
if len(envFrom.Prefix) > 0 {
k = envFrom.Prefix + k
}
if errMsgs := utilvalidation.IsCIdentifier(k); len(errMsgs) != 0 {
if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
invalidKeys = append(invalidKeys, k)
continue
}

View File

@ -1219,14 +1219,14 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Name: "test-secret",
},
Data: map[string][]byte{
"1234": []byte("abc"),
"1z": []byte("abc"),
"key": []byte("value"),
"1234": []byte("abc"),
"1z": []byte("abc"),
"key.1": []byte("value"),
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "key",
Name: "key.1",
Value: "value",
},
},
@ -1250,12 +1250,12 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Name: "test-secret",
},
Data: map[string][]byte{
"1234": []byte("abc"),
"1234.name": []byte("abc"),
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "p_1234",
Name: "p_1234.name",
Value: "abc",
},
},

View File

@ -10,6 +10,7 @@ load(
go_library(
name = "go_default_library",
srcs = [
"controller.go",
"hollow_kubelet.go",
"hollow_proxy.go",
],
@ -22,6 +23,7 @@ go_library(
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/componentconfig/v1alpha1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/cadvisor:go_default_library",
"//pkg/kubelet/cm:go_default_library",
@ -44,9 +46,15 @@ go_library(
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],

343
pkg/kubemark/controller.go Normal file
View File

@ -0,0 +1,343 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubemark
import (
"fmt"
"math/rand"
"sync"
"time"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
informersv1 "k8s.io/client-go/informers/core/v1"
kubeclient "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog"
)
const (
	// Namespace in the external cluster that holds all hollow-node objects.
	namespaceKubemark = "kubemark"
	hollowNodeName    = "hollow-node"
	// Label applied to hollow-node RCs/pods to assign them to a node group.
	nodeGroupLabel = "autoscaling.k8s.io/nodegroup"
	// How many times create/delete calls against the external cluster are
	// attempted before giving up.
	numRetries = 3
)

// KubemarkController is a simplified version of cloud provider for kubemark. It allows
// to add and delete nodes from a kubemark cluster and introduces nodegroups
// by applying labels to the kubemark's hollow-nodes.
type KubemarkController struct {
	// Template RC used to stamp out new hollow nodes; populated by Init.
	nodeTemplate    *apiv1.ReplicationController
	externalCluster externalCluster
	kubemarkCluster kubemarkCluster
	// Source of the random suffix appended to new hollow-node names.
	rand *rand.Rand
}

// externalCluster is used to communicate with the external cluster that hosts
// kubemark, in order to be able to list, create and delete hollow nodes
// by manipulating the replication controllers.
type externalCluster struct {
	rcLister  listersv1.ReplicationControllerLister
	rcSynced  cache.InformerSynced
	podLister listersv1.PodLister
	podSynced cache.InformerSynced
	client    kubeclient.Interface
}

// kubemarkCluster is used to delete nodes from kubemark cluster once their
// respective replication controllers have been deleted and the nodes have
// become unready. This is to cover for the fact that there is no proper cloud
// provider for kubemark that would care for deleting the nodes.
type kubemarkCluster struct {
	client     kubeclient.Interface
	nodeLister listersv1.NodeLister
	nodeSynced cache.InformerSynced
	// nodesToDelete[name] is true while node `name` awaits deletion once it
	// becomes unready; guarded by nodesToDeleteLock.
	nodesToDelete     map[string]bool
	nodesToDeleteLock sync.Mutex
}
// NewKubemarkController creates KubemarkController using the provided clients to talk to external
// and kubemark clusters.
// It wires RC and pod informers (scoped to the kubemark namespace) on the
// external cluster and a node-update handler on the kubemark cluster; callers
// must start the informer factories and then call Init before first use.
func NewKubemarkController(externalClient kubeclient.Interface, externalInformerFactory informers.SharedInformerFactory,
	kubemarkClient kubeclient.Interface, kubemarkNodeInformer informersv1.NodeInformer) (*KubemarkController, error) {
	rcInformer := externalInformerFactory.InformerFor(&apiv1.ReplicationController{}, newReplicationControllerInformer)
	podInformer := externalInformerFactory.InformerFor(&apiv1.Pod{}, newPodInformer)
	controller := &KubemarkController{
		externalCluster: externalCluster{
			rcLister:  listersv1.NewReplicationControllerLister(rcInformer.GetIndexer()),
			rcSynced:  rcInformer.HasSynced,
			podLister: listersv1.NewPodLister(podInformer.GetIndexer()),
			podSynced: podInformer.HasSynced,
			client:    externalClient,
		},
		kubemarkCluster: kubemarkCluster{
			nodeLister:        kubemarkNodeInformer.Lister(),
			nodeSynced:        kubemarkNodeInformer.Informer().HasSynced,
			client:            kubemarkClient,
			nodesToDelete:     make(map[string]bool),
			nodesToDeleteLock: sync.Mutex{},
		},
		rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
	}

	// Delete kubemark nodes that were marked for removal once they go unready.
	kubemarkNodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: controller.kubemarkCluster.removeUnneededNodes,
	})

	return controller, nil
}
// Init waits for population of caches and populates the node template needed
// for creation of kubemark nodes.
// It returns early (without a template) if the caches fail to sync before
// stopCh closes, and terminates the process via glog.Fatalf if no existing
// hollow node is available to derive the template from.
func (kubemarkController *KubemarkController) Init(stopCh chan struct{}) {
	if !controller.WaitForCacheSync("kubemark", stopCh,
		kubemarkController.externalCluster.rcSynced,
		kubemarkController.externalCluster.podSynced,
		kubemarkController.kubemarkCluster.nodeSynced) {
		return
	}

	// Get hollow node template from an existing hollow node to be able to create
	// new nodes based on it.
	nodeTemplate, err := kubemarkController.getNodeTemplate()
	if err != nil {
		glog.Fatalf("Failed to get node template: %s", err)
	}
	kubemarkController.nodeTemplate = nodeTemplate
}
// GetNodeNamesForNodegroup returns the names of the hollow-node pods that
// carry the given node group's label in the external cluster.
func (kubemarkController *KubemarkController) GetNodeNamesForNodegroup(nodeGroup string) ([]string, error) {
	groupSelector := labels.SelectorFromSet(labels.Set{nodeGroupLabel: nodeGroup})
	podList, err := kubemarkController.externalCluster.podLister.List(groupSelector)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(podList))
	for _, p := range podList {
		names = append(names, p.ObjectMeta.Name)
	}
	return names, nil
}
// GetNodeGroupSize returns the current size for the node group, counted as
// the number of replication controllers labelled with the group name.
func (kubemarkController *KubemarkController) GetNodeGroupSize(nodeGroup string) (int, error) {
	groupSelector := labels.SelectorFromSet(labels.Set{nodeGroupLabel: nodeGroup})
	rcs, err := kubemarkController.externalCluster.rcLister.List(groupSelector)
	if err != nil {
		return 0, err
	}
	return len(rcs), nil
}
// SetNodeGroupSize changes the size of node group by adding or removing nodes.
// When shrinking it removes nodes (in lister order) until the target size is
// reached; when growing it creates new hollow nodes from the node template.
// It returns an error if the group holds fewer nodes than must be removed, or
// if any individual add/remove operation fails.
func (kubemarkController *KubemarkController) SetNodeGroupSize(nodeGroup string, size int) error {
	currSize, err := kubemarkController.GetNodeGroupSize(nodeGroup)
	if err != nil {
		return err
	}
	switch delta := size - currSize; {
	case delta < 0:
		absDelta := -delta
		nodes, err := kubemarkController.GetNodeNamesForNodegroup(nodeGroup)
		if err != nil {
			return err
		}
		// Fix: the original check was inverted (len(nodes) > absDelta), which
		// rejected exactly the cases that had enough nodes to remove. Removing
		// absDelta nodes requires at least absDelta nodes to be present.
		if len(nodes) < absDelta {
			return fmt.Errorf("can't remove %d nodes from %s nodegroup, not enough nodes: %d", absDelta, nodeGroup, len(nodes))
		}
		for i, node := range nodes {
			if i == absDelta {
				// Already removed the requested number of nodes.
				return nil
			}
			if err := kubemarkController.removeNodeFromNodeGroup(nodeGroup, node); err != nil {
				return err
			}
		}
	case delta > 0:
		for i := 0; i < delta; i++ {
			if err := kubemarkController.addNodeToNodeGroup(nodeGroup); err != nil {
				return err
			}
		}
	}
	return nil
}
// addNodeToNodeGroup creates one new hollow node in the given node group by
// deep-copying the node template, giving it a unique randomized name, and
// labelling both the RC and its pod template with the group. The create call
// is retried up to numRetries times; the last error is returned on failure.
func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup string) error {
	copied, err := api.Scheme.Copy(kubemarkController.nodeTemplate)
	if err != nil {
		return err
	}
	rc := copied.(*apiv1.ReplicationController)
	rc.Name = fmt.Sprintf("%s-%d", nodeGroup, kubemarkController.rand.Int63())
	rc.Labels = map[string]string{nodeGroupLabel: nodeGroup, "name": rc.Name}
	rc.Spec.Template.Labels = rc.Labels

	for attempt := 0; attempt < numRetries; attempt++ {
		if _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc); err == nil {
			return nil
		}
	}
	return err
}
// removeNodeFromNodeGroup deletes the replication controller backing the
// hollow-node pod named `node`, after verifying the pod carries the expected
// node-group label. The node object itself is only marked for deletion here;
// it is removed from the kubemark cluster later, once it turns unready (see
// removeUnneededNodes).
func (kubemarkController *KubemarkController) removeNodeFromNodeGroup(nodeGroup string, node string) error {
	pods, err := kubemarkController.externalCluster.podLister.List(labels.Everything())
	if err != nil {
		return err
	}
	for _, pod := range pods {
		if pod.ObjectMeta.Name == node {
			if pod.ObjectMeta.Labels[nodeGroupLabel] != nodeGroup {
				return fmt.Errorf("can't delete node %s from nodegroup %s. Node is not in nodegroup", node, nodeGroup)
			}
			// Foreground propagation: dependents are removed before the RC.
			policy := metav1.DeletePropagationForeground
			for i := 0; i < numRetries; i++ {
				// The owning RC's name is stored in the pod's "name" label
				// (set by addNodeToNodeGroup on the pod template).
				err := kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete(
					pod.ObjectMeta.Labels["name"],
					&metav1.DeleteOptions{PropagationPolicy: &policy})
				if err == nil {
					glog.Infof("marking node %s for deletion", node)
					// Mark node for deletion from kubemark cluster.
					// Once it becomes unready after replication controller
					// deletion has been noticed, we will delete it explicitly.
					// This is to cover for the fact that kubemark does not
					// take care of this itself.
					kubemarkController.kubemarkCluster.markNodeForDeletion(node)
					return nil
				}
			}
		}
	}
	return fmt.Errorf("can't delete node %s from nodegroup %s. Node does not exist", node, nodeGroup)
}
// getReplicationControllerByName returns the cached replication controller
// with the given name, or nil when it is absent or the lister fails.
func (kubemarkController *KubemarkController) getReplicationControllerByName(name string) *apiv1.ReplicationController {
	all, err := kubemarkController.externalCluster.rcLister.List(labels.Everything())
	if err != nil {
		return nil
	}
	for _, candidate := range all {
		if candidate.ObjectMeta.Name == name {
			return candidate
		}
	}
	return nil
}
// getNodeNameForPod resolves a hollow-node pod name to the value of its
// "name" label (the owning RC's name). Returns an error if no such pod is
// cached or the lister fails.
func (kubemarkController *KubemarkController) getNodeNameForPod(podName string) (string, error) {
	cached, err := kubemarkController.externalCluster.podLister.List(labels.Everything())
	if err != nil {
		return "", err
	}
	for _, p := range cached {
		if p.ObjectMeta.Name == podName {
			return p.Labels["name"], nil
		}
	}
	return "", fmt.Errorf("pod %s not found", podName)
}
// getNodeTemplate returns the template for hollow node replication controllers
// by looking for an existing hollow node specification. This requires at least
// one kubemark node to be present on startup.
func (kubemarkController *KubemarkController) getNodeTemplate() (*apiv1.ReplicationController, error) {
	// Pick any existing kubemark node, then walk node -> pod "name" label ->
	// owning RC to recover a usable pod template.
	podName, err := kubemarkController.kubemarkCluster.getHollowNodeName()
	if err != nil {
		return nil, err
	}
	hollowNodeName, err := kubemarkController.getNodeNameForPod(podName)
	if err != nil {
		return nil, err
	}
	if hollowNode := kubemarkController.getReplicationControllerByName(hollowNodeName); hollowNode != nil {
		nodeTemplate := &apiv1.ReplicationController{
			Spec: apiv1.ReplicationControllerSpec{
				Template: hollowNode.Spec.Template,
			},
		}

		// Strip the selector (labels are set per-node on creation) and pin the
		// namespace and replica count expected by addNodeToNodeGroup.
		nodeTemplate.Spec.Selector = nil
		nodeTemplate.Namespace = namespaceKubemark
		one := int32(1)
		nodeTemplate.Spec.Replicas = &one

		return nodeTemplate, nil
	}
	return nil, fmt.Errorf("can't get hollow node template")
}
// getHollowNodeName returns the name of an arbitrary node currently present
// in the kubemark cluster, or an error if the cluster has none.
func (kubemarkCluster *kubemarkCluster) getHollowNodeName() (string, error) {
	nodes, err := kubemarkCluster.nodeLister.List(labels.Everything())
	if err != nil {
		return "", err
	}
	if len(nodes) > 0 {
		return nodes[0].Name, nil
	}
	return "", fmt.Errorf("did not find any hollow nodes in the cluster")
}
// removeUnneededNodes is the node-informer update handler. It deletes a node
// from the kubemark cluster once the node is both unready and previously
// marked for deletion via markNodeForDeletion, covering for the lack of a
// real cloud provider that would clean such nodes up.
func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, newObj interface{}) {
	node, ok := newObj.(*apiv1.Node)
	if !ok {
		return
	}
	for _, condition := range node.Status.Conditions {
		// Delete node if it is in unready state, and it has been
		// explicitly marked for deletion.
		if condition.Type == apiv1.NodeReady && condition.Status != apiv1.ConditionTrue {
			// The deferred Unlock is safe here: the function returns
			// immediately after this branch, so the lock is not held
			// across further loop iterations.
			kubemarkCluster.nodesToDeleteLock.Lock()
			defer kubemarkCluster.nodesToDeleteLock.Unlock()
			if kubemarkCluster.nodesToDelete[node.Name] {
				// Clear the mark before deleting so a failed delete is not
				// retried on every subsequent update event.
				kubemarkCluster.nodesToDelete[node.Name] = false
				if err := kubemarkCluster.client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}); err != nil {
					glog.Errorf("failed to delete node %s from kubemark cluster", node.Name)
				}
			}
			return
		}
	}
}
// markNodeForDeletion records that the named kubemark node should be deleted
// once it becomes unready (acted upon by removeUnneededNodes).
func (kubemarkCluster *kubemarkCluster) markNodeForDeletion(name string) {
	kubemarkCluster.nodesToDeleteLock.Lock()
	kubemarkCluster.nodesToDelete[name] = true
	kubemarkCluster.nodesToDeleteLock.Unlock()
}
// newReplicationControllerInformer builds a shared informer over replication
// controllers in the kubemark namespace of the given cluster.
func newReplicationControllerInformer(kubeClient kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	listWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "replicationcontrollers", namespaceKubemark, fields.Everything())
	return cache.NewSharedIndexInformer(listWatch, &apiv1.ReplicationController{}, resyncPeriod, nil)
}
// newPodInformer builds a shared informer over pods in the kubemark namespace
// of the given cluster.
func newPodInformer(kubeClient kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	listWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", namespaceKubemark, fields.Everything())
	return cache.NewSharedIndexInformer(listWatch, &apiv1.Pod{}, resyncPeriod, nil)
}

View File

@ -46,7 +46,7 @@ func (h ClientCARegistrationHook) PostStartHook(hookContext genericapiserver.Pos
return nil
}
// intializing CAs is important so that aggregated API servers can come up with "normal" config.
// initializing CAs is important so that aggregated API servers can come up with "normal" config.
// We've seen lagging etcd before, so we want to retry this a few times before we decide to crashloop
// the API server on it.
err := wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {
@ -62,7 +62,7 @@ func (h ClientCARegistrationHook) PostStartHook(hookContext genericapiserver.Pos
return h.tryToWriteClientCAs(client)
})
// if we're never able to make it through intialization, kill the API server
// if we're never able to make it through initialization, kill the API server
if err != nil {
return fmt.Errorf("unable to initialize client CA configmap: %v", err)
}

View File

@ -836,15 +836,16 @@ func printCronJobList(list *batch.CronJobList, options printers.PrintOptions) ([
// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes.
func loadBalancerStatusStringer(s api.LoadBalancerStatus, wide bool) string {
ingress := s.Ingress
result := []string{}
result := sets.NewString()
for i := range ingress {
if ingress[i].IP != "" {
result = append(result, ingress[i].IP)
result.Insert(ingress[i].IP)
} else if ingress[i].Hostname != "" {
result = append(result, ingress[i].Hostname)
result.Insert(ingress[i].Hostname)
}
}
r := strings.Join(result, ",")
r := strings.Join(result.List(), ",")
if !wide && len(r) > loadBalancerWidth {
r = r[0:(loadBalancerWidth-3)] + "..."
}

View File

@ -18,13 +18,9 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/apis/admissionregistration:go_default_library",
"//pkg/apis/admissionregistration/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
],
)

View File

@ -40,7 +40,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
ObjectNameFunc: func(obj runtime.Object) (string, error) {
return obj.(*admissionregistration.ExternalAdmissionHookConfiguration).Name, nil
},
PredicateFunc: externaladmissionhookconfiguration.MatchExternalAdmissionHookConfiguration,
DefaultQualifiedResource: admissionregistration.Resource("externaladmissionhookconfigurations"),
WatchCacheSize: cachesize.GetWatchCacheSizeByResource("externaladmissionhookconfigurations"),
@ -48,7 +47,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
UpdateStrategy: externaladmissionhookconfiguration.Strategy,
DeleteStrategy: externaladmissionhookconfiguration.Strategy,
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: externaladmissionhookconfiguration.GetAttrs}
options := &generic.StoreOptions{RESTOptions: optsGetter}
if err := store.CompleteWithOptions(options); err != nil {
panic(err) // TODO: Propagate error up
}

View File

@ -17,16 +17,11 @@ limitations under the License.
package externaladmissionhookconfiguration
import (
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
apistorage "k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/admissionregistration"
@ -93,28 +88,3 @@ func (externaladmissionhookConfigurationStrategy) ValidateUpdate(ctx genericapir
func (externaladmissionhookConfigurationStrategy) AllowUnconditionalUpdate() bool {
return false
}
// MatchReplicaSet is the filter used by the generic etcd backend to route
// watch events from etcd to clients of the apiserver only interested in specific
// labels/fields.
func MatchExternalAdmissionHookConfiguration(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate {
return apistorage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
}
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, bool, error) {
ic, ok := obj.(*admissionregistration.ExternalAdmissionHookConfiguration)
if !ok {
return nil, nil, false, fmt.Errorf("Given object is not a ExternalAdmissionHookConfiguration.")
}
return labels.Set(ic.ObjectMeta.Labels), ExternalAdmissionHookConfigurationToSelectableFields(ic), ic.Initializers != nil, nil
}
// ExternalAdmissionHookConfigurationToSelectableFields returns a field set that represents the object.
func ExternalAdmissionHookConfigurationToSelectableFields(ic *admissionregistration.ExternalAdmissionHookConfiguration) fields.Set {
return generic.ObjectMetaFieldsSet(&ic.ObjectMeta, false)
}

View File

@ -18,13 +18,9 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/apis/admissionregistration:go_default_library",
"//pkg/apis/admissionregistration/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
],
)

View File

@ -40,7 +40,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
ObjectNameFunc: func(obj runtime.Object) (string, error) {
return obj.(*admissionregistration.InitializerConfiguration).Name, nil
},
PredicateFunc: initializerconfiguration.MatchInitializerConfiguration,
DefaultQualifiedResource: admissionregistration.Resource("initializerconfigurations"),
WatchCacheSize: cachesize.GetWatchCacheSizeByResource("initializerconfigurations"),
@ -48,7 +47,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
UpdateStrategy: initializerconfiguration.Strategy,
DeleteStrategy: initializerconfiguration.Strategy,
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: initializerconfiguration.GetAttrs}
options := &generic.StoreOptions{RESTOptions: optsGetter}
if err := store.CompleteWithOptions(options); err != nil {
panic(err) // TODO: Propagate error up
}

View File

@ -17,16 +17,11 @@ limitations under the License.
package initializerconfiguration
import (
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
apistorage "k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/admissionregistration"
@ -93,28 +88,3 @@ func (initializerConfigurationStrategy) ValidateUpdate(ctx genericapirequest.Con
func (initializerConfigurationStrategy) AllowUnconditionalUpdate() bool {
return false
}
// MatchReplicaSet is the filter used by the generic etcd backend to route
// watch events from etcd to clients of the apiserver only interested in specific
// labels/fields.
func MatchInitializerConfiguration(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate {
return apistorage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
}
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, bool, error) {
ic, ok := obj.(*admissionregistration.InitializerConfiguration)
if !ok {
return nil, nil, false, fmt.Errorf("Given object is not a InitializerConfiguration.")
}
return labels.Set(ic.ObjectMeta.Labels), InitializerConfigurationToSelectableFields(ic), ic.ObjectMeta.Initializers != nil, nil
}
// InitializerConfigurationToSelectableFields returns a field set that represents the object.
func InitializerConfigurationToSelectableFields(ic *admissionregistration.InitializerConfiguration) fields.Set {
return generic.ObjectMetaFieldsSet(&ic.ObjectMeta, false)
}

View File

@ -17,8 +17,6 @@ go_test(
"//pkg/api:go_default_library",
"//pkg/apis/apps:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
],
@ -35,14 +33,10 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/apps/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
],
)

View File

@ -37,7 +37,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
Copier: api.Scheme,
NewFunc: func() runtime.Object { return &apps.ControllerRevision{} },
NewListFunc: func() runtime.Object { return &apps.ControllerRevisionList{} },
PredicateFunc: controllerrevision.MatchControllerRevision,
DefaultQualifiedResource: apps.Resource("controllerrevisions"),
WatchCacheSize: cachesize.GetWatchCacheSizeByResource("controllerrevisions"),
@ -45,7 +44,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
UpdateStrategy: controllerrevision.Strategy,
DeleteStrategy: controllerrevision.Strategy,
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: controllerrevision.GetAttrs}
options := &generic.StoreOptions{RESTOptions: optsGetter}
if err := store.CompleteWithOptions(options); err != nil {
panic(err)
}

View File

@ -17,16 +17,10 @@ limitations under the License.
package controllerrevision
import (
"errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
apistorage "k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
@ -83,26 +77,3 @@ func (strategy) ValidateUpdate(ctx genericapirequest.Context, newObj, oldObj run
oldRevision, newRevision := oldObj.(*apps.ControllerRevision), newObj.(*apps.ControllerRevision)
return validation.ValidateControllerRevisionUpdate(newRevision, oldRevision)
}
// ControllerRevisionToSelectableFields returns a field set that represents the object for matching purposes.
func ControllerRevisionToSelectableFields(revision *apps.ControllerRevision) fields.Set {
return generic.ObjectMetaFieldsSet(&revision.ObjectMeta, true)
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, bool, error) {
history, ok := obj.(*apps.ControllerRevision)
if !ok {
return nil, nil, false, errors.New("supplied object is not an ControllerRevision")
}
return labels.Set(history.ObjectMeta.Labels), ControllerRevisionToSelectableFields(history), history.Initializers != nil, nil
}
// MatchControllerRevision returns a generic matcher for a given label and field selector.
func MatchControllerRevision(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate {
return apistorage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
}
}

View File

@ -20,8 +20,6 @@ import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/api"
@ -127,55 +125,6 @@ func TestStrategy_ValidateUpdate(t *testing.T) {
}
}
func TestControllerRevisionToSelectableFields(t *testing.T) {
rev := newControllerRevision("validname", "validns", newObject(), 0)
fieldSet := ControllerRevisionToSelectableFields(rev)
if fieldSet.Get("metadata.name") != rev.Name {
t.Errorf("expeted %s found %s", rev.Name, fieldSet.Get("metadata.name"))
}
if fieldSet.Get("metadata.namespace") != rev.Namespace {
t.Errorf("expeted %s found %s", rev.Namespace, fieldSet.Get("metadata.namespace"))
}
}
func TestGetAttrs(t *testing.T) {
rev := newControllerRevision("validname", "validns", newObject(), 0)
labelSet, fieldSet, uninitialized, err := GetAttrs(rev)
if err != nil {
t.Fatal(err)
}
if uninitialized {
t.Errorf("unexpected attrs")
}
if fieldSet.Get("metadata.name") != rev.Name {
t.Errorf("expeted %s found %s", rev.Name, fieldSet.Get("metadata.name"))
}
if fieldSet.Get("metadata.namespace") != rev.Namespace {
t.Errorf("expeted %s found %s", rev.Namespace, fieldSet.Get("metadata.namespace"))
}
if labelSet.Get("foo") != rev.Labels["foo"] {
t.Errorf("expected %s found %s", rev.Labels["foo"], labelSet.Get("foo"))
}
}
func TestMatchControllerRevision(t *testing.T) {
rev := newControllerRevision("validname", "validns", newObject(), 0)
ls := labels.SelectorFromSet(labels.Set(rev.Labels))
pred := MatchControllerRevision(ls, nil)
if matches, err := pred.Matches(rev); err != nil {
t.Error(err)
} else if !matches {
t.Error("failed to match ControllerRevision by labels")
}
fs := fields.SelectorFromSet(ControllerRevisionToSelectableFields(rev))
pred = MatchControllerRevision(ls, fs)
if matches, err := pred.Matches(rev); err != nil {
t.Error(err)
} else if !matches {
t.Error("failed to match ControllerRevision by fields")
}
}
func newControllerRevision(name, namespace string, data runtime.Object, revision int64) *apps.ControllerRevision {
return &apps.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{

View File

@ -63,9 +63,10 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag
storage["deployments/scale"] = deploymentStorage.Scale
}
if apiResourceConfigSource.ResourceEnabled(version.WithResource("statefulsets")) {
statefulsetStorage, statefulsetStatusStorage := statefulsetstore.NewREST(restOptionsGetter)
storage["statefulsets"] = statefulsetStorage
storage["statefulsets/status"] = statefulsetStatusStorage
statefulSetStorage := statefulsetstore.NewStorage(restOptionsGetter)
storage["statefulsets"] = statefulSetStorage.StatefulSet
storage["statefulsets/status"] = statefulSetStorage.Status
storage["statefulsets/scale"] = statefulSetStorage.Scale
}
if apiResourceConfigSource.ResourceEnabled(version.WithResource("controllerrevisions")) {
historyStorage := controllerrevisionsstore.NewREST(restOptionsGetter)
@ -86,9 +87,10 @@ func (p RESTStorageProvider) v1beta2Storage(apiResourceConfigSource serverstorag
storage["deployments/scale"] = deploymentStorage.Scale
}
if apiResourceConfigSource.ResourceEnabled(version.WithResource("statefulsets")) {
statefulsetStorage, statefulsetStatusStorage := statefulsetstore.NewREST(restOptionsGetter)
storage["statefulsets"] = statefulsetStorage
storage["statefulsets/status"] = statefulsetStatusStorage
statefulSetStorage := statefulsetstore.NewStorage(restOptionsGetter)
storage["statefulsets"] = statefulSetStorage.StatefulSet
storage["statefulsets/status"] = statefulSetStorage.Status
storage["statefulsets/scale"] = statefulSetStorage.Scale
}
if apiResourceConfigSource.ResourceEnabled(version.WithResource("daemonsets")) {
daemonSetStorage, daemonSetStatusStorage := daemonsetstore.NewREST(restOptionsGetter)

View File

@ -12,6 +12,7 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"registry.go",
"strategy.go",
],
tags = ["automanaged"],
@ -20,14 +21,14 @@ go_library(
"//pkg/apis/apps:go_default_library",
"//pkg/apis/apps/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
],
)

View File

@ -0,0 +1,95 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
)
// Registry is an interface for things that know how to store StatefulSets.
type Registry interface {
ListStatefulSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*apps.StatefulSetList, error)
WatchStatefulSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error)
GetStatefulSet(ctx genericapirequest.Context, statefulSetID string, options *metav1.GetOptions) (*apps.StatefulSet, error)
CreateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error)
UpdateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error)
DeleteStatefulSet(ctx genericapirequest.Context, statefulSetID string) error
}
// storage puts strong typing around storage calls
type storage struct {
rest.StandardStorage
}
// NewRegistry returns a new Registry interface for the given Storage. Any mismatched
// types will panic.
func NewRegistry(s rest.StandardStorage) Registry {
return &storage{s}
}
func (s *storage) ListStatefulSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*apps.StatefulSetList, error) {
if options != nil && options.FieldSelector != nil && !options.FieldSelector.Empty() {
return nil, fmt.Errorf("field selector not supported yet")
}
obj, err := s.List(ctx, options)
if err != nil {
return nil, err
}
return obj.(*apps.StatefulSetList), err
}
func (s *storage) WatchStatefulSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {
return s.Watch(ctx, options)
}
func (s *storage) GetStatefulSet(ctx genericapirequest.Context, statefulSetID string, options *metav1.GetOptions) (*apps.StatefulSet, error) {
obj, err := s.Get(ctx, statefulSetID, options)
if err != nil {
return nil, errors.NewNotFound(apps.Resource("statefulsets/scale"), statefulSetID)
}
return obj.(*apps.StatefulSet), nil
}
func (s *storage) CreateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) {
obj, err := s.Create(ctx, statefulSet, false)
if err != nil {
return nil, err
}
return obj.(*apps.StatefulSet), nil
}
func (s *storage) UpdateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) {
obj, _, err := s.Update(ctx, statefulSet.Name, rest.DefaultUpdatedObjectInfo(statefulSet, api.Scheme))
if err != nil {
return nil, err
}
return obj.(*apps.StatefulSet), nil
}
func (s *storage) DeleteStatefulSet(ctx genericapirequest.Context, statefulSetID string) error {
_, _, err := s.Delete(ctx, statefulSetID, nil)
return err
}

View File

@ -16,10 +16,14 @@ go_test(
deps = [
"//pkg/api:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/registry/registrytest:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library",
@ -34,8 +38,11 @@ go_library(
deps = [
"//pkg/api:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/validation:go_default_library",
"//pkg/registry/apps/statefulset:go_default_library",
"//pkg/registry/cachesize:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",

Some files were not shown because too many files have changed in this diff Show More