diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index 9684eb25e16..4c712323f5a 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -23906,78 +23906,6 @@
}
]
},
- "/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/rollback": {
- "post": {
- "description": "create rollback of a Deployment",
- "consumes": [
- "*/*"
- ],
- "produces": [
- "application/json",
- "application/yaml",
- "application/vnd.kubernetes.protobuf"
- ],
- "schemes": [
- "https"
- ],
- "tags": [
- "apps_v1beta2"
- ],
- "operationId": "createAppsV1beta2NamespacedDeploymentRollback",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentRollback"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "OK",
- "schema": {
- "$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentRollback"
- }
- },
- "401": {
- "description": "Unauthorized"
- }
- },
- "x-kubernetes-action": "post",
- "x-kubernetes-group-version-kind": {
- "group": "apps",
- "kind": "DeploymentRollback",
- "version": "v1beta2"
- }
- },
- "parameters": [
- {
- "uniqueItems": true,
- "type": "string",
- "description": "name of the DeploymentRollback",
- "name": "name",
- "in": "path",
- "required": true
- },
- {
- "uniqueItems": true,
- "type": "string",
- "description": "object name and auth scope, such as for teams and projects",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "uniqueItems": true,
- "type": "string",
- "description": "If 'true', then the output is pretty printed.",
- "name": "pretty",
- "in": "query"
- }
- ]
- },
"/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale": {
"get": {
"description": "read scale of the specified Deployment",
@@ -51264,7 +51192,7 @@
]
},
"io.k8s.api.apps.v1beta1.DeploymentRollback": {
- "description": "DeploymentRollback stores the information required to rollback a deployment.",
+ "description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
@@ -51333,7 +51261,7 @@
"format": "int32"
},
"rollbackTo": {
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
+ "description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"$ref": "#/definitions/io.k8s.api.apps.v1beta1.RollbackConfig"
},
"selector": {
@@ -51413,6 +51341,7 @@
}
},
"io.k8s.api.apps.v1beta1.RollbackConfig": {
+ "description": "DEPRECATED.",
"properties": {
"revision": {
"description": "The revision to rollback to. If set to 0, rollback to the last revision.",
@@ -51942,45 +51871,6 @@
}
]
},
- "io.k8s.api.apps.v1beta2.DeploymentRollback": {
- "description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentRollback stores the information required to rollback a deployment.",
- "required": [
- "name",
- "rollbackTo"
- ],
- "properties": {
- "apiVersion": {
- "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
- "type": "string"
- },
- "kind": {
- "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "type": "string"
- },
- "name": {
- "description": "Required: This must match the Name of a deployment.",
- "type": "string"
- },
- "rollbackTo": {
- "description": "The config of this deployment rollback.",
- "$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollbackConfig"
- },
- "updatedAnnotations": {
- "description": "The annotations to be updated to a deployment",
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- }
- },
- "x-kubernetes-group-version-kind": [
- {
- "group": "apps",
- "kind": "DeploymentRollback",
- "version": "v1beta2"
- }
- ]
- },
"io.k8s.api.apps.v1beta2.DeploymentSpec": {
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentSpec is the specification of the desired behavior of the Deployment.",
"required": [
@@ -52011,10 +51901,6 @@
"type": "integer",
"format": "int32"
},
- "rollbackTo": {
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
- "$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollbackConfig"
- },
"selector": {
"description": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"
@@ -52251,16 +52137,6 @@
}
}
},
- "io.k8s.api.apps.v1beta2.RollbackConfig": {
- "description": "WIP: This is not ready to be used and we plan to make breaking changes to it.",
- "properties": {
- "revision": {
- "description": "The revision to rollback to. If set to 0, rollback to the last revision.",
- "type": "integer",
- "format": "int64"
- }
- }
- },
"io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet": {
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. Spec to control the desired behavior of daemon set rolling update.",
"properties": {
@@ -58688,7 +58564,7 @@
]
},
"io.k8s.api.extensions.v1beta1.DeploymentRollback": {
- "description": "DeploymentRollback stores the information required to rollback a deployment.",
+ "description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
@@ -58757,7 +58633,7 @@
"format": "int32"
},
"rollbackTo": {
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
+ "description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.RollbackConfig"
},
"selector": {
@@ -59500,6 +59376,7 @@
}
},
"io.k8s.api.extensions.v1beta1.RollbackConfig": {
+ "description": "DEPRECATED.",
"properties": {
"revision": {
"description": "The revision to rollback to. If set to 0, rollback to the last revision.",
diff --git a/api/swagger-spec/apps_v1beta1.json b/api/swagger-spec/apps_v1beta1.json
index 2f384bd93c8..4fc11e5b530 100644
--- a/api/swagger-spec/apps_v1beta1.json
+++ b/api/swagger-spec/apps_v1beta1.json
@@ -3810,7 +3810,7 @@
},
"rollbackTo": {
"$ref": "v1beta1.RollbackConfig",
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
+ "description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done."
},
"progressDeadlineSeconds": {
"type": "integer",
@@ -5854,6 +5854,7 @@
},
"v1beta1.RollbackConfig": {
"id": "v1beta1.RollbackConfig",
+ "description": "DEPRECATED.",
"properties": {
"revision": {
"type": "integer",
@@ -5946,7 +5947,7 @@
},
"v1beta1.DeploymentRollback": {
"id": "v1beta1.DeploymentRollback",
- "description": "DeploymentRollback stores the information required to rollback a deployment.",
+ "description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
diff --git a/api/swagger-spec/apps_v1beta2.json b/api/swagger-spec/apps_v1beta2.json
index 0dd7d9ae7e4..b41d7f721ee 100644
--- a/api/swagger-spec/apps_v1beta2.json
+++ b/api/swagger-spec/apps_v1beta2.json
@@ -1895,67 +1895,6 @@
}
]
},
- {
- "path": "/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/rollback",
- "description": "API at /apis/apps/v1beta2",
- "operations": [
- {
- "type": "v1beta2.DeploymentRollback",
- "method": "POST",
- "summary": "create rollback of a Deployment",
- "nickname": "createNamespacedDeploymentRollback",
- "parameters": [
- {
- "type": "string",
- "paramType": "query",
- "name": "pretty",
- "description": "If 'true', then the output is pretty printed.",
- "required": false,
- "allowMultiple": false
- },
- {
- "type": "v1beta2.DeploymentRollback",
- "paramType": "body",
- "name": "body",
- "description": "",
- "required": true,
- "allowMultiple": false
- },
- {
- "type": "string",
- "paramType": "path",
- "name": "namespace",
- "description": "object name and auth scope, such as for teams and projects",
- "required": true,
- "allowMultiple": false
- },
- {
- "type": "string",
- "paramType": "path",
- "name": "name",
- "description": "name of the DeploymentRollback",
- "required": true,
- "allowMultiple": false
- }
- ],
- "responseMessages": [
- {
- "code": 200,
- "message": "OK",
- "responseModel": "v1beta2.DeploymentRollback"
- }
- ],
- "produces": [
- "application/json",
- "application/yaml",
- "application/vnd.kubernetes.protobuf"
- ],
- "consumes": [
- "*/*"
- ]
- }
- ]
- },
{
"path": "/apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale",
"description": "API at /apis/apps/v1beta2",
@@ -7282,10 +7221,6 @@
"type": "boolean",
"description": "Indicates that the deployment is paused."
},
- "rollbackTo": {
- "$ref": "v1beta2.RollbackConfig",
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
- },
"progressDeadlineSeconds": {
"type": "integer",
"format": "int32",
@@ -7321,17 +7256,6 @@
}
}
},
- "v1beta2.RollbackConfig": {
- "id": "v1beta2.RollbackConfig",
- "description": "WIP: This is not ready to be used and we plan to make breaking changes to it.",
- "properties": {
- "revision": {
- "type": "integer",
- "format": "int64",
- "description": "The revision to rollback to. If set to 0, rollback to the last revision."
- }
- }
- },
"v1beta2.DeploymentStatus": {
"id": "v1beta2.DeploymentStatus",
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentStatus is the most recently observed status of the Deployment.",
@@ -7414,36 +7338,6 @@
}
}
},
- "v1beta2.DeploymentRollback": {
- "id": "v1beta2.DeploymentRollback",
- "description": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentRollback stores the information required to rollback a deployment.",
- "required": [
- "name",
- "rollbackTo"
- ],
- "properties": {
- "kind": {
- "type": "string",
- "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
- },
- "apiVersion": {
- "type": "string",
- "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources"
- },
- "name": {
- "type": "string",
- "description": "Required: This must match the Name of a deployment."
- },
- "updatedAnnotations": {
- "type": "object",
- "description": "The annotations to be updated to a deployment"
- },
- "rollbackTo": {
- "$ref": "v1beta2.RollbackConfig",
- "description": "The config of this deployment rollback."
- }
- }
- },
"v1beta2.Scale": {
"id": "v1beta2.Scale",
"description": "WIP: This is not ready to be used and we plan to make breaking changes to it. Scale represents a scaling request for a resource.",
diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json
index 2c74fe32ae9..22c5a0ada73 100644
--- a/api/swagger-spec/extensions_v1beta1.json
+++ b/api/swagger-spec/extensions_v1beta1.json
@@ -8759,7 +8759,7 @@
},
"rollbackTo": {
"$ref": "v1beta1.RollbackConfig",
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
+ "description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done."
},
"progressDeadlineSeconds": {
"type": "integer",
@@ -8798,6 +8798,7 @@
},
"v1beta1.RollbackConfig": {
"id": "v1beta1.RollbackConfig",
+ "description": "DEPRECATED.",
"properties": {
"revision": {
"type": "integer",
@@ -8890,7 +8891,7 @@
},
"v1beta1.DeploymentRollback": {
"id": "v1beta1.DeploymentRollback",
- "description": "DeploymentRollback stores the information required to rollback a deployment.",
+ "description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
diff --git a/docs/api-reference/apps/v1beta1/definitions.html b/docs/api-reference/apps/v1beta1/definitions.html
index e28807b9ddc..8f81db43766 100755
--- a/docs/api-reference/apps/v1beta1/definitions.html
+++ b/docs/api-reference/apps/v1beta1/definitions.html
@@ -2463,6 +2463,9 @@ When an object is created, the system will populate this list with the current s
v1beta1.RollbackConfig
+
+DEPRECATED.
+
@@ -3490,7 +3493,7 @@ The StatefulSet guarantees that a given network identity will always map to the
rollbackTo |
-The config this deployment is rolling back to. Will be cleared after rollback is done. |
+DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done. |
false |
v1beta1.RollbackConfig |
|
@@ -4857,7 +4860,7 @@ Examples:
v1beta1.DeploymentRollback
-DeploymentRollback stores the information required to rollback a deployment.
+DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.
diff --git a/docs/api-reference/apps/v1beta2/definitions.html b/docs/api-reference/apps/v1beta2/definitions.html
new file mode 100755
index 00000000000..46acbe132ce
--- /dev/null
+++ b/docs/api-reference/apps/v1beta2/definitions.html
@@ -0,0 +1,7267 @@
+
+
+
+
+
+
+Top Level API Objects
+
+
+
+
+
+
+
Top Level API Objects
+
+
+
+
Definitions
+
+
+
v1.APIResourceList
+
+
APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+kind |
+Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds |
+false |
+string |
+ |
+
+
+apiVersion |
+APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources |
+false |
+string |
+ |
+
+
+groupVersion |
+groupVersion is the group and version this APIResourceList is for. |
+true |
+string |
+ |
+
+
+resources |
+resources contains the name of the resources and if they are namespaced. |
+true |
+v1.APIResource array |
+ |
+
+
+
+
+
+
+
v1.Affinity
+
+
Affinity is a group of affinity scheduling rules.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+nodeAffinity |
+Describes node affinity scheduling rules for the pod. |
+false |
+v1.NodeAffinity |
+ |
+
+
+podAffinity |
+Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). |
+false |
+v1.PodAffinity |
+ |
+
+
+podAntiAffinity |
+Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). |
+false |
+v1.PodAntiAffinity |
+ |
+
+
+
+
+
+
+
v1.NodeSelectorTerm
+
+
A null or empty node selector term matches no objects.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+matchExpressions |
+Required. A list of node selector requirements. The requirements are ANDed. |
+true |
+v1.NodeSelectorRequirement array |
+ |
+
+
+
+
+
+
+
v1.Preconditions
+
+
Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+uid |
+Specifies the target UID. |
+false |
+types.UID |
+ |
+
+
+
+
+
+
+
v1.ObjectFieldSelector
+
+
ObjectFieldSelector selects an APIVersioned field of an object.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+apiVersion |
+Version of the schema the FieldPath is written in terms of, defaults to "v1". |
+false |
+string |
+ |
+
+
+fieldPath |
+Path of the field to select in the specified API version. |
+true |
+string |
+ |
+
+
+
+
+
+
+
v1beta2.DaemonSetList
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSetList is a collection of daemon sets.
+
+
+
+
+
+
v1.SELinuxOptions
+
+
SELinuxOptions are the labels to be applied to the container
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+user |
+User is a SELinux user label that applies to the container. |
+false |
+string |
+ |
+
+
+role |
+Role is a SELinux role label that applies to the container. |
+false |
+string |
+ |
+
+
+type |
+Type is a SELinux type label that applies to the container. |
+false |
+string |
+ |
+
+
+level |
+Level is SELinux level label that applies to the container. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.VolumeMount
+
+
VolumeMount describes a mounting of a Volume within a container.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+This must match the Name of a Volume. |
+true |
+string |
+ |
+
+
+readOnly |
+Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. |
+false |
+boolean |
+false |
+
+
+mountPath |
+Path within the container at which the volume should be mounted. Must not contain :. |
+true |
+string |
+ |
+
+
+subPath |
+Path within the volume from which the container’s volume should be mounted. Defaults to "" (volume’s root). |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.DownwardAPIProjection
+
+
Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+items |
+Items is a list of DownwardAPIVolume file |
+false |
+v1.DownwardAPIVolumeFile array |
+ |
+
+
+
+
+
+
+
v1.LabelSelector
+
+
A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+matchLabels |
+matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. |
+false |
+object |
+ |
+
+
+matchExpressions |
+matchExpressions is a list of label selector requirements. The requirements are ANDed. |
+false |
+v1.LabelSelectorRequirement array |
+ |
+
+
+
+
+
+
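As an illustration of the two selector forms described under v1.LabelSelector above, a selector requiring app=web and tier in (frontend, cache) could be written as the following sketch (all keys and values are illustrative):

    {
      "matchLabels": {
        "app": "web"
      },
      "matchExpressions": [
        {
          "key": "tier",
          "operator": "In",
          "values": ["frontend", "cache"]
        }
      ]
    }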
+
v1.PersistentVolumeClaimSpec
+
+
PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes
+
+
+
+
+
+
v1.CephFSVolumeSource
+
+
Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.
+
+
+
+
+
+
v1.DownwardAPIVolumeSource
+
+
DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+items |
+Items is a list of downward API volume file |
+false |
+v1.DownwardAPIVolumeFile array |
+ |
+
+
+defaultMode |
+Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
+
+
v1.GCEPersistentDiskVolumeSource
+
+
Represents a Persistent Disk resource in Google Compute Engine.
+
+
+
A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.
+
+
+
+
+
+
v1beta2.StatefulSetList
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSetList is a collection of StatefulSets.
+
+
+
+
+
+
v1.ConfigMapVolumeSource
+
+
Adapts a ConfigMap into a volume.
+
+
+
The contents of the target ConfigMap’s Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
+false |
+string |
+ |
+
+
+items |
+If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. |
+false |
+v1.KeyToPath array |
+ |
+
+
+defaultMode |
+Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. |
+false |
+integer (int32) |
+ |
+
+
+optional |
+Specify whether the ConfigMap or its keys must be defined |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1beta2.Scale
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. Scale represents a scaling request for a resource.
+
+
+
+
+
+
v1beta2.RollingUpdateDaemonSet
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. Spec to control the desired behavior of daemon set rolling update.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+maxUnavailable |
+The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. |
+false |
+string |
+ |
+
+
+
+
+
+
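The 30% scenario described above for maxUnavailable would be expressed as the following sketch (the percentage is illustrative):

    {
      "maxUnavailable": "30%"
    }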
+
v1.GitRepoVolumeSource
+
+
Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+repository |
+Repository URL |
+true |
+string |
+ |
+
+
+revision |
+Commit hash for the specified revision. |
+false |
+string |
+ |
+
+
+directory |
+Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.SecretEnvSource
+
+
SecretEnvSource selects a Secret to populate the environment variables with.
+
+
+
The contents of the target Secret’s Data field will represent the key-value pairs as environment variables.
+
+
+
+
+
+
v1.PortworxVolumeSource
+
+
PortworxVolumeSource represents a Portworx volume resource.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+volumeID |
+VolumeID uniquely identifies a Portworx volume |
+true |
+string |
+ |
+
+
+fsType |
+FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+readOnly |
+Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.Capabilities
+
+
Adds and removes POSIX capabilities from running containers.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+add |
+Added capabilities |
+false |
+v1.Capability array |
+ |
+
+
+drop |
+Removed capabilities |
+false |
+v1.Capability array |
+ |
+
+
+
+
+
+
+
v1.Initializer
+
+
Initializer is information about an initializer that has not yet completed.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+name of the process that is responsible for initializing this object. |
+true |
+string |
+ |
+
+
+
+
+
+
+
v1beta2.StatefulSetStatus
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSetStatus represents the current state of a StatefulSet.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+observedGeneration |
+observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet’s generation, which is updated on mutation by the API Server. |
+false |
+integer (int64) |
+ |
+
+
+replicas |
+replicas is the number of Pods created by the StatefulSet controller. |
+true |
+integer (int32) |
+ |
+
+
+readyReplicas |
+readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. |
+false |
+integer (int32) |
+ |
+
+
+currentReplicas |
+currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. |
+false |
+integer (int32) |
+ |
+
+
+updatedReplicas |
+updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. |
+false |
+integer (int32) |
+ |
+
+
+currentRevision |
+currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). |
+false |
+string |
+ |
+
+
+updateRevision |
+updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.LocalObjectReference
+
+
LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
+
+
+
+
+
+
v1.ProjectedVolumeSource
+
+
Represents a projected volume source
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+sources |
+list of volume projections |
+true |
+v1.VolumeProjection array |
+ |
+
+
+defaultMode |
+Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
+
+
v1.ExecAction
+
+
ExecAction describes a "run in container" action.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+command |
+Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container’s filesystem. The command is simply exec’d, it is not run inside a shell, so traditional shell instructions ('|', etc) won’t work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. |
+false |
+string array |
+
+
+
+
+
+
+
v1.ObjectMeta
+
ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names |
+false |
+string |
+ |
+
+
+generateName |
+GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
+
+If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
+
+Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency |
+false |
+string |
+ |
+
+
+namespace |
+Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
+
+Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces |
+false |
+string |
+ |
+
+
+selfLink |
+SelfLink is a URL representing this object. Populated by the system. Read-only. |
+false |
+string |
+ |
+
+
+uid |
+UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
+
+Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids |
+false |
+string |
+ |
+
+
+resourceVersion |
+An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.
+
+Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency |
+false |
+string |
+ |
+
+
+generation |
+A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. |
+false |
+integer (int64) |
+ |
+
+
+creationTimestamp |
+CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+
+Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata |
+false |
+string |
+ |
+
+
+deletionTimestamp |
+DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.
+
+Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata |
+false |
+string |
+ |
+
+
+deletionGracePeriodSeconds |
+Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. |
+false |
+integer (int64) |
+ |
+
+
+labels |
+Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels |
+false |
+object |
+ |
+
+
+annotations |
+Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations |
+false |
+object |
+ |
+
+
+ownerReferences |
+List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. |
+false |
+v1.OwnerReference array |
+ |
+
+
+initializers |
+An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven’t explicitly asked to observe uninitialized objects.
+
+When an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user. |
+false |
+v1.Initializers |
+ |
+
+
+finalizers |
+Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. |
+false |
+string array |
+ |
+
+
+clusterName |
+The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1beta2.DeploymentStrategy
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentStrategy describes how to replace existing pods with new ones.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+type |
+Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. |
+false |
+string |
+ |
+
+
+rollingUpdate |
+Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. |
+false |
+v1beta2.RollingUpdateDeployment |
+ |
+
+
+
+
+
+
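A typical v1beta2.DeploymentStrategy matching the fields above might look like the sketch below; the maxUnavailable and maxSurge parameters belong to v1beta2.RollingUpdateDeployment, whose table is not reproduced in this section, so they are assumptions here:

    {
      "type": "RollingUpdate",
      "rollingUpdate": {
        "maxUnavailable": "25%",
        "maxSurge": "25%"
      }
    }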
+
types.UID
+
+
+
+
v1.AzureFileVolumeSource
+
+
AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+secretName |
+the name of secret that contains Azure Storage Account Name and Key |
+true |
+string |
+ |
+
+
+shareName |
+Share Name |
+true |
+string |
+ |
+
+
+readOnly |
+Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.ISCSIVolumeSource
+
+
Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+targetPortal |
+iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). |
+true |
+string |
+ |
+
+
+iqn |
+Target iSCSI Qualified Name. |
+true |
+string |
+ |
+
+
+lun |
+iSCSI target lun number. |
+true |
+integer (int32) |
+ |
+
+
+iscsiInterface |
+Optional: Defaults to default (tcp). iSCSI interface name that uses an iSCSI transport. |
+false |
+string |
+ |
+
+
+fsType |
+Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi |
+false |
+string |
+ |
+
+
+readOnly |
+ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. |
+false |
+boolean |
+false |
+
+
+portals |
+iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). |
+false |
+string array |
+ |
+
+
+chapAuthDiscovery |
+whether support iSCSI Discovery CHAP authentication |
+false |
+boolean |
+false |
+
+
+chapAuthSession |
+whether support iSCSI Session CHAP authentication |
+false |
+boolean |
+false |
+
+
+secretRef |
+CHAP secret for iSCSI target and initiator authentication |
+false |
+v1.LocalObjectReference |
+ |
+
+
+
+
+
+
+
v1beta2.DeploymentStatus
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentStatus is the most recently observed status of the Deployment.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+observedGeneration |
+The generation observed by the deployment controller. |
+false |
+integer (int64) |
+ |
+
+
+replicas |
+Total number of non-terminated pods targeted by this deployment (their labels match the selector). |
+false |
+integer (int32) |
+ |
+
+
+updatedReplicas |
+Total number of non-terminated pods targeted by this deployment that have the desired template spec. |
+false |
+integer (int32) |
+ |
+
+
+readyReplicas |
+Total number of ready pods targeted by this deployment. |
+false |
+integer (int32) |
+ |
+
+
+availableReplicas |
+Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. |
+false |
+integer (int32) |
+ |
+
+
+unavailableReplicas |
+Total number of unavailable pods targeted by this deployment. |
+false |
+integer (int32) |
+ |
+
+
+conditions |
+Represents the latest available observations of a deployment’s current state. |
+false |
+v1beta2.DeploymentCondition array |
+ |
+
+
+collisionCount |
+Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet. |
+false |
+integer (int64) |
+ |
+
+
+
+
+
+
+
v1.EmptyDirVolumeSource
+
+
Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+medium |
+What type of storage medium should back this directory. The default is "" which means to use the node’s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir |
+false |
+string |
+ |
+
+
+sizeLimit |
+Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1beta2.RollingUpdateStatefulSetStrategy
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+partition |
+Partition indicates the ordinal at which the StatefulSet should be partitioned. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
+
+
v1.PodAffinityTerm
+
+
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+labelSelector |
+A label query over a set of resources, in this case pods. |
+false |
+v1.LabelSelector |
+ |
+
+
+namespaces |
+namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod’s namespace" |
+false |
+string array |
+ |
+
+
+topologyKey |
+This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. |
+false |
+string |
+ |
+
+
+
+
+
+
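Combining the three fields above, a term that co-locates (or, for anti-affinity, separates) pods labelled app=web per node could look like this sketch; the label values and the kubernetes.io/hostname topology key are illustrative:

    {
      "labelSelector": {
        "matchLabels": {
          "app": "web"
        }
      },
      "namespaces": [],
      "topologyKey": "kubernetes.io/hostname"
    }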
+
v1beta2.DaemonSetStatus
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSetStatus represents the current status of a daemon set.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+currentNumberScheduled |
+The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ |
+true |
+integer (int32) |
+ |
+
+
+numberMisscheduled |
+The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ |
+true |
+integer (int32) |
+ |
+
+
+desiredNumberScheduled |
+The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ |
+true |
+integer (int32) |
+ |
+
+
+numberReady |
+The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready. |
+true |
+integer (int32) |
+ |
+
+
+observedGeneration |
+The most recent generation observed by the daemon set controller. |
+false |
+integer (int64) |
+ |
+
+
+updatedNumberScheduled |
+The total number of nodes that are running updated daemon pod |
+false |
+integer (int32) |
+ |
+
+
+numberAvailable |
+The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds) |
+false |
+integer (int32) |
+ |
+
+
+numberUnavailable |
+The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds) |
+false |
+integer (int32) |
+ |
+
+
+collisionCount |
+Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. |
+false |
+integer (int64) |
+ |
+
+
+
+
+
+
+
v1.EnvFromSource
+
+
EnvFromSource represents the source of a set of ConfigMaps
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+prefix |
+An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. |
+false |
+string |
+ |
+
+
+configMapRef |
+The ConfigMap to select from |
+false |
+v1.ConfigMapEnvSource |
+ |
+
+
+secretRef |
+The Secret to select from |
+false |
+v1.SecretEnvSource |
+ |
+
+
+
+
+
+
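For instance, an entry that imports every key of a ConfigMap with a CONF_ prefix might look like the sketch below; the names are illustrative, and it is assumed that v1.ConfigMapEnvSource identifies its referent by name, as v1.LocalObjectReference does:

    {
      "prefix": "CONF_",
      "configMapRef": {
        "name": "app-config"
      }
    }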
+
v1.PersistentVolumeClaim
+
+
PersistentVolumeClaim is a user’s request for and claim to a persistent volume
+
+
+
+
+
+
v1.PodAffinity
+
+
Pod affinity is a group of inter pod affinity scheduling rules.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+requiredDuringSchedulingIgnoredDuringExecution |
+NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm json:"requiredDuringSchedulingRequiredDuringExecution,omitempty" If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. |
+false |
+v1.PodAffinityTerm array |
+ |
+
+
+preferredDuringSchedulingIgnoredDuringExecution |
+The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. |
+false |
+v1.WeightedPodAffinityTerm array |
+ |
+
+
+
+
+
+
+
v1.FlockerVolumeSource
+
+
Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+datasetName |
+Name of the dataset stored as metadata → name on the dataset for Flocker should be considered as deprecated |
+false |
+string |
+ |
+
+
+datasetUUID |
+UUID of the dataset. This is unique identifier of a Flocker dataset |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.PersistentVolumeClaimVolumeSource
+
+
PersistentVolumeClaimVolumeSource references the user’s PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).
+
+
+
+
+
+
v1.ListMeta
+
ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+selfLink |
+SelfLink is a URL representing this object. Populated by the system. Read-only. |
+false |
+string |
+ |
+
+
+resourceVersion |
+String that identifies the server’s internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.PersistentVolumeClaimStatus
+
+
PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+
+
+
+
+
+
v1beta2.DeploymentCondition
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentCondition describes the state of a deployment at a certain point.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+type |
+Type of deployment condition. |
+true |
+string |
+ |
+
+
+status |
+Status of the condition, one of True, False, Unknown. |
+true |
+string |
+ |
+
+
+lastUpdateTime |
+The last time this condition was updated. |
+false |
+string |
+ |
+
+
+lastTransitionTime |
+Last time the condition transitioned from one status to another. |
+false |
+string |
+ |
+
+
+reason |
+The reason for the condition’s last transition. |
+false |
+string |
+ |
+
+
+message |
+A human readable message indicating details about the transition. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1beta2.StatefulSetSpec
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. A StatefulSetSpec is the specification of a StatefulSet.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+replicas |
+replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1. |
+false |
+integer (int32) |
+ |
+
+
+selector |
+selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors |
+false |
+v1.LabelSelector |
+ |
+
+
+template |
+template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. |
+true |
+v1.PodTemplateSpec |
+ |
+
+
+volumeClaimTemplates |
+volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name. |
+false |
+v1.PersistentVolumeClaim array |
+ |
+
+
+serviceName |
+serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller. |
+true |
+string |
+ |
+
+
+podManagementPolicy |
+podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is OrderedReady, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is Parallel which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once. |
+false |
+string |
+ |
+
+
+updateStrategy |
+updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template. |
+false |
+v1beta2.StatefulSetUpdateStrategy |
+ |
+
+
+revisionHistoryLimit |
+revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet’s revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
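Pulling the required fields above together, a minimal StatefulSetSpec sketch could look as follows; all names are illustrative, the pod template is left empty for brevity (a real object needs a full v1.PodTemplateSpec), and the RollingUpdate strategy type string is assumed from v1beta2.StatefulSetUpdateStrategy:

    {
      "serviceName": "web",
      "replicas": 3,
      "selector": {
        "matchLabels": {
          "app": "web"
        }
      },
      "template": {},
      "updateStrategy": {
        "type": "RollingUpdate",
        "rollingUpdate": {
          "partition": 0
        }
      }
    }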
+
+
v1beta2.ReplicaSetStatus
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetStatus represents the current status of a ReplicaSet.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+replicas |
+Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller |
+true |
+integer (int32) |
+ |
+
+
+fullyLabeledReplicas |
+The number of pods that have labels matching the labels of the pod template of the replicaset. |
+false |
+integer (int32) |
+ |
+
+
+readyReplicas |
+The number of ready replicas for this replica set. |
+false |
+integer (int32) |
+ |
+
+
+availableReplicas |
+The number of available replicas (ready for at least minReadySeconds) for this replica set. |
+false |
+integer (int32) |
+ |
+
+
+observedGeneration |
+ObservedGeneration reflects the generation of the most recently observed ReplicaSet. |
+false |
+integer (int64) |
+ |
+
+
+conditions |
+Represents the latest available observations of a replica set’s current state. |
+false |
+v1beta2.ReplicaSetCondition array |
+ |
+
+
+
+
+
+
+
v1beta2.DaemonSet
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSet represents the configuration of a daemon set.
+
+
+
+
+
+
v1.SecretVolumeSource
+
+
Adapts a Secret into a volume.
+
+
+
The contents of the target Secret’s Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+secretName |
+Name of the secret in the pod’s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret |
+false |
+string |
+ |
+
+
+items |
+If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. |
+false |
+v1.KeyToPath array |
+ |
+
+
+defaultMode |
+Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. |
+false |
+integer (int32) |
+ |
+
+
+optional |
+Specify whether the Secret or its keys must be defined |
+false |
+boolean |
+false |
+
+
+
+
+
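+A minimal, illustrative Pod fragment (all names here, such as my-secret and secret-vol, are hypothetical) showing how secretName, items (v1.KeyToPath), defaultMode and optional fit together:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-demo               # hypothetical
+spec:
+  containers:
+  - name: app
+    image: busybox
+    volumeMounts:
+    - name: secret-vol
+      mountPath: /etc/creds
+      readOnly: true
+  volumes:
+  - name: secret-vol
+    secret:
+      secretName: my-secret       # hypothetical Secret in the pod's namespace
+      defaultMode: 0400           # octal mode bits applied to the created files
+      optional: false
+      items:                      # project only the listed keys
+      - key: password
+        path: db/password         # relative path inside the mount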
+
+
v1.FlexVolumeSource
+
+
FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+driver |
+Driver is the name of the driver to use for this volume. |
+true |
+string |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. |
+false |
+string |
+ |
+
+
+secretRef |
+Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. |
+false |
+v1.LocalObjectReference |
+ |
+
+
+readOnly |
+Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+options |
+Optional: Extra command options if any. |
+false |
+object |
+ |
+
+
+
+
+
+
+
v1.EnvVarSource
+
+
EnvVarSource represents a source for the value of an EnvVar.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+fieldRef |
+Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. |
+false |
+v1.ObjectFieldSelector |
+ |
+
+
+resourceFieldRef |
+Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. |
+false |
+v1.ResourceFieldSelector |
+ |
+
+
+configMapKeyRef |
+Selects a key of a ConfigMap. |
+false |
+v1.ConfigMapKeySelector |
+ |
+
+
+secretKeyRef |
+Selects a key of a secret in the pod’s namespace |
+false |
+v1.SecretKeySelector |
+ |
+
+
+
+
+
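+A container-level env fragment sketching the four source kinds (app-config and db-secret are hypothetical objects):
+
+    env:
+    - name: MY_POD_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+    - name: MY_CPU_LIMIT
+      valueFrom:
+        resourceFieldRef:
+          containerName: app       # optional for env vars
+          resource: limits.cpu
+    - name: LOG_LEVEL
+      valueFrom:
+        configMapKeyRef:
+          name: app-config         # hypothetical ConfigMap
+          key: log-level
+    - name: DB_PASSWORD
+      valueFrom:
+        secretKeyRef:
+          name: db-secret          # hypothetical Secret
+          key: password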
+
+
v1beta2.ReplicaSetList
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetList is a collection of ReplicaSets.
+
+
+
+
+
+
v1.AzureDiskVolumeSource
+
+
AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+diskName |
+The Name of the data disk in the blob storage |
+true |
+string |
+ |
+
+
+diskURI |
+The URI of the data disk in the blob storage |
+true |
+string |
+ |
+
+
+cachingMode |
+Host Caching mode: None, Read Only, Read Write. |
+false |
+v1.AzureDataDiskCachingMode |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+readOnly |
+Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+kind |
+Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: Azure managed data disk (only in managed availability set). Defaults to Shared. |
+false |
+v1.AzureDataDiskKind |
+ |
+
+
+
+
+
+
+
v1.KeyToPath
+
+
Maps a string key to a path within a volume.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+key |
+The key to project. |
+true |
+string |
+ |
+
+
+path |
+The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. |
+true |
+string |
+ |
+
+
+mode |
+Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
+
+
v1.VsphereVirtualDiskVolumeSource
+
+
Represents a vSphere volume resource.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+volumePath |
+Path that identifies vSphere volume vmdk |
+true |
+string |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+storagePolicyName |
+Storage Policy Based Management (SPBM) profile name. |
+false |
+string |
+ |
+
+
+storagePolicyID |
+Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1beta2.StatefulSet
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSet represents a set of pods with consistent identities. Identities are defined as:
+ - Network: A single stable DNS and hostname.
+ - Storage: As many VolumeClaims as requested.
+The StatefulSet guarantees that a given network identity will always map to the same storage identity.
+
+
+
+
+
+
v1.DeleteOptions
+
+
DeleteOptions may be provided when deleting an API object.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+kind |
+Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds |
+false |
+string |
+ |
+
+
+apiVersion |
+APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources |
+false |
+string |
+ |
+
+
+gracePeriodSeconds |
+The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified; zero means delete immediately. |
+false |
+integer (int64) |
+ |
+
+
+preconditions |
+Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned. |
+false |
+v1.Preconditions |
+ |
+
+
+orphanDependents |
+Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object’s finalizers list. Either this field or PropagationPolicy may be set, but not both. |
+false |
+boolean |
+false |
+
+
+propagationPolicy |
+Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. |
+false |
+v1.DeletionPropagation |
+ |
+
+
+
+
+
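+A sketch of a DeleteOptions body as it might accompany a DELETE request (the UID below is a placeholder, and the exact apiVersion string can depend on the client):
+
+apiVersion: v1
+kind: DeleteOptions
+gracePeriodSeconds: 30
+propagationPolicy: Foreground      # alternatives: Background, Orphan; do not combine with orphanDependents
+preconditions:
+  uid: "00000000-0000-0000-0000-000000000000"   # placeholder; deletion proceeds only if the UID still matches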
+
+
v1.Volume
+
+
Volume represents a named volume in a pod that may be accessed by any container in the pod.
+
+
+
+
+
+
v1.ResourceFieldSelector
+
+
ResourceFieldSelector represents container resources (cpu, memory) and their output format
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+containerName |
+Container name: required for volumes, optional for env vars |
+false |
+string |
+ |
+
+
+resource |
+Required: resource to select |
+true |
+string |
+ |
+
+
+divisor |
+Specifies the output format of the exposed resources, defaults to "1" |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.VolumeProjection
+
+
Projection that may be projected along with other supported volume types
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+secret |
+information about the secret data to project |
+false |
+v1.SecretProjection |
+ |
+
+
+downwardAPI |
+information about the downwardAPI data to project |
+false |
+v1.DownwardAPIProjection |
+ |
+
+
+configMap |
+information about the configMap data to project |
+false |
+v1.ConfigMapProjection |
+ |
+
+
+
+
+
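+A volumes fragment of a PodSpec sketching a projected volume that combines all three source kinds (db-secret and app-config are hypothetical):
+
+  volumes:
+  - name: all-in-one               # hypothetical
+    projected:
+      sources:
+      - secret:
+          name: db-secret
+          items:
+          - key: password
+            path: db/password
+      - configMap:
+          name: app-config
+      - downwardAPI:
+          items:
+          - path: labels
+            fieldRef:
+              fieldPath: metadata.labels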
+
+
v1.Probe
+
+
Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+exec |
+One and only one of the following should be specified. Exec specifies the action to take. |
+false |
+v1.ExecAction |
+ |
+
+
+httpGet |
+HTTPGet specifies the http request to perform. |
+false |
+v1.HTTPGetAction |
+ |
+
+
+tcpSocket |
+TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported |
+false |
+v1.TCPSocketAction |
+ |
+
+
+initialDelaySeconds |
+Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes |
+false |
+integer (int32) |
+ |
+
+
+timeoutSeconds |
+Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes |
+false |
+integer (int32) |
+ |
+
+
+periodSeconds |
+How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. |
+false |
+integer (int32) |
+ |
+
+
+successThreshold |
+Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. |
+false |
+integer (int32) |
+ |
+
+
+failureThreshold |
+Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
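+A container-level sketch of liveness and readiness probes using the fields above (the /healthz path and port 8080 are hypothetical):
+
+    livenessProbe:
+      httpGet:                     # exactly one of exec, httpGet or tcpSocket
+        path: /healthz
+        port: 8080
+        scheme: HTTP
+      initialDelaySeconds: 15
+      periodSeconds: 10
+      timeoutSeconds: 1
+      failureThreshold: 3
+    readinessProbe:
+      tcpSocket:
+        port: 8080
+      initialDelaySeconds: 5
+      successThreshold: 1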
+
+
v1.WeightedPodAffinityTerm
+
+
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+weight |
+weight associated with matching the corresponding podAffinityTerm, in the range 1-100. |
+true |
+integer (int32) |
+ |
+
+
+podAffinityTerm |
+Required. A pod affinity term, associated with the corresponding weight. |
+true |
+v1.PodAffinityTerm |
+ |
+
+
+
+
+
+
+
v1.SecretKeySelector
+
+
SecretKeySelector selects a key of a Secret.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
+false |
+string |
+ |
+
+
+key |
+The key of the secret to select from. Must be a valid secret key. |
+true |
+string |
+ |
+
+
+optional |
+Specify whether the Secret or its key must be defined |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.Capability
+
+
+
+
v1.DownwardAPIVolumeFile
+
+
DownwardAPIVolumeFile represents information to create the file containing the pod field
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+path |
+Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'. |
+true |
+string |
+ |
+
+
+fieldRef |
+Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. |
+false |
+v1.ObjectFieldSelector |
+ |
+
+
+resourceFieldRef |
+Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. |
+false |
+v1.ResourceFieldSelector |
+ |
+
+
+mode |
+Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
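+A volumes fragment sketching downwardAPI files built from fieldRef and resourceFieldRef (names are hypothetical):
+
+  volumes:
+  - name: podinfo                  # hypothetical
+    downwardAPI:
+      items:
+      - path: labels
+        fieldRef:
+          fieldPath: metadata.labels
+      - path: cpu_limit
+        mode: 0644
+        resourceFieldRef:
+          containerName: app       # required when used in a volume
+          resource: limits.cpu
+          divisor: "1m"            # expose the limit in millicores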
+
+
v1.PodSpec
+
+
PodSpec is a description of a pod.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+volumes |
+List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes |
+false |
+v1.Volume array |
+ |
+
+
+initContainers |
+List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ |
+false |
+v1.Container array |
+ |
+
+
+containers |
+List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. |
+true |
+v1.Container array |
+ |
+
+
+restartPolicy |
+Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy |
+false |
+string |
+ |
+
+
+terminationGracePeriodSeconds |
+Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. |
+false |
+integer (int64) |
+ |
+
+
+activeDeadlineSeconds |
+Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. |
+false |
+integer (int64) |
+ |
+
+
+dnsPolicy |
+Set DNS policy for containers within the pod. One of ClusterFirstWithHostNet, ClusterFirst or Default. Defaults to "ClusterFirst". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to ClusterFirstWithHostNet. |
+false |
+string |
+ |
+
+
+nodeSelector |
+NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node’s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ |
+false |
+object |
+ |
+
+
+serviceAccountName |
+ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ |
+false |
+string |
+ |
+
+
+serviceAccount |
+DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. |
+false |
+string |
+ |
+
+
+automountServiceAccountToken |
+AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. |
+false |
+boolean |
+false |
+
+
+nodeName |
+NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. |
+false |
+string |
+ |
+
+
+hostNetwork |
+Host networking requested for this pod. Use the host’s network namespace. If this option is set, the ports that will be used must be specified. Default to false. |
+false |
+boolean |
+false |
+
+
+hostPID |
+Use the host’s pid namespace. Optional: Default to false. |
+false |
+boolean |
+false |
+
+
+hostIPC |
+Use the host’s ipc namespace. Optional: Default to false. |
+false |
+boolean |
+false |
+
+
+securityContext |
+SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. |
+false |
+v1.PodSecurityContext |
+ |
+
+
+imagePullSecrets |
+ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod |
+false |
+v1.LocalObjectReference array |
+ |
+
+
+hostname |
+Specifies the hostname of the Pod. If not specified, the pod’s hostname will be set to a system-defined value. |
+false |
+string |
+ |
+
+
+subdomain |
+If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domainname at all. |
+false |
+string |
+ |
+
+
+affinity |
+If specified, the pod’s scheduling constraints |
+false |
+v1.Affinity |
+ |
+
+
+schedulerName |
+If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. |
+false |
+string |
+ |
+
+
+tolerations |
+If specified, the pod’s tolerations. |
+false |
+v1.Toleration array |
+ |
+
+
+hostAliases |
+HostAliases is an optional list of hosts and IPs that will be injected into the pod’s hosts file if specified. This is only valid for non-hostNetwork pods. |
+false |
+v1.HostAlias array |
+ |
+
+
+priorityClassName |
+If specified, indicates the pod’s priority. "SYSTEM" is a special keyword which indicates the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. |
+false |
+string |
+ |
+
+
+priority |
+The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
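+A compact Pod manifest pulling several of the fields above together (every name, image and label value is hypothetical):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: app-pod
+spec:
+  restartPolicy: Always
+  terminationGracePeriodSeconds: 30
+  dnsPolicy: ClusterFirst
+  serviceAccountName: default
+  nodeSelector:
+    disktype: ssd                  # hypothetical node label
+  imagePullSecrets:
+  - name: registry-creds           # hypothetical
+  initContainers:
+  - name: init-wait
+    image: busybox
+    command: ["sh", "-c", "sleep 5"]
+  containers:
+  - name: app
+    image: nginx
+    ports:
+    - containerPort: 80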
+
+
v1.ContainerPort
+
+
ContainerPort represents a network port in a single container.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. |
+false |
+string |
+ |
+
+
+hostPort |
+Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. |
+false |
+integer (int32) |
+ |
+
+
+containerPort |
+Number of port to expose on the pod’s IP address. This must be a valid port number, 0 < x < 65536. |
+true |
+integer (int32) |
+ |
+
+
+protocol |
+Protocol for port. Must be UDP or TCP. Defaults to "TCP". |
+false |
+string |
+ |
+
+
+hostIP |
+What host IP to bind the external port to. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.Lifecycle
+
+
Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.
+
+
+
+
+
+
v1.GlusterfsVolumeSource
+
+
Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.
+
+
+
+
+
+
v1.Handler
+
+
Handler defines a specific action that should be taken
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+exec |
+One and only one of the following should be specified. Exec specifies the action to take. |
+false |
+v1.ExecAction |
+ |
+
+
+httpGet |
+HTTPGet specifies the http request to perform. |
+false |
+v1.HTTPGetAction |
+ |
+
+
+tcpSocket |
+TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported |
+false |
+v1.TCPSocketAction |
+ |
+
+
+
+
+
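+A container-level sketch of the two lifecycle hooks, each using one Handler action (the command, path and port are hypothetical):
+
+  containers:
+  - name: app
+    image: nginx
+    lifecycle:
+      postStart:
+        exec:
+          command: ["/bin/sh", "-c", "echo started > /tmp/started"]
+      preStop:
+        httpGet:
+          path: /shutdown
+          port: 8080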
+
+
v1.Toleration
+
+
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+key |
+Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. |
+false |
+string |
+ |
+
+
+operator |
+Operator represents a key’s relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. |
+false |
+string |
+ |
+
+
+value |
+Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. |
+false |
+string |
+ |
+
+
+effect |
+Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. |
+false |
+string |
+ |
+
+
+tolerationSeconds |
+TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. |
+false |
+integer (int64) |
+ |
+
+
+
+
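+A PodSpec fragment sketching both operators (the taint keys and value are hypothetical):
+
+spec:
+  tolerations:
+  - key: "dedicated"               # hypothetical taint key
+    operator: "Equal"
+    value: "gpu-node"
+    effect: "NoSchedule"
+  - key: "example.com/maintenance" # hypothetical taint key
+    operator: "Exists"
+    effect: "NoExecute"
+    tolerationSeconds: 300         # evicted 5 minutes after the taint appears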
+
+
+
v1.StatusCause
+
+
StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+reason |
+A machine-readable description of the cause of the error. If this value is empty there is no information available. |
+false |
+string |
+ |
+
+
+message |
+A human-readable description of the cause of the error. This field may be presented as-is to a reader. |
+false |
+string |
+ |
+
+
+field |
+The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
+
+Examples:
+ "name" - the field "name" on the current resource
+ "items[0].name" - the field "name" on the first array entry in "items" |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.RBDVolumeSource
+
+
Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
v1.ConfigMapProjection
+
+
Adapts a ConfigMap into a projected volume.
+
+
+
The contents of the target ConfigMap’s Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
+false |
+string |
+ |
+
+
+items |
+If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. |
+false |
+v1.KeyToPath array |
+ |
+
+
+optional |
+Specify whether the ConfigMap or its keys must be defined |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.PhotonPersistentDiskVolumeSource
+
+
Represents a Photon Controller persistent disk resource.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+pdID |
+ID that identifies Photon Controller persistent disk |
+true |
+string |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.ScaleIOVolumeSource
+
+
ScaleIOVolumeSource represents a persistent ScaleIO volume
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+gateway |
+The host address of the ScaleIO API Gateway. |
+true |
+string |
+ |
+
+
+system |
+The name of the storage system as configured in ScaleIO. |
+true |
+string |
+ |
+
+
+secretRef |
+SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. |
+true |
+v1.LocalObjectReference |
+ |
+
+
+sslEnabled |
+Flag to enable/disable SSL communication with Gateway, default false |
+false |
+boolean |
+false |
+
+
+protectionDomain |
+The name of the Protection Domain for the configured storage (defaults to "default"). |
+false |
+string |
+ |
+
+
+storagePool |
+The Storage Pool associated with the protection domain (defaults to "default"). |
+false |
+string |
+ |
+
+
+storageMode |
+Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). |
+false |
+string |
+ |
+
+
+volumeName |
+The name of a volume already created in the ScaleIO system that is associated with this volume source. |
+false |
+string |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+readOnly |
+Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1beta2.ReplicaSet
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSet represents the configuration of a ReplicaSet.
+
+
+
+
+
+
v1.Initializers
+
+
Initializers tracks the progress of initialization.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+pending |
+Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients. |
+true |
+v1.Initializer array |
+ |
+
+
+result |
+If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion. |
+false |
+v1.Status |
+ |
+
+
+
+
+
+
+
v1beta2.ScaleStatus
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ScaleStatus represents the current status of a scale subresource.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+replicas |
+actual number of observed instances of the scaled object. |
+true |
+integer (int32) |
+ |
+
+
+selector |
+label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors |
+false |
+object |
+ |
+
+
+targetSelector |
+label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.Status
+
+
Status is a return value for calls that don’t return other objects.
+
+
+
+
+
+
v1.NFSVolumeSource
+
+
Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.
+
+
+
+
+
+
v1beta2.DeploymentSpec
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentSpec is the specification of the desired behavior of the Deployment.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+replicas |
+Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. |
+false |
+integer (int32) |
+ |
+
+
+selector |
+Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. |
+false |
+v1.LabelSelector |
+ |
+
+
+template |
+Template describes the pods that will be created. |
+true |
+v1.PodTemplateSpec |
+ |
+
+
+strategy |
+The deployment strategy to use to replace existing pods with new ones. |
+false |
+v1beta2.DeploymentStrategy |
+ |
+
+
+minReadySeconds |
+Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) |
+false |
+integer (int32) |
+ |
+
+
+revisionHistoryLimit |
+The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. |
+false |
+integer (int32) |
+ |
+
+
+paused |
+Indicates that the deployment is paused. |
+false |
+boolean |
+false |
+
+
+progressDeadlineSeconds |
+The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
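+Since the group is still marked WIP above, treat this as a sketch only: a v1beta2 Deployment exercising the spec fields in the table (name, labels and image are hypothetical):
+
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: web
+spec:
+  replicas: 3
+  revisionHistoryLimit: 10
+  progressDeadlineSeconds: 600
+  selector:
+    matchLabels:
+      app: web
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: web
+    spec:
+      containers:
+      - name: web
+        image: nginx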
+
+
v1.HTTPHeader
+
+
HTTPHeader describes a custom header to be used in HTTP probes
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+The header field name |
+true |
+string |
+ |
+
+
+value |
+The header field value |
+true |
+string |
+ |
+
+
+
+
+
+
+
v1.FCVolumeSource
+
+
Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+targetWWNs |
+Required: FC target worldwide names (WWNs) |
+true |
+string array |
+ |
+
+
+lun |
+Required: FC target lun number |
+true |
+integer (int32) |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+readOnly |
+Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.PodAntiAffinity
+
+
Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+requiredDuringSchedulingIgnoredDuringExecution |
+NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm json:"requiredDuringSchedulingRequiredDuringExecution,omitempty" If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. |
+false |
+v1.PodAffinityTerm array |
+ |
+
+
+preferredDuringSchedulingIgnoredDuringExecution |
+The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. |
+false |
+v1.WeightedPodAffinityTerm array |
+ |
+
+
+
+
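+A PodSpec fragment sketching the preferred (soft) form, spreading pods with a hypothetical app=web label across hostnames:
+
+spec:
+  affinity:
+    podAntiAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 100
+        podAffinityTerm:
+          topologyKey: kubernetes.io/hostname
+          labelSelector:
+            matchExpressions:
+            - key: app
+              operator: In
+              values: ["web"]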
+
+
+
v1.DeletionPropagation
+
+
+
+
v1.TCPSocketAction
+
+
TCPSocketAction describes an action based on opening a socket
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+port |
+Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. |
+true |
+string |
+ |
+
+
+host |
+Optional: Host name to connect to, defaults to the pod IP. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.HTTPGetAction
+
+
HTTPGetAction describes an action based on HTTP Get requests.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+path |
+Path to access on the HTTP server. |
+false |
+string |
+ |
+
+
+port |
+Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. |
+true |
+string |
+ |
+
+
+host |
+Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. |
+false |
+string |
+ |
+
+
+scheme |
+Scheme to use for connecting to the host. Defaults to HTTP. |
+false |
+string |
+ |
+
+
+httpHeaders |
+Custom headers to set in the request. HTTP allows repeated headers. |
+false |
+v1.HTTPHeader array |
+ |
+
+
+
+
+
+
+
v1.StatusDetails
+
+
StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described). |
+false |
+string |
+ |
+
+
+group |
+The group attribute of the resource associated with the status StatusReason. |
+false |
+string |
+ |
+
+
+kind |
+The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds |
+false |
+string |
+ |
+
+
+uid |
+UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids |
+false |
+string |
+ |
+
+
+causes |
+The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes. |
+false |
+v1.StatusCause array |
+ |
+
+
+retryAfterSeconds |
+If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
+
+
v1.Container
+
+
A single application container that you want to run within a pod.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. |
+true |
+string |
+ |
+
+
+image |
+Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images |
+true |
+string |
+ |
+
+
+command |
+Entrypoint array. Not executed within a shell. The docker image’s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container’s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell |
+false |
+string array |
+ |
+
+
+args |
+Arguments to the entrypoint. The docker image’s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container’s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell |
+false |
+string array |
+ |
+
+
+workingDir |
+Container’s working directory. If not specified, the container runtime’s default will be used, which might be configured in the container image. Cannot be updated. |
+false |
+string |
+ |
+
+
+ports |
+List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. |
+false |
+v1.ContainerPort array |
+ |
+
+
+envFrom |
+List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. |
+false |
+v1.EnvFromSource array |
+ |
+
+
+env |
+List of environment variables to set in the container. Cannot be updated. |
+false |
+v1.EnvVar array |
+ |
+
+
+resources |
+Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources |
+false |
+v1.ResourceRequirements |
+ |
+
+
+volumeMounts |
+Pod volumes to mount into the container’s filesystem. Cannot be updated. |
+false |
+v1.VolumeMount array |
+ |
+
+
+livenessProbe |
+Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes |
+false |
+v1.Probe |
+ |
+
+
+readinessProbe |
+Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes |
+false |
+v1.Probe |
+ |
+
+
+lifecycle |
+Actions that the management system should take in response to container lifecycle events. Cannot be updated. |
+false |
+v1.Lifecycle |
+ |
+
+
+terminationMessagePath |
+Optional: Path at which the file to which the container’s termination message will be written is mounted into the container’s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. |
+false |
+string |
+ |
+
+
+terminationMessagePolicy |
+Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. |
+false |
+string |
+ |
+
+
+imagePullPolicy |
+Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images |
+false |
+string |
+ |
+
+
+securityContext |
+Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md |
+false |
+v1.SecurityContext |
+ |
+
+
+stdin |
+Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. |
+false |
+boolean |
+false |
+
+
+stdinOnce |
+Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, container processes that read from stdin will never receive an EOF. Default is false |
+false |
+boolean |
+false |
+
+
+tty |
+Whether this container should allocate a TTY for itself, also requires stdin to be true. Default is false. |
+false |
+boolean |
+false |
+
+
+
+
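+A single-container sketch touching the most commonly set fields (name, image, mount path and resource values are hypothetical):
+
+  containers:
+  - name: app
+    image: nginx:1.13              # hypothetical tag
+    imagePullPolicy: IfNotPresent
+    command: ["nginx"]
+    args: ["-g", "daemon off;"]
+    ports:
+    - name: http
+      containerPort: 80
+      protocol: TCP
+    resources:
+      requests:
+        cpu: "250m"
+        memory: "64Mi"
+      limits:
+        cpu: "500m"
+        memory: "128Mi"
+    volumeMounts:
+    - name: data                   # must match a volume declared in the PodSpec
+      mountPath: /usr/share/nginx/html
+    terminationMessagePolicy: FallbackToLogsOnError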
+
+
+
v1.PodSecurityContext
+
+
PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+seLinuxOptions |
+The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. |
+false |
+v1.SELinuxOptions |
+ |
+
+
+runAsUser |
+The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. |
+false |
+integer (int64) |
+ |
+
+
+runAsNonRoot |
+Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. |
+false |
+boolean |
+false |
+
+
+supplementalGroups |
+A list of groups applied to the first process run in each container, in addition to the container’s primary GID. If unspecified, no groups will be added to any container. |
+false |
+integer (int32) array |
+ |
+
+
+fsGroup |
+A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
+
+1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR’d with rw-rw---- |
+false |
+integer (int64) |
+ |
+
+
+
+
+
+
+
v1.OwnerReference
+
+
OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+apiVersion |
+API version of the referent. |
+true |
+string |
+ |
+
+
+kind |
+Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds |
+true |
+string |
+ |
+
+
+name |
+Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names |
+true |
+string |
+ |
+
+
+uid |
+UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids |
+true |
+string |
+ |
+
+
+controller |
+If true, this reference points to the managing controller. |
+false |
+boolean |
+false |
+
+
+blockOwnerDeletion |
+If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.APIResource
+
+
APIResource specifies the name of a resource and whether it is namespaced.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+name is the plural name of the resource. |
+true |
+string |
+ |
+
+
+singularName |
+singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface. |
+true |
+string |
+ |
+
+
+namespaced |
+namespaced indicates if a resource is namespaced or not. |
+true |
+boolean |
+false |
+
+
+kind |
+kind is the kind for the resource (e.g. Foo is the kind for a resource foo) |
+true |
+string |
+ |
+
+
+verbs |
+verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy) |
+true |
+string array |
+ |
+
+
+shortNames |
+shortNames is a list of suggested short names of the resource. |
+false |
+string array |
+ |
+
+
+categories |
+categories is a list of the grouped resources this resource belongs to (e.g. all) |
+false |
+string array |
+ |
+
+
+
+
+
+
+
v1.NodeSelectorRequirement
+
+
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+key |
+The label key that the selector applies to. |
+true |
+string |
+ |
+
+
+operator |
+Represents a key’s relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. |
+true |
+string |
+ |
+
+
+values |
+An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. |
+false |
+string array |
+ |
+
+
+
+
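+A node-affinity fragment sketching In and Gt requirements (both label keys are hypothetical):
+
+spec:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: topology.example.com/zone   # hypothetical node label
+            operator: In
+            values: ["zone-a", "zone-b"]
+          - key: example.com/gpu-count       # hypothetical node label
+            operator: Gt
+            values: ["0"]                    # interpreted as an integer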
+
+
+
v1.HostPathVolumeSource
+
+
Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.
+
+
+
+
+
+
v1.SecretProjection
+
+
Adapts a secret into a projected volume.
+
+
+
The contents of the target Secret’s Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
+false |
+string |
+ |
+
+
+items |
+If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the .. path or start with ... |
+false |
+v1.KeyToPath array |
+ |
+
+
+optional |
+Specify whether the Secret or its key must be defined |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.CinderVolumeSource
+
+
Represents a Cinder volume resource in OpenStack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
v1.SecurityContext
+
+
SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+capabilities |
+The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. |
+false |
+v1.Capabilities |
+ |
+
+
+privileged |
+Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. |
+false |
+boolean |
+false |
+
+
+seLinuxOptions |
+The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. |
+false |
+v1.SELinuxOptions |
+ |
+
+
+runAsUser |
+The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. |
+false |
+integer (int64) |
+ |
+
+
+runAsNonRoot |
+Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. |
+false |
+boolean |
+false |
+
+
+readOnlyRootFilesystem |
+Whether this container has a read-only root filesystem. Default is false. |
+false |
+boolean |
+false |
+
+
+allowPrivilegeEscalation |
+AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN |
+false |
+boolean |
+false |
+
+
+
+
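+A sketch showing pod-level and container-level settings side by side; the container-level block wins for any field set in both (the numeric IDs are hypothetical):
+
+spec:
+  securityContext:                 # v1.PodSecurityContext
+    runAsUser: 1000
+    fsGroup: 2000
+    supplementalGroups: [3000]
+  containers:
+  - name: app
+    image: busybox
+    securityContext:               # v1.SecurityContext
+      runAsNonRoot: true
+      readOnlyRootFilesystem: true
+      allowPrivilegeEscalation: false
+      capabilities:
+        add: ["NET_BIND_SERVICE"]
+        drop: ["ALL"]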
+
+
+
v1beta2.Deployment
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. Deployment enables declarative updates for Pods and ReplicaSets.
+
+
+
+
+
+
v1beta2.DeploymentList
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentList is a list of Deployments.
+
+
+
+
+
+
v1.AWSElasticBlockStoreVolumeSource
+
+
Represents a Persistent Disk resource in AWS.
+
+
+
An AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+volumeID |
+Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore |
+true |
+string |
+ |
+
+
+fsType |
+Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore |
+false |
+string |
+ |
+
+
+partition |
+The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). |
+false |
+integer (int32) |
+ |
+
+
+readOnly |
+Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore |
+false |
+boolean |
+false |
+
+
+
+
+
+
+
v1.QuobyteVolumeSource
+
+
Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+registry |
+Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes |
+true |
+string |
+ |
+
+
+volume |
+Volume is a string that references an already created Quobyte volume by name. |
+true |
+string |
+ |
+
+
+readOnly |
+ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. |
+false |
+boolean |
+false |
+
+
+user |
+User to map volume access to. Defaults to serviceaccount user. |
+false |
+string |
+ |
+
+
+group |
+Group to map volume access to. Default is no group. |
+false |
+string |
+ |
+
+
+
+
+
+
+
v1.WatchEvent
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+type |
+ |
+true |
+string |
+ |
+
+
+object |
+ |
+true |
+string |
+ |
+
+
+
+
+
+
+
v1.LabelSelectorRequirement
+
+
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+key |
+key is the label key that the selector applies to. |
+true |
+string |
+ |
+
+
+operator |
+operator represents a key’s relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. |
+true |
+string |
+ |
+
+
+values |
+values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. |
+false |
+string array |
+ |
+
+
+
+
+
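A short sketch of how these requirements are written inside a v1.LabelSelector; the label keys and values are hypothetical.

```yaml
# Hypothetical example: matchExpressions entries, each one a v1.LabelSelectorRequirement.
selector:
  matchExpressions:
  - key: tier                # label key the requirement applies to
    operator: In             # one of In, NotIn, Exists, DoesNotExist
    values:                  # must be non-empty for In / NotIn
    - frontend
    - backend
  - key: environment
    operator: Exists         # values must be empty for Exists / DoesNotExist
```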
+
+
v1.EnvVar
+
+
EnvVar represents an environment variable present in a Container.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+name |
+Name of the environment variable. Must be a C_IDENTIFIER. |
+true |
+string |
+ |
+
+
+value |
+Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e.: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". |
+false |
+string |
+ |
+
+
+valueFrom |
+Source for the environment variable’s value. Cannot be used if value is not empty. |
+false |
+v1.EnvVarSource |
+ |
+
+
+
+
+
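A minimal sketch of container env entries using both value and valueFrom; the variable names and literal values are placeholders.

```yaml
# Hypothetical example: EnvVar entries in a container spec.
env:
- name: LOG_LEVEL                        # must be a C_IDENTIFIER
  value: "debug"                         # literal value (placeholder)
- name: GREETING
  value: "level is $(LOG_LEVEL)"         # $(VAR_NAME) expands previously defined variables
- name: POD_NAME
  valueFrom:                             # v1.EnvVarSource; cannot be combined with value
    fieldRef:
      fieldPath: metadata.name           # downward API reference to the pod name
```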
+
+
v1.PersistentVolumeAccessMode
+
+
+
+
v1.ResourceRequirements
+
+
ResourceRequirements describes the compute resource requirements.
+
+
+
+
+
+
v1.HostAlias
+
+
HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod’s hosts file.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+ip |
+IP address of the host file entry. |
+false |
+string |
+ |
+
+
+hostnames |
+Hostnames for the above IP address. |
+false |
+string array |
+ |
+
+
+
+
+
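For illustration, hostAliases entries sit directly under the Pod spec and are written into the pod's hosts file; the IP and hostnames below are placeholders.

```yaml
# Hypothetical example: HostAlias entries in a Pod spec.
spec:
  hostAliases:
  - ip: "192.0.2.10"          # placeholder IP (TEST-NET-1 range)
    hostnames:
    - "foo.local"
    - "bar.local"
```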
+
+
v1beta2.RollingUpdateDeployment
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. Spec to control the desired behavior of rolling update.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+maxUnavailable |
+The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods. |
+false |
+string |
+ |
+
+
+maxSurge |
+The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods does not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods. |
+false |
+string |
+ |
+
+
+
+
+
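A minimal sketch of how maxUnavailable and maxSurge are set on a Deployment's strategy; the concrete values are placeholders.

```yaml
# Hypothetical example: rolling-update parameters under a Deployment's spec.strategy.
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%     # absolute number or percentage; cannot be 0 if maxSurge is 0
      maxSurge: 1             # absolute number or percentage; cannot be 0 if maxUnavailable is 0
```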
+
+
v1.PodTemplateSpec
+
+
PodTemplateSpec describes the data a pod should have when created from a template
+
+
+
+
+
+
v1beta2.ScaleSpec
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ScaleSpec describes the attributes of a scale subresource
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+replicas |
+desired number of instances for the scaled object. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
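As a sketch, this is the shape of the object exchanged with the scale subresource endpoints documented later (for example PUT .../deployments/{name}/scale); the object name and replica count are placeholders.

```yaml
# Hypothetical example: a v1beta2 Scale object; spec carries the desired replica count.
apiVersion: apps/v1beta2
kind: Scale
metadata:
  name: my-deployment          # placeholder; must match the scaled object
  namespace: default
spec:
  replicas: 5                  # desired number of instances for the scaled object
```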
+
+
v1.NodeSelector
+
+
A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+nodeSelectorTerms |
+Required. A list of node selector terms. The terms are ORed. |
+true |
+v1.NodeSelectorTerm array |
+ |
+
+
+
+
+
+
+
v1beta2.DaemonSetSpec
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSetSpec is the specification of a daemon set.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+selector |
+A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors |
+false |
+v1.LabelSelector |
+ |
+
+
+template |
+An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template’s node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template |
+true |
+v1.PodTemplateSpec |
+ |
+
+
+updateStrategy |
+An update strategy to replace existing DaemonSet pods with new pods. |
+false |
+v1beta2.DaemonSetUpdateStrategy |
+ |
+
+
+minReadySeconds |
+The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). |
+false |
+integer (int32) |
+ |
+
+
+templateGeneration |
+DEPRECATED. A sequence number representing a specific generation of the template. Populated by the system. It can only be set during creation. |
+false |
+integer (int64) |
+ |
+
+
+revisionHistoryLimit |
+The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
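A minimal sketch of the spec fields above on an apps/v1beta2 DaemonSet; the labels and image are hypothetical.

```yaml
# Hypothetical example: the spec portion of an apps/v1beta2 DaemonSet.
spec:
  selector:                        # must match the pod template labels
    matchLabels:
      app: node-agent
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  minReadySeconds: 10
  revisionHistoryLimit: 10
  template:                        # required; one copy runs on every matching node
    metadata:
      labels:
        app: node-agent
    spec:
      containers:
      - name: agent
        image: example/agent:1.0   # placeholder image
```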
+
+
v1.Patch
+
+
Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+
+
+
+
v1.ConfigMapEnvSource
+
+
ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.
+
+
+
The contents of the target ConfigMap’s Data field will represent the key-value pairs as environment variables.
+
+
+
+
+
+
v1beta2.ReplicaSetCondition
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetCondition describes the state of a replica set at a certain point.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+type |
+Type of replica set condition. |
+true |
+string |
+ |
+
+
+status |
+Status of the condition, one of True, False, Unknown. |
+true |
+string |
+ |
+
+
+lastTransitionTime |
+The last time the condition transitioned from one status to another. |
+false |
+string |
+ |
+
+
+reason |
+The reason for the condition’s last transition. |
+false |
+string |
+ |
+
+
+message |
+A human readable message indicating details about the transition. |
+false |
+string |
+ |
+
+
+
+
+
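Purely for illustration, a condition of this shape appears under a ReplicaSet's status.conditions as reported by the system; the timestamp, reason, and message below are hypothetical, not prescribed values.

```yaml
# Illustrative shape of an observed condition on a ReplicaSet (hypothetical values).
status:
  conditions:
  - type: ReplicaFailure
    status: "True"                           # one of True, False, Unknown
    lastTransitionTime: "2017-08-01T12:00:00Z"
    reason: FailedCreate                     # placeholder reason
    message: "pod creation forbidden: exceeded quota"   # placeholder message
```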
+
+
v1.StorageOSVolumeSource
+
+
Represents a StorageOS persistent volume resource.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+volumeName |
+VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. |
+false |
+string |
+ |
+
+
+volumeNamespace |
+VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod’s namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. |
+false |
+string |
+ |
+
+
+fsType |
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
+false |
+string |
+ |
+
+
+readOnly |
+Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. |
+false |
+boolean |
+false |
+
+
+secretRef |
+SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. |
+false |
+v1.LocalObjectReference |
+ |
+
+
+
+
+
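A minimal sketch of a Pod volume using these fields; the volume name and namespace are placeholders.

```yaml
# Hypothetical example: a Pod spec fragment with a StorageOS-backed volume.
volumes:
- name: storageos-data
  storageos:
    volumeName: redis-vol01        # placeholder; human-readable StorageOS volume name
    volumeNamespace: default       # optional; defaults to the Pod's namespace
    fsType: ext4                   # optional; implicitly ext4 if unspecified
    readOnly: false                # optional; defaults to false
```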
+
+
v1beta2.DaemonSetUpdateStrategy
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+type |
+Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is OnDelete. |
+false |
+string |
+ |
+
+
+rollingUpdate |
+Rolling update config params. Present only if type = "RollingUpdate". |
+false |
+v1beta2.RollingUpdateDaemonSet |
+ |
+
+
+
+
+
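A short sketch of the two strategy shapes described above, as they appear under a DaemonSet's spec.

```yaml
# Hypothetical example: updateStrategy on an apps/v1beta2 DaemonSet.
updateStrategy:
  type: RollingUpdate              # or OnDelete
  rollingUpdate:                   # only meaningful when type is RollingUpdate
    maxUnavailable: 1
```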
+
+
v1beta2.ReplicaSetSpec
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetSpec is the specification of a ReplicaSet.
+
+
+
+
+
+
v1.NodeAffinity
+
+
Node affinity is a group of node affinity scheduling rules.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+requiredDuringSchedulingIgnoredDuringExecution |
+If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. |
+false |
+v1.NodeSelector |
+ |
+
+
+preferredDuringSchedulingIgnoredDuringExecution |
+The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. |
+false |
+v1.PreferredSchedulingTerm array |
+ |
+
+
+
+
+
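As a sketch, node affinity combines a hard requirement (a v1.NodeSelector, whose terms are ORed) with soft preferences (v1.PreferredSchedulingTerm entries); the label keys and values below are placeholders.

```yaml
# Hypothetical example: nodeAffinity under a Pod's spec.affinity.
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:    # v1.NodeSelector; terms are ORed
      nodeSelectorTerms:
      - matchExpressions:
        - key: failure-domain.beta.kubernetes.io/zone  # placeholder label key
          operator: In
          values: ["zone-a", "zone-b"]
    preferredDuringSchedulingIgnoredDuringExecution:   # v1.PreferredSchedulingTerm array
    - weight: 50                                       # in the range 1-100
      preference:                                      # v1.NodeSelectorTerm
        matchExpressions:
        - key: disktype
          operator: In
          values: ["ssd"]
```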
+
+
v1.AzureDataDiskKind
+
+
+
+
v1.PreferredSchedulingTerm
+
+
An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it’s a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+weight |
+Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. |
+true |
+integer (int32) |
+ |
+
+
+preference |
+A node selector term, associated with the corresponding weight. |
+true |
+v1.NodeSelectorTerm |
+ |
+
+
+
+
+
+
+
v1.ConfigMapKeySelector
+
+
Selects a key from a ConfigMap.
+
+
+
+
+
+
v1beta2.StatefulSetUpdateStrategy
+
+
WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.
+
+
+
+
+
+
+
+
+
+
+
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+type |
+Type indicates the type of the StatefulSetUpdateStrategy. |
+false |
+string |
+ |
+
+
+rollingUpdate |
+RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. |
+false |
+v1beta2.RollingUpdateStatefulSetStrategy |
+ |
+
+
+
+
+
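A minimal sketch of this strategy on an apps/v1beta2 StatefulSet; the rollingUpdate.partition field is an assumption drawn from the referenced v1beta2.RollingUpdateStatefulSetStrategy type rather than from the table above.

```yaml
# Hypothetical example: updateStrategy on an apps/v1beta2 StatefulSet.
updateStrategy:
  type: RollingUpdate              # or OnDelete
  rollingUpdate:                   # v1beta2.RollingUpdateStatefulSetStrategy
    partition: 2                   # assumption: only ordinals >= partition are updated
```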
+
+
v1.AzureDataDiskCachingMode
+
+
+
+
any
+
+
Represents an untyped JSON map - see the description of the field for more info about the structure of this object.
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/api-reference/apps/v1beta2/operations.html b/docs/api-reference/apps/v1beta2/operations.html
index c7f4095ff51..72c3a90c1d6 100755
--- a/docs/api-reference/apps/v1beta2/operations.html
+++ b/docs/api-reference/apps/v1beta2/operations.html
@@ -3114,10 +3114,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
create rollback of a Deployment
+
read scale of the specified Deployment
-
POST /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/rollback
+
GET /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale
@@ -3151,14 +3151,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
-BodyParameter |
-body |
- |
-true |
-v1beta2.DeploymentRollback |
- |
-
-
PathParameter |
namespace |
object name and auth scope, such as for teams and projects |
@@ -3169,7 +3161,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
PathParameter |
name |
-name of the DeploymentRollback |
+name of the Scale |
true |
string |
|
@@ -3197,7 +3189,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
200 |
success |
-v1beta2.DeploymentRollback |
+v1beta2.Scale |
@@ -3241,10 +3233,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
read scale of the specified Deployment
+
replace scale of the specified Deployment
-
GET /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale
@@ -3278,6 +3270,14 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
+BodyParameter |
+body |
+ |
+true |
+v1beta2.Scale |
+ |
+
+
PathParameter |
namespace |
object name and auth scope, such as for teams and projects |
@@ -3360,133 +3360,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
replace scale of the specified Deployment
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/scale
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.Scale |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the Scale |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.Scale |
-
-
-
-
-
-
-
-
-
-
partially update scale of the specified Deployment
@@ -3494,7 +3367,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Parameters
+
Parameters
@@ -3552,7 +3425,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Responses
+
Responses
@@ -3577,7 +3450,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Consumes
+
Consumes
-
@@ -3593,6 +3466,125 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
+
+
+
+
read status of the specified Deployment
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/status
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the Deployment |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.Deployment |
+
+
+
+
+
+
+
Produces
@@ -3620,10 +3612,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
read status of the specified Deployment
+
replace status of the specified Deployment
-
GET /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/status
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/status
@@ -3657,6 +3649,14 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
+BodyParameter |
+body |
+ |
+true |
+v1beta2.Deployment |
+ |
+
+
PathParameter |
namespace |
object name and auth scope, such as for teams and projects |
@@ -3739,133 +3739,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
replace status of the specified Deployment
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/deployments/{name}/status
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.Deployment |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the Deployment |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.Deployment |
-
-
-
-
-
-
-
-
-
-
partially update status of the specified Deployment
@@ -3873,7 +3746,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Parameters
+
Parameters
@@ -3931,7 +3804,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Responses
+
Responses
@@ -3956,7 +3829,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Consumes
+
Consumes
-
@@ -3972,6 +3845,165 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
+
+
+
+
list or watch objects of kind ReplicaSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+QueryParameter |
+labelSelector |
+A selector to restrict the list of returned objects by their labels. Defaults to everything. |
+false |
+string |
+ |
+
+
+QueryParameter |
+fieldSelector |
+A selector to restrict the list of returned objects by their fields. Defaults to everything. |
+false |
+string |
+ |
+
+
+QueryParameter |
+includeUninitialized |
+If true, partially initialized resources are included in the response. |
+false |
+boolean |
+ |
+
+
+QueryParameter |
+watch |
+Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. |
+false |
+boolean |
+ |
+
+
+QueryParameter |
+resourceVersion |
+When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. |
+false |
+string |
+ |
+
+
+QueryParameter |
+timeoutSeconds |
+Timeout for the list/watch call. |
+false |
+integer (int32) |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+
+
+
+
+
+
Produces
@@ -3984,6 +4016,12 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
application/vnd.kubernetes.protobuf
+-
+
application/json;stream=watch
+
+-
+
application/vnd.kubernetes.protobuf;stream=watch
+
@@ -3999,10 +4037,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
list or watch objects of kind ReplicaSet
+
delete collection of ReplicaSet
-
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets
+
DELETE /apis/apps/v1beta2/namespaces/{namespace}/replicasets
@@ -4143,12 +4181,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
application/vnd.kubernetes.protobuf
-
-application/json;stream=watch
-
-
-application/vnd.kubernetes.protobuf;stream=watch
-
@@ -4164,10 +4196,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
delete collection of ReplicaSet
+
create a ReplicaSet
-
DELETE /apis/apps/v1beta2/namespaces/{namespace}/replicasets
+
POST /apis/apps/v1beta2/namespaces/{namespace}/replicasets
@@ -4201,6 +4233,1429 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
+BodyParameter |
+body |
+ |
+true |
+v1beta2.ReplicaSet |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
+
+
+
+
read the specified ReplicaSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+QueryParameter |
+export |
+Should this value be exported. Export strips fields that a user can not specify. |
+false |
+boolean |
+ |
+
+
+QueryParameter |
+exact |
+Should the export be exact. Exact export maintains cluster-specific fields like Namespace. |
+false |
+boolean |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
+
+
+
+
replace the specified ReplicaSet
+
+
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1beta2.ReplicaSet |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
+
+
+
+
delete a ReplicaSet
+
+
+
DELETE /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1.DeleteOptions |
+ |
+
+
+QueryParameter |
+gracePeriodSeconds |
+The duration in seconds before the object should be deleted. Value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. |
+false |
+integer (int32) |
+ |
+
+
+QueryParameter |
+orphanDependents |
+Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object’s finalizers list. Either this field or PropagationPolicy may be set, but not both. |
+false |
+boolean |
+ |
+
+
+QueryParameter |
+propagationPolicy |
+Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. |
+false |
+string |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1.Status |
+
+
+
+
+
+
+
+
+
+
+
partially update the specified ReplicaSet
+
+
+
PATCH /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1.Patch |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
Consumes
+
+
+-
+
application/json-patch+json
+
+-
+
application/merge-patch+json
+
+-
+
application/strategic-merge-patch+json
+
+
+
+
+
+
+
+
+
read scale of the specified ReplicaSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/scale
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the Scale |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.Scale |
+
+
+
+
+
+
+
+
+
+
+
replace scale of the specified ReplicaSet
+
+
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/scale
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1beta2.Scale |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the Scale |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.Scale |
+
+
+
+
+
+
+
+
+
+
+
partially update scale of the specified ReplicaSet
+
+
+
PATCH /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/scale
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1.Patch |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the Scale |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.Scale |
+
+
+
+
+
+
+
Consumes
+
+
+-
+
application/json-patch+json
+
+-
+
application/merge-patch+json
+
+-
+
application/strategic-merge-patch+json
+
+
+
+
+
+
+
+
+
read status of the specified ReplicaSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/status
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
+
+
+
+
replace status of the specified ReplicaSet
+
+
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/status
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1beta2.ReplicaSet |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
+
+
+
+
partially update status of the specified ReplicaSet
+
+
+
PATCH /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/status
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+BodyParameter |
+body |
+ |
+true |
+v1.Patch |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the ReplicaSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.ReplicaSet |
+
+
+
+
+
+
+
Consumes
+
+
+-
+
application/json-patch+json
+
+-
+
application/merge-patch+json
+
+-
+
application/strategic-merge-patch+json
+
+
+
+
+
+
+
+
+
list or watch objects of kind StatefulSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
QueryParameter |
labelSelector |
A selector to restrict the list of returned objects by their labels. Defaults to everything. |
@@ -4259,1423 +5714,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1.Status |
-
-
-
-
-
-
-
-
-
-
-
create a ReplicaSet
-
-
-
POST /apis/apps/v1beta2/namespaces/{namespace}/replicasets
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.ReplicaSet |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.ReplicaSet |
-
-
-
-
-
-
-
-
-
-
-
read the specified ReplicaSet
-
-
-
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-QueryParameter |
-export |
-Should this value be exported. Export strips fields that a user can not specify. |
-false |
-boolean |
- |
-
-
-QueryParameter |
-exact |
-Should the export be exact. Exact export maintains cluster-specific fields like Namespace. |
-false |
-boolean |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.ReplicaSet |
-
-
-
-
-
-
-
-
-
-
-
replace the specified ReplicaSet
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.ReplicaSet |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.ReplicaSet |
-
-
-
-
-
-
-
-
-
-
-
delete a ReplicaSet
-
-
-
DELETE /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1.DeleteOptions |
- |
-
-
-QueryParameter |
-gracePeriodSeconds |
-The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. |
-false |
-integer (int32) |
- |
-
-
-QueryParameter |
-orphanDependents |
-Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object’s finalizers list. Either this field or PropagationPolicy may be set, but not both. |
-false |
-boolean |
- |
-
-
-QueryParameter |
-propagationPolicy |
-Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. |
-false |
-string |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1.Status |
-
-
-
-
-
-
-
-
-
-
-
partially update the specified ReplicaSet
-
-
-
PATCH /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1.Patch |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.ReplicaSet |
-
-
-
-
-
-
-
Consumes
-
-
--
-
application/json-patch+json
-
--
-
application/merge-patch+json
-
--
-
application/strategic-merge-patch+json
-
-
-
-
-
-
-
-
-
read scale of the specified ReplicaSet
-
-
-
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/scale
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the Scale |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.Scale |
-
-
-
-
-
-
-
-
-
-
-
replace scale of the specified ReplicaSet
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/scale
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.Scale |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the Scale |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.Scale |
-
-
-
-
-
-
-
-
-
-
-
partially update scale of the specified ReplicaSet
-
-
-
PATCH /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/scale
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1.Patch |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the Scale |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.Scale |
-
-
-
-
-
-
-
Consumes
-
-
--
-
application/json-patch+json
-
--
-
application/merge-patch+json
-
--
-
application/strategic-merge-patch+json
-
-
-
-
-
-
-
-
-
read status of the specified ReplicaSet
-
-
-
GET /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/status
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.ReplicaSet |
-
-
-
-
-
-
-
-
-
-
-
replace status of the specified ReplicaSet
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/status
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.ReplicaSet |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.ReplicaSet |
-
-
-
-
-
-
-
-
-
-
-
partially update status of the specified ReplicaSet
-
-
-
PATCH /apis/apps/v1beta2/namespaces/{namespace}/replicasets/{name}/status
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1.Patch |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the ReplicaSet |
-true |
-string |
- |
-
-
-
-
@@ -5707,13 +5745,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
-
application/json-patch+json
-
--
-
application/merge-patch+json
-
--
-
application/strategic-merge-patch+json
+/
@@ -5731,6 +5763,12 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
application/vnd.kubernetes.protobuf
+
+application/json;stream=watch
+
+
+application/vnd.kubernetes.protobuf;stream=watch
+
@@ -5746,10 +5784,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
list or watch objects of kind StatefulSet
+
delete collection of StatefulSet
-
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets
+
DELETE /apis/apps/v1beta2/namespaces/{namespace}/statefulsets
@@ -5890,12 +5928,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
application/vnd.kubernetes.protobuf
-
-application/json;stream=watch
-
-
-application/vnd.kubernetes.protobuf;stream=watch
-
@@ -5911,10 +5943,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
delete collection of StatefulSet
+
create a StatefulSet
-
DELETE /apis/apps/v1beta2/namespaces/{namespace}/statefulsets
+
POST /apis/apps/v1beta2/namespaces/{namespace}/statefulsets
@@ -5948,51 +5980,11 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
-QueryParameter |
-labelSelector |
-A selector to restrict the list of returned objects by their labels. Defaults to everything. |
-false |
-string |
+BodyParameter |
+body |
|
-
-
-QueryParameter |
-fieldSelector |
-A selector to restrict the list of returned objects by their fields. Defaults to everything. |
-false |
-string |
- |
-
-
-QueryParameter |
-includeUninitialized |
-If true, partially initialized resources are included in the response. |
-false |
-boolean |
- |
-
-
-QueryParameter |
-watch |
-Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. |
-false |
-boolean |
- |
-
-
-QueryParameter |
-resourceVersion |
-When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. |
-false |
-string |
- |
-
-
-QueryParameter |
-timeoutSeconds |
-Timeout for the list/watch call. |
-false |
-integer (int32) |
+true |
+v1beta2.StatefulSet |
|
@@ -6026,7 +6018,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
200 |
success |
-v1.Status |
+v1beta2.StatefulSet |
@@ -6070,10 +6062,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
create a StatefulSet
+
read the specified StatefulSet
-
POST /apis/apps/v1beta2/namespaces/{namespace}/statefulsets
+
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}
@@ -6107,11 +6099,19 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
-BodyParameter |
-body |
+QueryParameter |
+export |
+Should this value be exported. Export strips fields that a user can not specify. |
+false |
+boolean |
|
-true |
-v1beta2.StatefulSet |
+
+
+QueryParameter |
+exact |
+Should the export be exact. Exact export maintains cluster-specific fields like Namespace. |
+false |
+boolean |
|
@@ -6122,6 +6122,14 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
string |
|
+
+PathParameter |
+name |
+name of the StatefulSet |
+true |
+string |
+ |
+
@@ -6189,10 +6197,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
read the specified StatefulSet
+
replace the specified StatefulSet
-
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}
@@ -6226,19 +6234,11 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
-QueryParameter |
-export |
-Should this value be exported. Export strips fields that a user can not specify. |
-false |
-boolean |
+BodyParameter |
+body |
|
-
-
-QueryParameter |
-exact |
-Should the export be exact. Exact export maintains cluster-specific fields like Namespace. |
-false |
-boolean |
+true |
+v1beta2.StatefulSet |
|
@@ -6324,133 +6324,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
replace the specified StatefulSet
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.StatefulSet |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the StatefulSet |
-true |
-string |
- |
-
-
-
-
-
-
-
-
-
-
-
delete a StatefulSet
@@ -6458,7 +6331,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Parameters
+
Parameters
@@ -6540,7 +6413,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Responses
+
Responses
@@ -6565,7 +6438,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Consumes
+
Consumes
-
@@ -6575,7 +6448,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Produces
+
Produces
-
@@ -6591,7 +6464,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
+
-
@@ -6609,7 +6482,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Parameters
+
Parameters
@@ -6667,7 +6540,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Responses
+
Responses
@@ -6692,7 +6565,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Consumes
+
Consumes
-
@@ -6708,6 +6581,125 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
+
+
+
+
read scale of the specified StatefulSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the Scale |
+true |
+string |
+ |
+
+
+
+
+
+
+
Responses
+
+
+
+
+
+
+
+
+HTTP Code |
+Description |
+Schema |
+
+
+
+
+200 |
+success |
+v1beta2.Scale |
+
+
+
+
+
+
+
Produces
@@ -6735,10 +6727,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
read scale of the specified StatefulSet
+
replace scale of the specified StatefulSet
-
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale
@@ -6772,6 +6764,14 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
+BodyParameter |
+body |
+ |
+true |
+v1beta2.Scale |
+ |
+
+
PathParameter |
namespace |
object name and auth scope, such as for teams and projects |
@@ -6854,133 +6854,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
replace scale of the specified StatefulSet
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/scale
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.Scale |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the Scale |
-true |
-string |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1beta2.Scale |
-
-
-
-
-
-
-
-
-
-
partially update scale of the specified StatefulSet
@@ -6988,7 +6861,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Parameters
+
Parameters
@@ -7046,7 +6919,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Responses
+
Responses
@@ -7071,7 +6944,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Consumes
+
Consumes
-
@@ -7087,6 +6960,125 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
+
+
+
+
read status of the specified StatefulSet
+
+
+
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+PathParameter |
+namespace |
+object name and auth scope, such as for teams and projects |
+true |
+string |
+ |
+
+
+PathParameter |
+name |
+name of the StatefulSet |
+true |
+string |
+ |
+
+
+
+
+
+
+
+
Produces
@@ -7114,10 +7106,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
read status of the specified StatefulSet
+
replace status of the specified StatefulSet
-
GET /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status
+
PUT /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status
@@ -7151,6 +7143,14 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
|
+BodyParameter |
+body |
+ |
+true |
+v1beta2.StatefulSet |
+ |
+
+
PathParameter |
namespace |
object name and auth scope, such as for teams and projects |
@@ -7233,133 +7233,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
replace status of the specified StatefulSet
-
-
-
PUT /apis/apps/v1beta2/namespaces/{namespace}/statefulsets/{name}/status
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-BodyParameter |
-body |
- |
-true |
-v1beta2.StatefulSet |
- |
-
-
-PathParameter |
-namespace |
-object name and auth scope, such as for teams and projects |
-true |
-string |
- |
-
-
-PathParameter |
-name |
-name of the StatefulSet |
-true |
-string |
- |
-
-
-
-
-
-
-
-
-
-
-
partially update status of the specified StatefulSet
@@ -7367,7 +7240,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Parameters
+
Parameters
@@ -7425,7 +7298,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Responses
+
Responses
@@ -7450,7 +7323,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
Consumes
+
Consumes
-
@@ -7466,6 +7339,157 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
+
+
+
+
list or watch objects of kind ReplicaSet
+
+
+
GET /apis/apps/v1beta2/replicasets
+
+
+
+
Parameters
+
+
+
+
+
+
+
+
+
+
+
+Type |
+Name |
+Description |
+Required |
+Schema |
+Default |
+
+
+
+
+QueryParameter |
+pretty |
+If true, then the output is pretty printed. |
+false |
+string |
+ |
+
+
+QueryParameter |
+labelSelector |
+A selector to restrict the list of returned objects by their labels. Defaults to everything. |
+false |
+string |
+ |
+
+
+QueryParameter |
+fieldSelector |
+A selector to restrict the list of returned objects by their fields. Defaults to everything. |
+false |
+string |
+ |
+
+
+QueryParameter |
+includeUninitialized |
+If true, partially initialized resources are included in the response. |
+false |
+boolean |
+ |
+
+
+QueryParameter |
+watch |
+Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. |
+false |
+boolean |
+ |
+
+
+QueryParameter |
+resourceVersion |
+When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. |
+false |
+string |
+ |
+
+
+QueryParameter |
+timeoutSeconds |
+Timeout for the list/watch call. |
+false |
+integer (int32) |
+ |
+
+
+
+
+
+
+
+
Produces
@@ -7478,6 +7502,12 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
application/vnd.kubernetes.protobuf
+-
+
application/json;stream=watch
+
+-
+
application/vnd.kubernetes.protobuf;stream=watch
+
@@ -7493,10 +7523,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
list or watch objects of kind ReplicaSet
+
list or watch objects of kind StatefulSet
-
GET /apis/apps/v1beta2/replicasets
+
GET /apis/apps/v1beta2/statefulsets
@@ -7650,10 +7680,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
list or watch objects of kind StatefulSet
+
watch individual changes to a list of DaemonSet
-
GET /apis/apps/v1beta2/statefulsets
+
GET /apis/apps/v1beta2/watch/daemonsets
@@ -7807,10 +7837,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
watch individual changes to a list of DaemonSet
+
watch individual changes to a list of Deployment
-
GET /apis/apps/v1beta2/watch/daemonsets
+
GET /apis/apps/v1beta2/watch/deployments
@@ -7964,163 +7994,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-
watch individual changes to a list of Deployment
-
-
-
GET /apis/apps/v1beta2/watch/deployments
-
-
-
-
Parameters
-
-
-
-
-
-
-
-
-
-
-
-Type |
-Name |
-Description |
-Required |
-Schema |
-Default |
-
-
-
-
-QueryParameter |
-pretty |
-If true, then the output is pretty printed. |
-false |
-string |
- |
-
-
-QueryParameter |
-labelSelector |
-A selector to restrict the list of returned objects by their labels. Defaults to everything. |
-false |
-string |
- |
-
-
-QueryParameter |
-fieldSelector |
-A selector to restrict the list of returned objects by their fields. Defaults to everything. |
-false |
-string |
- |
-
-
-QueryParameter |
-includeUninitialized |
-If true, partially initialized resources are included in the response. |
-false |
-boolean |
- |
-
-
-QueryParameter |
-watch |
-Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. |
-false |
-boolean |
- |
-
-
-QueryParameter |
-resourceVersion |
-When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. |
-false |
-string |
- |
-
-
-QueryParameter |
-timeoutSeconds |
-Timeout for the list/watch call. |
-false |
-integer (int32) |
- |
-
-
-
-
-
-
-
Responses
-
-
-
-
-
-
-
-
-HTTP Code |
-Description |
-Schema |
-
-
-
-
-200 |
-success |
-v1.WatchEvent |
-
-
-
-
-
-
-
-
Produces
-
-
--
-
application/json
-
--
-
application/yaml
-
--
-
application/vnd.kubernetes.protobuf
-
--
-
application/json;stream=watch
-
--
-
application/vnd.kubernetes.protobuf;stream=watch
-
-
-
-
-
-
-
watch individual changes to a list of DaemonSet
(Hunks @@ -8128 through @@ -9307 of this file adjust only generated HTML anchors around the Parameters, Responses, Consumes, and Produces headings of the remaining watch operations; the visible text is unchanged.)
@@ -9403,6 +9276,163 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
+Responses
+
+HTTP Code | Description | Schema
+200 | success | v1.WatchEvent
+
+Produces
+
+application/json
+application/yaml
+application/vnd.kubernetes.protobuf
+application/json;stream=watch
+application/vnd.kubernetes.protobuf;stream=watch
+
+watch individual changes to a list of ReplicaSet
+
+GET /apis/apps/v1beta2/watch/replicasets
+
+Parameters
+
+Type | Name | Description | Required | Schema | Default
+QueryParameter | pretty | If true, then the output is pretty printed. | false | string |
+QueryParameter | labelSelector | A selector to restrict the list of returned objects by their labels. Defaults to everything. | false | string |
+QueryParameter | fieldSelector | A selector to restrict the list of returned objects by their fields. Defaults to everything. | false | string |
+QueryParameter | includeUninitialized | If true, partially initialized resources are included in the response. | false | boolean |
+QueryParameter | watch | Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. | false | boolean |
+QueryParameter | resourceVersion | When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. | false | string |
+QueryParameter | timeoutSeconds | Timeout for the list/watch call. | false | integer (int32) |
+
+Responses
@@ -9473,10 +9503,10 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-watch individual changes to a list of ReplicaSet
+watch individual changes to a list of StatefulSet
-GET /apis/apps/v1beta2/watch/replicasets
+GET /apis/apps/v1beta2/watch/statefulsets
@@ -9629,163 +9659,6 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
-watch individual changes to a list of StatefulSet
-
-GET /apis/apps/v1beta2/watch/statefulsets
-
-Parameters
-
-Type | Name | Description | Required | Schema | Default
-QueryParameter | pretty | If true, then the output is pretty printed. | false | string |
-QueryParameter | labelSelector | A selector to restrict the list of returned objects by their labels. Defaults to everything. | false | string |
-QueryParameter | fieldSelector | A selector to restrict the list of returned objects by their fields. Defaults to everything. | false | string |
-QueryParameter | includeUninitialized | If true, partially initialized resources are included in the response. | false | boolean |
-QueryParameter | watch | Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. | false | boolean |
-QueryParameter | resourceVersion | When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it’s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. | false | string |
-QueryParameter | timeoutSeconds | Timeout for the list/watch call. | false | integer (int32) |
-
-Responses
-
-HTTP Code | Description | Schema
-200 | success | v1.WatchEvent
-
-Produces
-
-application/json
-application/yaml
-application/vnd.kubernetes.protobuf
-application/json;stream=watch
-application/vnd.kubernetes.protobuf;stream=watch
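
All of these watch-list operations share the same query parameters (labelSelector, fieldSelector, watch, resourceVersion, timeoutSeconds) and can stream results as application/json;stream=watch. As a rough sketch only, with a placeholder host and token and TLS setup omitted, a raw request against one of the documented endpoints could look like the following; a real client would normally go through client-go instead.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"os"
)

// Rough sketch of consuming one of the watch-list endpoints documented above.
// The host/port and TOKEN environment variable are placeholders, and certificate
// handling is omitted for brevity.
func main() {
	url := "https://127.0.0.1:6443/apis/apps/v1beta2/watch/statefulsets" +
		"?watch=true&resourceVersion=0&timeoutSeconds=60"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("TOKEN"))
	req.Header.Set("Accept", "application/json;stream=watch") // one of the documented Produces types

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The stream delivers one JSON-encoded v1.WatchEvent per line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
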
diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html
index 40a6afd78f3..0242c5f4433 100755
--- a/docs/api-reference/extensions/v1beta1/definitions.html
+++ b/docs/api-reference/extensions/v1beta1/definitions.html
@@ -3002,6 +3002,9 @@ When an object is created, the system will populate this list with the current s
v1beta1.RollbackConfig
+DEPRECATED.
@@ -4139,7 +4142,7 @@ When an object is created, the system will populate this list with the current s
rollbackTo |
-The config this deployment is rolling back to. Will be cleared after rollback is done. |
+DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done. |
false |
v1beta1.RollbackConfig |
|
@@ -5582,7 +5585,7 @@ Examples:
v1beta1.DeploymentRollback
-DeploymentRollback stores the information required to rollback a deployment.
+DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.
diff --git a/federation/apis/openapi-spec/swagger.json b/federation/apis/openapi-spec/swagger.json
index 3348e64685e..ec66398aa0d 100644
--- a/federation/apis/openapi-spec/swagger.json
+++ b/federation/apis/openapi-spec/swagger.json
@@ -12418,7 +12418,7 @@
]
},
"io.k8s.api.extensions.v1beta1.DeploymentRollback": {
- "description": "DeploymentRollback stores the information required to rollback a deployment.",
+ "description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
@@ -12487,7 +12487,7 @@
"format": "int32"
},
"rollbackTo": {
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
+ "description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.RollbackConfig"
},
"selector": {
@@ -12901,6 +12901,7 @@
}
},
"io.k8s.api.extensions.v1beta1.RollbackConfig": {
+ "description": "DEPRECATED.",
"properties": {
"revision": {
"description": "The revision to rollback to. If set to 0, rollback to the last revision.",
diff --git a/federation/apis/swagger-spec/extensions_v1beta1.json b/federation/apis/swagger-spec/extensions_v1beta1.json
index 6eac8ab38f1..796fc2d4388 100644
--- a/federation/apis/swagger-spec/extensions_v1beta1.json
+++ b/federation/apis/swagger-spec/extensions_v1beta1.json
@@ -7118,7 +7118,7 @@
},
"rollbackTo": {
"$ref": "v1beta1.RollbackConfig",
- "description": "The config this deployment is rolling back to. Will be cleared after rollback is done."
+ "description": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done."
},
"progressDeadlineSeconds": {
"type": "integer",
@@ -7157,6 +7157,7 @@
},
"v1beta1.RollbackConfig": {
"id": "v1beta1.RollbackConfig",
+ "description": "DEPRECATED.",
"properties": {
"revision": {
"type": "integer",
@@ -7249,7 +7250,7 @@
},
"v1beta1.DeploymentRollback": {
"id": "v1beta1.DeploymentRollback",
- "description": "DeploymentRollback stores the information required to rollback a deployment.",
+ "description": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"required": [
"name",
"rollbackTo"
diff --git a/federation/docs/api-reference/extensions/v1beta1/definitions.html b/federation/docs/api-reference/extensions/v1beta1/definitions.html
index 371167b51e8..c8834375e69 100755
--- a/federation/docs/api-reference/extensions/v1beta1/definitions.html
+++ b/federation/docs/api-reference/extensions/v1beta1/definitions.html
@@ -2757,6 +2757,9 @@ When an object is created, the system will populate this list with the current s
v1beta1.RollbackConfig
+DEPRECATED.
@@ -3853,7 +3856,7 @@ When an object is created, the system will populate this list with the current s
rollbackTo |
-The config this deployment is rolling back to. Will be cleared after rollback is done. |
+DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done. |
false |
v1beta1.RollbackConfig |
|
@@ -5144,7 +5147,7 @@ Examples:
v1beta1.DeploymentRollback
-DeploymentRollback stores the information required to rollback a deployment.
+DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.
diff --git a/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/pkg/apis/apps/v1beta2/zz_generated.conversion.go
index caf130e614d..edc7371240a 100644
--- a/pkg/apis/apps/v1beta2/zz_generated.conversion.go
+++ b/pkg/apis/apps/v1beta2/zz_generated.conversion.go
@@ -57,8 +57,6 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition,
Convert_v1beta2_DeploymentList_To_extensions_DeploymentList,
Convert_extensions_DeploymentList_To_v1beta2_DeploymentList,
- Convert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback,
- Convert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback,
Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec,
Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec,
Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus,
@@ -75,8 +73,6 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec,
Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus,
Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus,
- Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig,
- Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig,
Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet,
Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet,
Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
@@ -299,11 +295,6 @@ func autoConvert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deploym
return nil
}
-// Convert_v1beta2_Deployment_To_extensions_Deployment is an autogenerated conversion function.
-func Convert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error {
- return autoConvert_v1beta2_Deployment_To_extensions_Deployment(in, out, s)
-}
-
func autoConvert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
@@ -315,11 +306,6 @@ func autoConvert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Depl
return nil
}
-// Convert_extensions_Deployment_To_v1beta2_Deployment is an autogenerated conversion function.
-func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error {
- return autoConvert_extensions_Deployment_To_v1beta2_Deployment(in, out, s)
-}
-
func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
out.Type = extensions.DeploymentConditionType(in.Type)
out.Status = api.ConditionStatus(in.Status)
@@ -392,34 +378,6 @@ func Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.
return autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in, out, s)
}
-func autoConvert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta2.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
- out.Name = in.Name
- out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
- if err := Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function.
-func Convert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta2.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
- return autoConvert_v1beta2_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s)
-}
-
-func autoConvert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta2.DeploymentRollback, s conversion.Scope) error {
- out.Name = in.Name
- out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
- if err := Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback is an autogenerated conversion function.
-func Convert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta2.DeploymentRollback, s conversion.Scope) error {
- return autoConvert_extensions_DeploymentRollback_To_v1beta2_DeploymentRollback(in, out, s)
-}
-
func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
@@ -434,7 +392,6 @@ func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
- out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
@@ -453,7 +410,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensi
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
- out.RollbackTo = (*v1beta2.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
+ // WARNING: in.RollbackTo requires manual conversion: does not exist in peer-type
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
@@ -676,26 +633,6 @@ func Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensi
return autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s)
}
-func autoConvert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(in *v1beta2.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
- out.Revision = in.Revision
- return nil
-}
-
-// Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function.
-func Convert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(in *v1beta2.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
- return autoConvert_v1beta2_RollbackConfig_To_extensions_RollbackConfig(in, out, s)
-}
-
-func autoConvert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(in *extensions.RollbackConfig, out *v1beta2.RollbackConfig, s conversion.Scope) error {
- out.Revision = in.Revision
- return nil
-}
-
-// Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig is an autogenerated conversion function.
-func Convert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(in *extensions.RollbackConfig, out *v1beta2.RollbackConfig, s conversion.Scope) error {
- return autoConvert_extensions_RollbackConfig_To_v1beta2_RollbackConfig(in, out, s)
-}
-
func autoConvert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
return nil
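
With the generated Convert_* wrappers for Deployment, DeploymentRollback, and RollbackConfig removed, converting a Deployment between the extensions and v1beta2 types now falls to hand-written code; the generator's WARNING above flags that extensions.DeploymentSpec.RollbackTo has no peer field in v1beta2. The following is only a sketch of what such manual wrappers could look like, reusing the autoConvert_* functions that remain in this file; it is not the code introduced by this change.

package v1beta2

import (
	v1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/conversion"
	extensions "k8s.io/kubernetes/pkg/apis/extensions"
)

// Hypothetical hand-written wrappers around the remaining autoConvert_* functions.
func Convert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error {
	// v1beta2 carries no rollback fields, so the generated body is sufficient here.
	return autoConvert_v1beta2_Deployment_To_extensions_Deployment(in, out, s)
}

func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error {
	if err := autoConvert_extensions_Deployment_To_v1beta2_Deployment(in, out, s); err != nil {
		return err
	}
	// in.Spec.RollbackTo has no v1beta2 peer field (see the WARNING emitted by the
	// generator above); a hand-written conversion must decide to drop or reject it.
	return nil
}
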
diff --git a/pkg/apis/apps/v1beta2/zz_generated.defaults.go b/pkg/apis/apps/v1beta2/zz_generated.defaults.go
new file mode 100644
index 00000000000..02ced2d8e39
--- /dev/null
+++ b/pkg/apis/apps/v1beta2/zz_generated.defaults.go
@@ -0,0 +1,613 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by defaulter-gen. Do not edit it manually!
+
+package v1beta2
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&v1beta2.DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*v1beta2.DaemonSet)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*v1beta2.DaemonSetList)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*v1beta2.Deployment)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*v1beta2.DeploymentList)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*v1beta2.ReplicaSet)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*v1beta2.ReplicaSetList)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*v1beta2.StatefulSet)) })
+ scheme.AddTypeDefaultingFunc(&v1beta2.StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*v1beta2.StatefulSetList)) })
+ return nil
+}
+
+func SetObjectDefaults_DaemonSet(in *v1beta2.DaemonSet) {
+ SetDefaults_DaemonSet(in)
+ v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
+ for i := range in.Spec.Template.Spec.Volumes {
+ a := &in.Spec.Template.Spec.Volumes[i]
+ v1.SetDefaults_Volume(a)
+ if a.VolumeSource.Secret != nil {
+ v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
+ }
+ if a.VolumeSource.ISCSI != nil {
+ v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
+ }
+ if a.VolumeSource.RBD != nil {
+ v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
+ }
+ if a.VolumeSource.DownwardAPI != nil {
+ v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
+ for j := range a.VolumeSource.DownwardAPI.Items {
+ b := &a.VolumeSource.DownwardAPI.Items[j]
+ if b.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
+ }
+ }
+ }
+ if a.VolumeSource.ConfigMap != nil {
+ v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
+ }
+ if a.VolumeSource.AzureDisk != nil {
+ v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
+ }
+ if a.VolumeSource.Projected != nil {
+ v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
+ for j := range a.VolumeSource.Projected.Sources {
+ b := &a.VolumeSource.Projected.Sources[j]
+ if b.DownwardAPI != nil {
+ for k := range b.DownwardAPI.Items {
+ c := &b.DownwardAPI.Items[k]
+ if c.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
+ }
+ }
+ }
+ }
+ }
+ if a.VolumeSource.ScaleIO != nil {
+ v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
+ }
+ }
+ for i := range in.Spec.Template.Spec.InitContainers {
+ a := &in.Spec.Template.Spec.InitContainers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+ for i := range in.Spec.Template.Spec.Containers {
+ a := &in.Spec.Template.Spec.Containers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+}
+
+func SetObjectDefaults_DaemonSetList(in *v1beta2.DaemonSetList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_DaemonSet(a)
+ }
+}
+
+func SetObjectDefaults_Deployment(in *v1beta2.Deployment) {
+ SetDefaults_Deployment(in)
+ v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
+ for i := range in.Spec.Template.Spec.Volumes {
+ a := &in.Spec.Template.Spec.Volumes[i]
+ v1.SetDefaults_Volume(a)
+ if a.VolumeSource.Secret != nil {
+ v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
+ }
+ if a.VolumeSource.ISCSI != nil {
+ v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
+ }
+ if a.VolumeSource.RBD != nil {
+ v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
+ }
+ if a.VolumeSource.DownwardAPI != nil {
+ v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
+ for j := range a.VolumeSource.DownwardAPI.Items {
+ b := &a.VolumeSource.DownwardAPI.Items[j]
+ if b.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
+ }
+ }
+ }
+ if a.VolumeSource.ConfigMap != nil {
+ v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
+ }
+ if a.VolumeSource.AzureDisk != nil {
+ v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
+ }
+ if a.VolumeSource.Projected != nil {
+ v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
+ for j := range a.VolumeSource.Projected.Sources {
+ b := &a.VolumeSource.Projected.Sources[j]
+ if b.DownwardAPI != nil {
+ for k := range b.DownwardAPI.Items {
+ c := &b.DownwardAPI.Items[k]
+ if c.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
+ }
+ }
+ }
+ }
+ }
+ if a.VolumeSource.ScaleIO != nil {
+ v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
+ }
+ }
+ for i := range in.Spec.Template.Spec.InitContainers {
+ a := &in.Spec.Template.Spec.InitContainers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+ for i := range in.Spec.Template.Spec.Containers {
+ a := &in.Spec.Template.Spec.Containers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+}
+
+func SetObjectDefaults_DeploymentList(in *v1beta2.DeploymentList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Deployment(a)
+ }
+}
+
+func SetObjectDefaults_ReplicaSet(in *v1beta2.ReplicaSet) {
+ SetDefaults_ReplicaSet(in)
+ v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
+ for i := range in.Spec.Template.Spec.Volumes {
+ a := &in.Spec.Template.Spec.Volumes[i]
+ v1.SetDefaults_Volume(a)
+ if a.VolumeSource.Secret != nil {
+ v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
+ }
+ if a.VolumeSource.ISCSI != nil {
+ v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
+ }
+ if a.VolumeSource.RBD != nil {
+ v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
+ }
+ if a.VolumeSource.DownwardAPI != nil {
+ v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
+ for j := range a.VolumeSource.DownwardAPI.Items {
+ b := &a.VolumeSource.DownwardAPI.Items[j]
+ if b.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
+ }
+ }
+ }
+ if a.VolumeSource.ConfigMap != nil {
+ v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
+ }
+ if a.VolumeSource.AzureDisk != nil {
+ v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
+ }
+ if a.VolumeSource.Projected != nil {
+ v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
+ for j := range a.VolumeSource.Projected.Sources {
+ b := &a.VolumeSource.Projected.Sources[j]
+ if b.DownwardAPI != nil {
+ for k := range b.DownwardAPI.Items {
+ c := &b.DownwardAPI.Items[k]
+ if c.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
+ }
+ }
+ }
+ }
+ }
+ if a.VolumeSource.ScaleIO != nil {
+ v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
+ }
+ }
+ for i := range in.Spec.Template.Spec.InitContainers {
+ a := &in.Spec.Template.Spec.InitContainers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+ for i := range in.Spec.Template.Spec.Containers {
+ a := &in.Spec.Template.Spec.Containers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+}
+
+func SetObjectDefaults_ReplicaSetList(in *v1beta2.ReplicaSetList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_ReplicaSet(a)
+ }
+}
+
+func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) {
+ SetDefaults_StatefulSet(in)
+ v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
+ for i := range in.Spec.Template.Spec.Volumes {
+ a := &in.Spec.Template.Spec.Volumes[i]
+ v1.SetDefaults_Volume(a)
+ if a.VolumeSource.Secret != nil {
+ v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
+ }
+ if a.VolumeSource.ISCSI != nil {
+ v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
+ }
+ if a.VolumeSource.RBD != nil {
+ v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
+ }
+ if a.VolumeSource.DownwardAPI != nil {
+ v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
+ for j := range a.VolumeSource.DownwardAPI.Items {
+ b := &a.VolumeSource.DownwardAPI.Items[j]
+ if b.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
+ }
+ }
+ }
+ if a.VolumeSource.ConfigMap != nil {
+ v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
+ }
+ if a.VolumeSource.AzureDisk != nil {
+ v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
+ }
+ if a.VolumeSource.Projected != nil {
+ v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
+ for j := range a.VolumeSource.Projected.Sources {
+ b := &a.VolumeSource.Projected.Sources[j]
+ if b.DownwardAPI != nil {
+ for k := range b.DownwardAPI.Items {
+ c := &b.DownwardAPI.Items[k]
+ if c.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
+ }
+ }
+ }
+ }
+ }
+ if a.VolumeSource.ScaleIO != nil {
+ v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
+ }
+ }
+ for i := range in.Spec.Template.Spec.InitContainers {
+ a := &in.Spec.Template.Spec.InitContainers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+ for i := range in.Spec.Template.Spec.Containers {
+ a := &in.Spec.Template.Spec.Containers[i]
+ v1.SetDefaults_Container(a)
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ v1.SetDefaults_ContainerPort(b)
+ }
+ for j := range a.Env {
+ b := &a.Env[j]
+ if b.ValueFrom != nil {
+ if b.ValueFrom.FieldRef != nil {
+ v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
+ }
+ }
+ }
+ v1.SetDefaults_ResourceList(&a.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Resources.Requests)
+ if a.LivenessProbe != nil {
+ v1.SetDefaults_Probe(a.LivenessProbe)
+ if a.LivenessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.ReadinessProbe != nil {
+ v1.SetDefaults_Probe(a.ReadinessProbe)
+ if a.ReadinessProbe.Handler.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
+ }
+ }
+ if a.Lifecycle != nil {
+ if a.Lifecycle.PostStart != nil {
+ if a.Lifecycle.PostStart.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
+ }
+ }
+ if a.Lifecycle.PreStop != nil {
+ if a.Lifecycle.PreStop.HTTPGet != nil {
+ v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
+ }
+ }
+ }
+ }
+ for i := range in.Spec.VolumeClaimTemplates {
+ a := &in.Spec.VolumeClaimTemplates[i]
+ v1.SetDefaults_PersistentVolumeClaim(a)
+ v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
+ v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
+ v1.SetDefaults_ResourceList(&a.Status.Capacity)
+ }
+}
+
+func SetObjectDefaults_StatefulSetList(in *v1beta2.StatefulSetList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_StatefulSet(a)
+ }
+}
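
The new RegisterDefaults registers every SetObjectDefaults_* function above via AddTypeDefaultingFunc, so callers normally apply defaults through a runtime.Scheme rather than invoking these functions directly. A minimal sketch of that pattern, assuming only the exported functions in this file and the standard apimachinery Scheme API, and not taken from this change:

package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	defaults "k8s.io/kubernetes/pkg/apis/apps/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	// Wires every SetObjectDefaults_* function above into the scheme.
	if err := defaults.RegisterDefaults(scheme); err != nil {
		panic(err)
	}

	d := &appsv1beta2.Deployment{}
	d.Spec.Template.Spec.Containers = []corev1.Container{{Name: "web", Image: "nginx"}}

	// Default dispatches on the concrete type and calls SetObjectDefaults_Deployment,
	// which defaults the Deployment itself and its pod template (e.g. pull policy).
	scheme.Default(d)
	fmt.Println(d.Spec.Template.Spec.Containers[0].ImagePullPolicy)
}
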
diff --git a/staging/src/k8s.io/api/apps/v1beta1/generated.proto b/staging/src/k8s.io/api/apps/v1beta1/generated.proto
index 476571e389a..0a81339d227 100644
--- a/staging/src/k8s.io/api/apps/v1beta1/generated.proto
+++ b/staging/src/k8s.io/api/apps/v1beta1/generated.proto
@@ -109,6 +109,7 @@ message DeploymentList {
repeated Deployment items = 2;
}
+// DEPRECATED.
// DeploymentRollback stores the information required to rollback a deployment.
message DeploymentRollback {
// Required: This must match the Name of a deployment.
@@ -157,6 +158,7 @@ message DeploymentSpec {
// +optional
optional bool paused = 7;
+ // DEPRECATED.
// The config this deployment is rolling back to. Will be cleared after rollback is done.
// +optional
optional RollbackConfig rollbackTo = 8;
@@ -224,6 +226,7 @@ message DeploymentStrategy {
optional RollingUpdateDeployment rollingUpdate = 2;
}
+// DEPRECATED.
message RollbackConfig {
// The revision to rollback to. If set to 0, rollback to the last revision.
// +optional
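
For context, the message being marked DEPRECATED here is the rollback request a client POSTs for a v1beta1 Deployment. A purely illustrative Go construction of that object, using the corresponding types under k8s.io/api/apps/v1beta1 and a placeholder deployment name, might look like this:

package example

import appsv1beta1 "k8s.io/api/apps/v1beta1"

// newRollback builds the deprecated rollback request described by the proto above;
// "nginx-deployment" is a placeholder and must match the name of an existing Deployment.
func newRollback() appsv1beta1.DeploymentRollback {
	return appsv1beta1.DeploymentRollback{
		Name: "nginx-deployment",
		RollbackTo: appsv1beta1.RollbackConfig{
			Revision: 0, // 0 means roll back to the last revision
		},
	}
}
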
diff --git a/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
index 00d1a617fcc..a4ea771081d 100644
--- a/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
+++ b/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
@@ -84,7 +84,7 @@ func (DeploymentList) SwaggerDoc() map[string]string {
}
var map_DeploymentRollback = map[string]string{
- "": "DeploymentRollback stores the information required to rollback a deployment.",
+ "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"name": "Required: This must match the Name of a deployment.",
"updatedAnnotations": "The annotations to be updated to a deployment",
"rollbackTo": "The config of this deployment rollback.",
@@ -103,7 +103,7 @@ var map_DeploymentSpec = map[string]string{
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.",
"paused": "Indicates that the deployment is paused.",
- "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
+ "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
}
@@ -138,6 +138,7 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string {
}
var map_RollbackConfig = map[string]string{
+ "": "DEPRECATED.",
"revision": "The revision to rollback to. If set to 0, rollback to the last revision.",
}
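
The map_* tables above back the generated SwaggerDoc() methods on each type (the DeploymentList method is visible at the top of this hunk). Assuming DeploymentRollback exposes the same method, the updated strings can be read back as in this sketch:

package example

import appsv1beta1 "k8s.io/api/apps/v1beta1"

// rollbackDocs returns the type-level description (empty-string key), which after
// this change begins with "DEPRECATED.", plus the per-field string for rollbackTo.
func rollbackDocs() (typeDoc, fieldDoc string) {
	docs := appsv1beta1.DeploymentRollback{}.SwaggerDoc()
	return docs[""], docs["rollbackTo"]
}
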
diff --git a/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go b/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go
new file mode 100644
index 00000000000..95ca19bac2a
--- /dev/null
+++ b/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go
@@ -0,0 +1,6549 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1beta2 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
+
+ It has these top-level messages:
+ DaemonSet
+ DaemonSetList
+ DaemonSetSpec
+ DaemonSetStatus
+ DaemonSetUpdateStrategy
+ Deployment
+ DeploymentCondition
+ DeploymentList
+ DeploymentSpec
+ DeploymentStatus
+ DeploymentStrategy
+ ReplicaSet
+ ReplicaSetCondition
+ ReplicaSetList
+ ReplicaSetSpec
+ ReplicaSetStatus
+ RollingUpdateDaemonSet
+ RollingUpdateDeployment
+ RollingUpdateStatefulSetStrategy
+ Scale
+ ScaleSpec
+ ScaleStatus
+ StatefulSet
+ StatefulSetList
+ StatefulSetSpec
+ StatefulSetStatus
+ StatefulSetUpdateStrategy
+*/
+package v1beta2
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_api_core_v1 "k8s.io/api/core/v1"
+
+import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"
+
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
+func (*DaemonSet) ProtoMessage() {}
+func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
+
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
+func (*DaemonSetList) ProtoMessage() {}
+func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} }
+
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
+func (*DaemonSetSpec) ProtoMessage() {}
+func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} }
+
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
+func (*DaemonSetStatus) ProtoMessage() {}
+func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
+
+func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
+func (*DaemonSetUpdateStrategy) ProtoMessage() {}
+func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
+
+func (m *Deployment) Reset() { *m = Deployment{} }
+func (*Deployment) ProtoMessage() {}
+func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} }
+
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
+func (*DeploymentCondition) ProtoMessage() {}
+func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} }
+
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
+func (*DeploymentList) ProtoMessage() {}
+func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} }
+
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
+func (*DeploymentSpec) ProtoMessage() {}
+func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} }
+
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
+func (*DeploymentStatus) ProtoMessage() {}
+func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }
+
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
+func (*DeploymentStrategy) ProtoMessage() {}
+func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} }
+
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
+func (*ReplicaSet) ProtoMessage() {}
+func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} }
+
+func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
+func (*ReplicaSetCondition) ProtoMessage() {}
+func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }
+
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
+func (*ReplicaSetList) ProtoMessage() {}
+func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} }
+
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
+func (*ReplicaSetSpec) ProtoMessage() {}
+func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }
+
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
+func (*ReplicaSetStatus) ProtoMessage() {}
+func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }
+
+func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
+func (*RollingUpdateDaemonSet) ProtoMessage() {}
+func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }
+
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
+func (*RollingUpdateDeployment) ProtoMessage() {}
+func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{17}
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
+func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
+func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{18}
+}
+
+func (m *Scale) Reset() { *m = Scale{} }
+func (*Scale) ProtoMessage() {}
+func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
+
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
+func (*ScaleSpec) ProtoMessage() {}
+func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }
+
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
+func (*ScaleStatus) ProtoMessage() {}
+func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
+
+func (m *StatefulSet) Reset() { *m = StatefulSet{} }
+func (*StatefulSet) ProtoMessage() {}
+func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
+
+func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
+func (*StatefulSetList) ProtoMessage() {}
+func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }
+
+func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
+func (*StatefulSetSpec) ProtoMessage() {}
+func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
+
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
+func (*StatefulSetStatus) ProtoMessage() {}
+func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
+
+func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
+func (*StatefulSetUpdateStrategy) ProtoMessage() {}
+func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{26}
+}
+
+func init() {
+ proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet")
+ proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList")
+ proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec")
+ proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus")
+ proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.DaemonSetUpdateStrategy")
+ proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta2.Deployment")
+ proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta2.DeploymentCondition")
+ proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta2.DeploymentList")
+ proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta2.DeploymentSpec")
+ proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta2.DeploymentStatus")
+ proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta2.DeploymentStrategy")
+ proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1beta2.ReplicaSet")
+ proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetCondition")
+ proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetList")
+ proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetSpec")
+ proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetStatus")
+ proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDaemonSet")
+ proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDeployment")
+ proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateStatefulSetStrategy")
+ proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale")
+ proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec")
+ proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus")
+ proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet")
+ proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList")
+ proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec")
+ proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus")
+ proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy")
+}
+func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *DaemonSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
+ n4, err := m.ListMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Selector != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size()))
+ n5, err := m.Selector.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
+ n6, err := m.Template.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size()))
+ n7, err := m.UpdateStrategy.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration))
+ if m.RevisionHistoryLimit != nil {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ }
+ return i, nil
+}
+
+func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled))
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady))
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled))
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable))
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable))
+ if m.CollisionCount != nil {
+ dAtA[i] = 0x48
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ }
+ return i, nil
+}
+
+func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i += copy(dAtA[i:], m.Type)
+ if m.RollingUpdate != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size()))
+ n8, err := m.RollingUpdate.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
+
+func (m *Deployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Deployment) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+ n9, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
+ n10, err := m.Spec.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
+ n11, err := m.Status.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ return i, nil
+}
+
+func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i += copy(dAtA[i:], m.Type)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i += copy(dAtA[i:], m.Status)
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i += copy(dAtA[i:], m.Reason)
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i += copy(dAtA[i:], m.Message)
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size()))
+ n12, err := m.LastUpdateTime.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size()))
+ n13, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ return i, nil
+}
+
+func (m *DeploymentList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
+ n14, err := m.ListMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
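+// MarshalTo for DeploymentSpec writes optional pointer fields (Replicas, Selector,
+// RevisionHistoryLimit, ProgressDeadlineSeconds) only when non-nil and encodes the
+// Paused bool as a single 0 or 1 byte after its tag.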
+func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size()))
+ n15, err := m.Selector.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
+ n16, err := m.Template.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size()))
+ n17, err := m.Strategy.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ }
+ dAtA[i] = 0x38
+ i++
+ if m.Paused {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ if m.ProgressDeadlineSeconds != nil {
+ dAtA[i] = 0x48
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds))
+ }
+ return i, nil
+}
+
+func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ if m.CollisionCount != nil {
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ }
+ return i, nil
+}
+
+func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i += copy(dAtA[i:], m.Type)
+ if m.RollingUpdate != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size()))
+ n18, err := m.RollingUpdate.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ return i, nil
+}
+
+func (m *ReplicaSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+ n19, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
+ n20, err := m.Spec.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
+ n21, err := m.Status.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ return i, nil
+}
+
+func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i += copy(dAtA[i:], m.Type)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i += copy(dAtA[i:], m.Status)
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size()))
+ n22, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i += copy(dAtA[i:], m.Reason)
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i += copy(dAtA[i:], m.Message)
+ return i, nil
+}
+
+func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
+ n23, err := m.ListMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size()))
+ n24, err := m.Selector.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
+ n25, err := m.Template.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n25
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ return i, nil
+}
+
+func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size()))
+ n26, err := m.MaxUnavailable.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ }
+ return i, nil
+}
+
+func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size()))
+ n27, err := m.MaxUnavailable.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ }
+ if m.MaxSurge != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size()))
+ n28, err := m.MaxSurge.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n28
+ }
+ return i, nil
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Partition != nil {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition))
+ }
+ return i, nil
+}
+
+func (m *Scale) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Scale) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+ n29, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n29
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
+ n30, err := m.Spec.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n30
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
+ n31, err := m.Status.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n31
+ return i, nil
+}
+
+func (m *ScaleSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ return i, nil
+}
+
+func (m *ScaleStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
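+// MarshalTo for ScaleStatus serializes the Selector map as one nested key/value
+// entry message per pair, visiting keys in sorted order so the output is
+// deterministic.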
+func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ if len(m.Selector) > 0 {
+ keysForSelector := make([]string, 0, len(m.Selector))
+ for k := range m.Selector {
+ keysForSelector = append(keysForSelector, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ for _, k := range keysForSelector {
+ dAtA[i] = 0x12
+ i++
+ v := m.Selector[string(k)]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i += copy(dAtA[i:], v)
+ }
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector)))
+ i += copy(dAtA[i:], m.TargetSelector)
+ return i, nil
+}
+
+func (m *StatefulSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+ n32, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n32
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
+ n33, err := m.Spec.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n33
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
+ n34, err := m.Status.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n34
+ return i, nil
+}
+
+func (m *StatefulSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
+ n35, err := m.ListMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n35
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size()))
+ n36, err := m.Selector.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n36
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
+ n37, err := m.Template.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n37
+ if len(m.VolumeClaimTemplates) > 0 {
+ for _, msg := range m.VolumeClaimTemplates {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName)))
+ i += copy(dAtA[i:], m.ServiceName)
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy)))
+ i += copy(dAtA[i:], m.PodManagementPolicy)
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size()))
+ n38, err := m.UpdateStrategy.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n38
+ if m.RevisionHistoryLimit != nil {
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ }
+ return i, nil
+}
+
+func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas))
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision)))
+ i += copy(dAtA[i:], m.CurrentRevision)
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision)))
+ i += copy(dAtA[i:], m.UpdateRevision)
+ return i, nil
+}
+
+func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i += copy(dAtA[i:], m.Type)
+ if m.RollingUpdate != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size()))
+ n39, err := m.RollingUpdate.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n39
+ }
+ return i, nil
+}
+
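+// encodeFixed64Generated and encodeFixed32Generated write v in little-endian order
+// across 8 and 4 bytes respectively, returning the advanced offset.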
+func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ dAtA[offset+4] = uint8(v >> 32)
+ dAtA[offset+5] = uint8(v >> 40)
+ dAtA[offset+6] = uint8(v >> 48)
+ dAtA[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
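+// encodeVarintGenerated writes v as a base-128 varint (7 bits per byte, high bit set
+// on all but the last byte) starting at offset and returns the new offset.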
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
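+// The Size methods below compute the exact number of bytes the corresponding
+// MarshalTo will produce, so Marshal can allocate its buffer in a single step.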
+func (m *DaemonSet) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSetSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UpdateStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ n += 1 + sovGenerated(uint64(m.TemplateGeneration))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ return n
+}
+
+func (m *DaemonSetStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberMisscheduled))
+ n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberReady))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberAvailable))
+ n += 1 + sovGenerated(uint64(m.NumberUnavailable))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ return n
+}
+
+func (m *DaemonSetUpdateStrategy) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Deployment) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Strategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ n += 2
+ if m.ProgressDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds))
+ }
+ return n
+}
+
+func (m *DeploymentStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ReplicaSet) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReplicaSetSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ return n
+}
+
+func (m *ReplicaSetStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RollingUpdateDaemonSet) Size() (n int) {
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingUpdateDeployment) Size() (n int) {
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Size() (n int) {
+ var l int
+ _ = l
+ if m.Partition != nil {
+ n += 1 + sovGenerated(uint64(*m.Partition))
+ }
+ return n
+}
+
+func (m *Scale) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ScaleSpec) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ return n
+}
+
+func (m *ScaleStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ if len(m.Selector) > 0 {
+ for k, v := range m.Selector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.TargetSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSet) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatefulSetSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeClaimTemplates) > 0 {
+ for _, e := range m.VolumeClaimTemplates {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ServiceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodManagementPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UpdateStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ return n
+}
+
+func (m *StatefulSetStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ n += 1 + sovGenerated(uint64(m.CurrentReplicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ l = len(m.CurrentRevision)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UpdateRevision)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSetUpdateStrategy) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
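+// sovGenerated returns how many bytes the varint encoding of x occupies.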
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
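+// sozGenerated measures the zig-zag (sint) encoding of x, which maps signed values
+// to unsigned so that small negative numbers stay short.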
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
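+// The String methods below build a compact one-line rendering of each message for
+// debugging output, printing pointer scalars through valueToStringGenerated.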
+func (this *DaemonSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetSpec{`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetStatus{`,
+ `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`,
+ `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`,
+ `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`,
+ `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`,
+ `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`,
+ `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetUpdateStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetUpdateStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Deployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Deployment{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`,
+ `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`,
+ `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetStatus{`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateDaemonSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateDaemonSet{`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateDeployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateDeployment{`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`,
+ `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateStatefulSetStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`,
+ `Partition:` + valueToStringGenerated(this.Partition) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Scale) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Scale{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ScaleSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ScaleSpec{`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ScaleStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForSelector := make([]string, 0, len(this.Selector))
+ for k := range this.Selector {
+ keysForSelector = append(keysForSelector, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ mapStringForSelector := "map[string]string{"
+ for _, k := range keysForSelector {
+ mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
+ }
+ mapStringForSelector += "}"
+ s := strings.Join([]string{`&ScaleStatus{`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `Selector:` + mapStringForSelector + `,`,
+ `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`,
+ `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
+ `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`,
+ `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`,
+ `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetUpdateStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetUpdateStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
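+// valueToStringGenerated formats the value behind a pointer, returning "nil" when
+// the pointer itself is nil.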
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
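+// The Unmarshal methods below decode the wire format one key at a time: each loop
+// iteration reads a varint tag, splits it into field number and wire type, then
+// either fills the matching field or skips unknown data with skipGenerated.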
+func (m *DaemonSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DaemonSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType)
+ }
+ m.TemplateGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TemplateGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType)
+ }
+ m.CurrentNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType)
+ }
+ m.NumberMisscheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberMisscheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType)
+ }
+ m.DesiredNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType)
+ }
+ m.NumberReady = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberReady |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType)
+ }
+ m.UpdatedNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType)
+ }
+ m.NumberAvailable = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberAvailable |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType)
+ }
+ m.NumberUnavailable = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberUnavailable |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDaemonSet{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Deployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Deployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Deployment{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Paused = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ProgressDeadlineSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType)
+ }
+ m.UnavailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UnavailableReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, DeploymentCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDeployment{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ReplicaSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType)
+ }
+ m.FullyLabeledReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, ReplicaSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Partition = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Scale) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Scale: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScaleSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ if m.Selector == nil {
+ m.Selector = make(map[string]string)
+ }
+ if iNdEx < postIndex {
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ m.Selector[mapkey] = mapvalue
+ } else {
+ var mapvalue string
+ m.Selector[mapkey] = mapvalue
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TargetSelector = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, StatefulSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{})
+ if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType)
+ }
+ m.CurrentReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CurrentReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CurrentRevision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UpdateRevision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateStatefulSetStrategy{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+ proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated)
+}
+
+var fileDescriptorGenerated = []byte{
+ // 2048 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcb, 0x6f, 0x1c, 0xb7,
+ 0x19, 0xf7, 0xec, 0x43, 0xda, 0xa5, 0x22, 0xc9, 0xa6, 0x54, 0x69, 0x23, 0xb7, 0x2b, 0x61, 0x12,
+ 0x38, 0x72, 0x1c, 0xcf, 0xda, 0xca, 0x03, 0x89, 0x0d, 0xb4, 0xd5, 0x4a, 0xad, 0xed, 0x40, 0x92,
+ 0x15, 0x4a, 0x32, 0xd0, 0xa0, 0x05, 0x4c, 0xed, 0xd2, 0xab, 0x89, 0xe6, 0x85, 0x19, 0xce, 0x22,
+ 0x8b, 0x5e, 0x7a, 0x2a, 0x50, 0xa0, 0x40, 0x73, 0xee, 0x3f, 0xd1, 0x9e, 0x8a, 0xa2, 0xbd, 0x15,
+ 0x45, 0xe1, 0x4b, 0x81, 0xa0, 0x3d, 0x24, 0x27, 0xa1, 0xde, 0x1c, 0xfb, 0x1f, 0x04, 0x28, 0x50,
+ 0x90, 0xc3, 0x79, 0x70, 0x1e, 0xd6, 0x48, 0xb5, 0xd5, 0x20, 0xb7, 0x5d, 0x7e, 0xbf, 0xef, 0xc7,
+ 0x8f, 0xe4, 0x47, 0x7e, 0x3f, 0x72, 0xc0, 0x0f, 0x8f, 0xdf, 0xf7, 0x34, 0xdd, 0xee, 0x1c, 0xfb,
+ 0x87, 0xc4, 0xb5, 0x08, 0x25, 0x5e, 0x67, 0x48, 0xac, 0xbe, 0xed, 0x76, 0x84, 0x01, 0x3b, 0x7a,
+ 0x07, 0x3b, 0x8e, 0xd7, 0x19, 0xde, 0x3e, 0x24, 0x14, 0xaf, 0x75, 0x06, 0xc4, 0x22, 0x2e, 0xa6,
+ 0xa4, 0xaf, 0x39, 0xae, 0x4d, 0x6d, 0xb8, 0x18, 0x00, 0x35, 0xec, 0xe8, 0x1a, 0x03, 0x6a, 0x02,
+ 0xb8, 0x74, 0x73, 0xa0, 0xd3, 0x23, 0xff, 0x50, 0xeb, 0xd9, 0x66, 0x67, 0x60, 0x0f, 0xec, 0x0e,
+ 0xc7, 0x1f, 0xfa, 0x4f, 0xf8, 0x3f, 0xfe, 0x87, 0xff, 0x0a, 0x78, 0x96, 0xd4, 0x44, 0x87, 0x3d,
+ 0xdb, 0x25, 0x9d, 0xe1, 0xed, 0x74, 0x5f, 0x4b, 0xd7, 0x13, 0x18, 0xc7, 0x36, 0xf4, 0xde, 0x48,
+ 0x84, 0x95, 0x85, 0xbe, 0x13, 0x43, 0x4d, 0xdc, 0x3b, 0xd2, 0x2d, 0xe2, 0x8e, 0x3a, 0xce, 0xf1,
+ 0x80, 0x35, 0x78, 0x1d, 0x93, 0x50, 0x9c, 0xd7, 0x41, 0xa7, 0xc8, 0xcb, 0xf5, 0x2d, 0xaa, 0x9b,
+ 0x24, 0xe3, 0xf0, 0xde, 0x69, 0x0e, 0x5e, 0xef, 0x88, 0x98, 0x38, 0xe3, 0xf7, 0x76, 0x91, 0x9f,
+ 0x4f, 0x75, 0xa3, 0xa3, 0x5b, 0xd4, 0xa3, 0x6e, 0xda, 0x49, 0xfd, 0x55, 0x05, 0x34, 0x37, 0x31,
+ 0x31, 0x6d, 0x6b, 0x8f, 0x50, 0xf8, 0x18, 0x34, 0xd8, 0x30, 0xfa, 0x98, 0xe2, 0x96, 0xb2, 0xa2,
+ 0xac, 0x4e, 0xad, 0xdd, 0xd2, 0xe2, 0xb5, 0x88, 0x58, 0x35, 0xe7, 0x78, 0xc0, 0x1a, 0x3c, 0x8d,
+ 0xa1, 0xb5, 0xe1, 0x6d, 0xed, 0xe1, 0xe1, 0x27, 0xa4, 0x47, 0xb7, 0x09, 0xc5, 0x5d, 0xf8, 0xf4,
+ 0x64, 0xf9, 0xd2, 0xf8, 0x64, 0x19, 0xc4, 0x6d, 0x28, 0x62, 0x85, 0xf7, 0x41, 0xcd, 0x73, 0x48,
+ 0xaf, 0x55, 0xe1, 0xec, 0xd7, 0xb4, 0x82, 0x95, 0xd6, 0xa2, 0x98, 0xf6, 0x1c, 0xd2, 0xeb, 0xbe,
+ 0x22, 0x38, 0x6b, 0xec, 0x1f, 0xe2, 0x0c, 0x70, 0x17, 0x4c, 0x78, 0x14, 0x53, 0xdf, 0x6b, 0x55,
+ 0x39, 0xd7, 0x6a, 0x09, 0x2e, 0x8e, 0xef, 0xce, 0x08, 0xb6, 0x89, 0xe0, 0x3f, 0x12, 0x3c, 0xea,
+ 0x1f, 0x14, 0x30, 0x1d, 0x61, 0xb7, 0x74, 0x8f, 0xc2, 0x9f, 0x66, 0xe6, 0x43, 0x2b, 0x37, 0x1f,
+ 0xcc, 0x9b, 0xcf, 0xc6, 0x65, 0xd1, 0x57, 0x23, 0x6c, 0x49, 0xcc, 0xc5, 0x3d, 0x50, 0xd7, 0x29,
+ 0x31, 0xbd, 0x56, 0x65, 0xa5, 0xba, 0x3a, 0xb5, 0xa6, 0x9e, 0x3e, 0x80, 0xee, 0xb4, 0xa0, 0xab,
+ 0x3f, 0x60, 0x8e, 0x28, 0xf0, 0x57, 0x3f, 0xab, 0x25, 0x02, 0x67, 0x53, 0x04, 0x7f, 0x06, 0x1a,
+ 0x1e, 0x31, 0x48, 0x8f, 0xda, 0xae, 0x08, 0xfc, 0xed, 0x92, 0x81, 0xe3, 0x43, 0x62, 0xec, 0x09,
+ 0xd7, 0xee, 0x2b, 0x2c, 0xf2, 0xf0, 0x1f, 0x8a, 0x28, 0xe1, 0x47, 0xa0, 0x41, 0x89, 0xe9, 0x18,
+ 0x98, 0x12, 0xb1, 0x92, 0xaf, 0x25, 0x83, 0x67, 0x7b, 0x8d, 0x91, 0xed, 0xda, 0xfd, 0x7d, 0x01,
+ 0xe3, 0xcb, 0x18, 0x4d, 0x46, 0xd8, 0x8a, 0x22, 0x1a, 0xe8, 0x80, 0x19, 0xdf, 0xe9, 0x33, 0x24,
+ 0x65, 0xf9, 0x39, 0x18, 0x89, 0x65, 0xbd, 0x75, 0xfa, 0xac, 0x1c, 0x48, 0x7e, 0xdd, 0x05, 0xd1,
+ 0xcb, 0x8c, 0xdc, 0x8e, 0x52, 0xfc, 0x70, 0x1d, 0xcc, 0x9a, 0xba, 0x85, 0x08, 0xee, 0x8f, 0xf6,
+ 0x48, 0xcf, 0xb6, 0xfa, 0x5e, 0xab, 0xb6, 0xa2, 0xac, 0xd6, 0xbb, 0x8b, 0x82, 0x60, 0x76, 0x5b,
+ 0x36, 0xa3, 0x34, 0x1e, 0x7e, 0x08, 0x60, 0x38, 0x80, 0x7b, 0xc1, 0xc6, 0xd2, 0x6d, 0xab, 0x55,
+ 0x5f, 0x51, 0x56, 0xab, 0xdd, 0x25, 0xc1, 0x02, 0xf7, 0x33, 0x08, 0x94, 0xe3, 0x05, 0xb7, 0xc0,
+ 0xbc, 0x4b, 0x86, 0xba, 0xa7, 0xdb, 0xd6, 0x7d, 0xdd, 0xa3, 0xb6, 0x3b, 0xda, 0xd2, 0x4d, 0x9d,
+ 0xb6, 0x26, 0x78, 0x4c, 0xad, 0xf1, 0xc9, 0xf2, 0x3c, 0xca, 0xb1, 0xa3, 0x5c, 0x2f, 0xf5, 0xf7,
+ 0x75, 0x30, 0x9b, 0xca, 0x7b, 0xf8, 0x08, 0x2c, 0xf4, 0x7c, 0xd7, 0x25, 0x16, 0xdd, 0xf1, 0xcd,
+ 0x43, 0xe2, 0xee, 0xf5, 0x8e, 0x48, 0xdf, 0x37, 0x48, 0x9f, 0xa7, 0x48, 0xbd, 0xdb, 0x16, 0x11,
+ 0x2f, 0x6c, 0xe4, 0xa2, 0x50, 0x81, 0x37, 0x9b, 0x05, 0x8b, 0x37, 0x6d, 0xeb, 0x9e, 0x17, 0x71,
+ 0x56, 0x38, 0x67, 0x34, 0x0b, 0x3b, 0x19, 0x04, 0xca, 0xf1, 0x62, 0x31, 0xf6, 0x89, 0xa7, 0xbb,
+ 0xa4, 0x9f, 0x8e, 0xb1, 0x2a, 0xc7, 0xb8, 0x99, 0x8b, 0x42, 0x05, 0xde, 0xf0, 0x5d, 0x30, 0x15,
+ 0xf4, 0xc6, 0xd7, 0x4f, 0x2c, 0xf4, 0x9c, 0x20, 0x9b, 0xda, 0x89, 0x4d, 0x28, 0x89, 0x63, 0x43,
+ 0xb3, 0x0f, 0x3d, 0xe2, 0x0e, 0x49, 0xbf, 0x78, 0x81, 0x1f, 0x66, 0x10, 0x28, 0xc7, 0x8b, 0x0d,
+ 0x2d, 0xc8, 0xc0, 0xcc, 0xd0, 0x26, 0xe4, 0xa1, 0x1d, 0xe4, 0xa2, 0x50, 0x81, 0x37, 0xcb, 0xe3,
+ 0x20, 0xe4, 0xf5, 0x21, 0xd6, 0x0d, 0x7c, 0x68, 0x90, 0xd6, 0xa4, 0x9c, 0xc7, 0x3b, 0xb2, 0x19,
+ 0xa5, 0xf1, 0xf0, 0x1e, 0xb8, 0x12, 0x34, 0x1d, 0x58, 0x38, 0x22, 0x69, 0x70, 0x92, 0x57, 0x05,
+ 0xc9, 0x95, 0x9d, 0x34, 0x00, 0x65, 0x7d, 0xe0, 0x1d, 0x30, 0xd3, 0xb3, 0x0d, 0x83, 0xe7, 0xe3,
+ 0x86, 0xed, 0x5b, 0xb4, 0xd5, 0xe4, 0x73, 0x05, 0xd9, 0x7e, 0xdc, 0x90, 0x2c, 0x28, 0x85, 0x54,
+ 0xff, 0xaa, 0x80, 0xc5, 0x82, 0x3d, 0x0d, 0x7f, 0x00, 0x6a, 0x74, 0xe4, 0x10, 0x9e, 0xa8, 0xcd,
+ 0xee, 0x8d, 0xb0, 0x1c, 0xec, 0x8f, 0x1c, 0xf2, 0xf5, 0xc9, 0xf2, 0xd5, 0x02, 0x37, 0x66, 0x46,
+ 0xdc, 0x11, 0x1e, 0x81, 0x69, 0x97, 0x75, 0x67, 0x0d, 0x02, 0x88, 0x38, 0xb6, 0x3a, 0x85, 0xa7,
+ 0x0b, 0x4a, 0xa2, 0xe3, 0x03, 0xf8, 0xca, 0xf8, 0x64, 0x79, 0x5a, 0xb2, 0x21, 0x99, 0x58, 0xfd,
+ 0x75, 0x05, 0x80, 0x4d, 0xe2, 0x18, 0xf6, 0xc8, 0x24, 0xd6, 0x45, 0x94, 0xd4, 0x07, 0x52, 0x49,
+ 0x7d, 0xa3, 0xf8, 0xbc, 0x8c, 0x82, 0x2a, 0xac, 0xa9, 0x1f, 0xa5, 0x6a, 0xea, 0xf5, 0x32, 0x64,
+ 0xcf, 0x2f, 0xaa, 0x5f, 0x54, 0xc1, 0x5c, 0x0c, 0xde, 0xb0, 0xad, 0xbe, 0xce, 0x77, 0xc3, 0x5d,
+ 0x69, 0x45, 0xdf, 0x48, 0xad, 0xe8, 0x62, 0x8e, 0x4b, 0x62, 0x35, 0xb7, 0xa2, 0x38, 0x2b, 0xdc,
+ 0xfd, 0x1d, 0xb9, 0xf3, 0xaf, 0x4f, 0x96, 0x73, 0xa4, 0x9f, 0x16, 0x31, 0xc9, 0x21, 0xc2, 0x6b,
+ 0x60, 0xc2, 0x25, 0xd8, 0xb3, 0x2d, 0x7e, 0x2c, 0x34, 0xe3, 0xa1, 0x20, 0xde, 0x8a, 0x84, 0x15,
+ 0x5e, 0x07, 0x93, 0x26, 0xf1, 0x3c, 0x3c, 0x20, 0xfc, 0x04, 0x68, 0x76, 0x67, 0x05, 0x70, 0x72,
+ 0x3b, 0x68, 0x46, 0xa1, 0x1d, 0x7e, 0x02, 0x66, 0x0c, 0xec, 0x89, 0x74, 0xdc, 0xd7, 0x4d, 0xc2,
+ 0xf7, 0xf8, 0xd4, 0xda, 0x9b, 0xe5, 0xd6, 0x9e, 0x79, 0xc4, 0x75, 0x6c, 0x4b, 0x62, 0x42, 0x29,
+ 0x66, 0x38, 0x04, 0x90, 0xb5, 0xec, 0xbb, 0xd8, 0xf2, 0x82, 0x89, 0x62, 0xfd, 0x4d, 0x9e, 0xb9,
+ 0xbf, 0xe8, 0x3c, 0xdb, 0xca, 0xb0, 0xa1, 0x9c, 0x1e, 0xd4, 0x3f, 0x2a, 0x60, 0x26, 0x5e, 0xa6,
+ 0x0b, 0xd0, 0x4b, 0xf7, 0x65, 0xbd, 0xf4, 0x5a, 0x89, 0xe4, 0x2c, 0x10, 0x4c, 0x5f, 0xd4, 0x92,
+ 0xa1, 0x73, 0xc5, 0xb4, 0x0a, 0x1a, 0x2e, 0x71, 0x0c, 0xbd, 0x87, 0x3d, 0x51, 0x0e, 0xb9, 0xf8,
+ 0x41, 0xa2, 0x0d, 0x45, 0x56, 0x49, 0x5b, 0x55, 0x5e, 0xae, 0xb6, 0xaa, 0xbe, 0x18, 0x6d, 0xf5,
+ 0x13, 0xd0, 0xf0, 0x42, 0x55, 0x55, 0xe3, 0x94, 0x37, 0x4a, 0x6d, 0x6c, 0x21, 0xa8, 0x22, 0xea,
+ 0x48, 0x4a, 0x45, 0x74, 0x79, 0x22, 0xaa, 0x7e, 0x46, 0x11, 0xf5, 0x42, 0x85, 0x0f, 0xdb, 0xcc,
+ 0x0e, 0xf6, 0x3d, 0xd2, 0xe7, 0x3b, 0xa0, 0x11, 0x6f, 0xe6, 0x5d, 0xde, 0x8a, 0x84, 0x15, 0x1e,
+ 0x80, 0x45, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x4d, 0x82, 0xfb, 0x86, 0x6e, 0x91, 0x70, 0x00,
+ 0x4d, 0xde, 0xf1, 0xd5, 0xf1, 0xc9, 0xf2, 0xe2, 0x6e, 0x3e, 0x04, 0x15, 0xf9, 0xaa, 0x7f, 0xa9,
+ 0x81, 0xcb, 0xe9, 0xb3, 0xb1, 0x40, 0x45, 0x28, 0xe7, 0x52, 0x11, 0x6f, 0x25, 0xf2, 0x34, 0x90,
+ 0x58, 0xd1, 0xf2, 0xe4, 0xe4, 0xea, 0x3a, 0x98, 0x15, 0xaa, 0x21, 0x34, 0x0a, 0x1d, 0x15, 0x2d,
+ 0xcf, 0x81, 0x6c, 0x46, 0x69, 0x3c, 0xd3, 0x06, 0x71, 0xc9, 0x0f, 0x49, 0x6a, 0xb2, 0x36, 0x58,
+ 0x4f, 0x03, 0x50, 0xd6, 0x07, 0x6e, 0x83, 0x39, 0xdf, 0xca, 0x52, 0x05, 0xe9, 0x72, 0x55, 0x50,
+ 0xcd, 0x1d, 0x64, 0x21, 0x28, 0xcf, 0x0f, 0x3e, 0x06, 0xa0, 0x17, 0x1e, 0xe8, 0x5e, 0x6b, 0x82,
+ 0x1f, 0x09, 0x6f, 0x95, 0x48, 0xeb, 0xa8, 0x0a, 0xc4, 0x65, 0x35, 0x6a, 0xf2, 0x50, 0x82, 0x13,
+ 0xde, 0x05, 0xd3, 0x2e, 0x97, 0x84, 0x61, 0xa8, 0x81, 0xac, 0xfa, 0x8e, 0x70, 0x9b, 0x46, 0x49,
+ 0x23, 0x92, 0xb1, 0x39, 0x4a, 0xa8, 0x51, 0x5a, 0x09, 0xfd, 0x59, 0x01, 0x30, 0xbb, 0x0f, 0xe1,
+ 0x1d, 0xa9, 0x64, 0x5e, 0x4b, 0x95, 0xcc, 0x85, 0xac, 0x47, 0xa2, 0x62, 0xea, 0xf9, 0xfa, 0xe7,
+ 0x56, 0x49, 0xfd, 0x13, 0x1f, 0xa8, 0xe5, 0x04, 0x90, 0x98, 0x86, 0x8b, 0x79, 0x53, 0x28, 0x2b,
+ 0x80, 0xe2, 0xa0, 0x5e, 0x80, 0x00, 0x4a, 0x90, 0x3d, 0x5f, 0x00, 0xfd, 0xbb, 0x02, 0xe6, 0x62,
+ 0x70, 0x69, 0x01, 0x94, 0xe3, 0xf2, 0xd2, 0x04, 0x50, 0xbe, 0x82, 0xa8, 0xbe, 0x6c, 0x05, 0xf1,
+ 0x12, 0x84, 0x17, 0x17, 0x25, 0xf1, 0xd4, 0x7d, 0x93, 0x44, 0x49, 0x1c, 0x55, 0x81, 0x28, 0xf9,
+ 0x5d, 0x25, 0x19, 0xfa, 0xb7, 0x5e, 0x94, 0xfc, 0xef, 0xcf, 0x2f, 0xea, 0xdf, 0xaa, 0xe0, 0x72,
+ 0x7a, 0x1f, 0x4a, 0x05, 0x52, 0x39, 0xb5, 0x40, 0xee, 0x82, 0xf9, 0x27, 0xbe, 0x61, 0x8c, 0xf8,
+ 0x34, 0x24, 0xaa, 0x64, 0x50, 0x5a, 0xbf, 0x2b, 0x3c, 0xe7, 0x7f, 0x9c, 0x83, 0x41, 0xb9, 0x9e,
+ 0x05, 0xc5, 0xbe, 0x7a, 0xae, 0x62, 0x9f, 0xa9, 0x40, 0xb5, 0x33, 0x54, 0xa0, 0xdc, 0xc2, 0x5d,
+ 0x3f, 0x47, 0xe1, 0x3e, 0x5b, 0xa5, 0xcd, 0x39, 0xb8, 0x4e, 0xab, 0xb4, 0xea, 0x2f, 0x15, 0xb0,
+ 0x90, 0x7f, 0xe1, 0x86, 0x06, 0x98, 0x31, 0xf1, 0xa7, 0xc9, 0x77, 0x89, 0xd3, 0x8a, 0x88, 0x4f,
+ 0x75, 0x43, 0x0b, 0x9e, 0xbb, 0xb5, 0x07, 0x16, 0x7d, 0xe8, 0xee, 0x51, 0x57, 0xb7, 0x06, 0x41,
+ 0xe5, 0xdd, 0x96, 0xb8, 0x50, 0x8a, 0x5b, 0xfd, 0x4a, 0x01, 0x8b, 0x05, 0x95, 0xef, 0x62, 0x23,
+ 0x81, 0x1f, 0x83, 0x86, 0x89, 0x3f, 0xdd, 0xf3, 0xdd, 0x41, 0x5e, 0xad, 0x2e, 0xd7, 0x0f, 0xdf,
+ 0xcd, 0xdb, 0x82, 0x05, 0x45, 0x7c, 0xea, 0x43, 0xb0, 0x22, 0x0d, 0x92, 0xed, 0x1c, 0xf2, 0xc4,
+ 0x37, 0xf8, 0x26, 0x12, 0x62, 0xe3, 0x06, 0x68, 0x3a, 0xd8, 0xa5, 0x7a, 0x24, 0x55, 0xeb, 0xdd,
+ 0xe9, 0xf1, 0xc9, 0x72, 0x73, 0x37, 0x6c, 0x44, 0xb1, 0x5d, 0xfd, 0x8f, 0x02, 0xea, 0x7b, 0x3d,
+ 0x6c, 0x90, 0x0b, 0xa8, 0xf6, 0x9b, 0x52, 0xb5, 0x2f, 0x7e, 0x34, 0xe7, 0xf1, 0x14, 0x16, 0xfa,
+ 0xad, 0x54, 0xa1, 0x7f, 0xfd, 0x14, 0x9e, 0xe7, 0xd7, 0xf8, 0x0f, 0x40, 0x33, 0xea, 0xee, 0x6c,
+ 0x07, 0x90, 0xfa, 0xdb, 0x0a, 0x98, 0x4a, 0x74, 0x71, 0xc6, 0xe3, 0xeb, 0xb1, 0x74, 0xec, 0xb3,
+ 0x8d, 0xb9, 0x56, 0x66, 0x20, 0x5a, 0x78, 0xc4, 0xff, 0xc8, 0xa2, 0x6e, 0xf2, 0x82, 0x97, 0x3d,
+ 0xf9, 0xbf, 0x0f, 0x66, 0x28, 0x76, 0x07, 0x84, 0x86, 0x36, 0x3e, 0x61, 0xcd, 0xf8, 0x75, 0x62,
+ 0x5f, 0xb2, 0xa2, 0x14, 0x7a, 0xe9, 0x2e, 0x98, 0x96, 0x3a, 0x83, 0x97, 0x41, 0xf5, 0x98, 0x8c,
+ 0x02, 0xd9, 0x83, 0xd8, 0x4f, 0x38, 0x0f, 0xea, 0x43, 0x6c, 0xf8, 0x41, 0x9e, 0x37, 0x51, 0xf0,
+ 0xe7, 0x4e, 0xe5, 0x7d, 0x45, 0xfd, 0x0d, 0x9b, 0x9c, 0x38, 0x39, 0x2f, 0x20, 0xbb, 0x3e, 0x94,
+ 0xb2, 0xab, 0xf8, 0x9b, 0x52, 0x72, 0xcb, 0x14, 0xe5, 0x18, 0x4a, 0xe5, 0xd8, 0x9b, 0xa5, 0xd8,
+ 0x9e, 0x9f, 0x69, 0x7f, 0x52, 0xc0, 0x6c, 0x02, 0x7d, 0x01, 0x02, 0xe7, 0x81, 0x2c, 0x70, 0x5e,
+ 0x2f, 0x33, 0x88, 0x02, 0x85, 0xf3, 0xf7, 0xba, 0x14, 0xfc, 0xb7, 0x5e, 0xe2, 0xfc, 0x1c, 0xcc,
+ 0x0f, 0x6d, 0xc3, 0x37, 0xc9, 0x86, 0x81, 0x75, 0x33, 0x04, 0xb0, 0x2a, 0x5e, 0x4d, 0xdf, 0x2d,
+ 0x22, 0x7a, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0x1f, 0xc5, 0x9e, 0xb1, 0x0e, 0x79, 0x94, 0x43,
+ 0x87, 0x72, 0x3b, 0x81, 0xef, 0x82, 0x29, 0xa6, 0x27, 0xf4, 0x1e, 0xd9, 0xc1, 0x66, 0x28, 0x9c,
+ 0xa3, 0x2f, 0x1e, 0x7b, 0xb1, 0x09, 0x25, 0x71, 0xf0, 0x08, 0xcc, 0x39, 0x76, 0x7f, 0x1b, 0x5b,
+ 0x78, 0x40, 0x58, 0xd9, 0xdb, 0xe5, 0xdf, 0xc4, 0xf9, 0x63, 0x4c, 0xb3, 0xfb, 0x5e, 0x78, 0x4b,
+ 0xdf, 0xcd, 0x42, 0xd8, 0xa5, 0x25, 0xa7, 0x99, 0x5f, 0x5a, 0xf2, 0x28, 0xa1, 0x9b, 0xf9, 0xe2,
+ 0x17, 0xbc, 0x59, 0xae, 0x95, 0xc9, 0xb0, 0x73, 0x7e, 0xf3, 0x2b, 0x7a, 0x6b, 0x6a, 0x9c, 0xeb,
+ 0x23, 0xdb, 0x3f, 0xab, 0xe0, 0x4a, 0x66, 0xeb, 0xfe, 0x1f, 0x5f, 0x7b, 0x32, 0x72, 0xb1, 0x7a,
+ 0x06, 0xb9, 0xb8, 0x0e, 0x66, 0xc5, 0xf7, 0xbd, 0x94, 0xda, 0x8c, 0xf4, 0xf8, 0x86, 0x6c, 0x46,
+ 0x69, 0x7c, 0xde, 0x6b, 0x53, 0xfd, 0x8c, 0xaf, 0x4d, 0xc9, 0x28, 0x82, 0x19, 0x17, 0xa9, 0x97,
+ 0x8d, 0x22, 0x30, 0xa3, 0x34, 0x9e, 0x55, 0xac, 0x80, 0x35, 0x62, 0x98, 0x94, 0x2b, 0xd6, 0x81,
+ 0x64, 0x45, 0x29, 0xb4, 0xfa, 0x0f, 0x05, 0xbc, 0x5a, 0x98, 0x69, 0x70, 0x5d, 0xba, 0xb6, 0xdf,
+ 0x4c, 0x5d, 0xdb, 0xbf, 0x57, 0xe8, 0x98, 0xb8, 0xbc, 0xbb, 0xf9, 0x6f, 0x31, 0x1f, 0x94, 0x7b,
+ 0x8b, 0xc9, 0x11, 0x6b, 0xa7, 0x3f, 0xca, 0x74, 0x6f, 0x3e, 0x7d, 0xd6, 0xbe, 0xf4, 0xf9, 0xb3,
+ 0xf6, 0xa5, 0x2f, 0x9f, 0xb5, 0x2f, 0xfd, 0x62, 0xdc, 0x56, 0x9e, 0x8e, 0xdb, 0xca, 0xe7, 0xe3,
+ 0xb6, 0xf2, 0xe5, 0xb8, 0xad, 0xfc, 0x6b, 0xdc, 0x56, 0x3e, 0xfb, 0xaa, 0x7d, 0xe9, 0xe3, 0x49,
+ 0xd1, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x86, 0x7b, 0x61, 0x7b, 0xc4, 0x23, 0x00, 0x00,
+}
diff --git a/staging/src/k8s.io/api/apps/v1beta2/generated.proto b/staging/src/k8s.io/api/apps/v1beta2/generated.proto
new file mode 100644
index 00000000000..79c1f7589d7
--- /dev/null
+++ b/staging/src/k8s.io/api/apps/v1beta2/generated.proto
@@ -0,0 +1,681 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.apps.v1beta2;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/api/policy/v1beta1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta2";
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+ // +optional
+ optional DaemonSetSpec spec = 2;
+
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+ // +optional
+ optional DaemonSetStatus status = 3;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // A list of daemon sets.
+ repeated DaemonSet items = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on Pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ // +optional
+ optional DaemonSetUpdateStrategy updateStrategy = 3;
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // DEPRECATED.
+ // A sequence number representing a specific generation of the template.
+ // Populated by the system. It can be set only during the creation.
+ // +optional
+ optional int64 templateGeneration = 5;
+
+ // The number of old history to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ optional int32 revisionHistoryLimit = 6;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 currentNumberScheduled = 1;
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 numberMisscheduled = 2;
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 desiredNumberScheduled = 3;
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ optional int32 numberReady = 4;
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ optional int64 observedGeneration = 5;
+
+  // The total number of nodes that are running an updated daemon pod
+ // +optional
+ optional int32 updatedNumberScheduled = 6;
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ optional int32 numberAvailable = 7;
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ optional int32 numberUnavailable = 8;
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ optional int64 collisionCount = 9;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+message DaemonSetUpdateStrategy {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
+ // Default is OnDelete.
+ // +optional
+ optional string type = 1;
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ optional RollingUpdateDaemonSet rollingUpdate = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the Deployment.
+ // +optional
+ optional DeploymentSpec spec = 2;
+
+ // Most recently observed status of the Deployment.
+ // +optional
+ optional DeploymentStatus status = 3;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+ // Type of deployment condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time this condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+ // The reason for the condition's last transition.
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 5;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+ // Standard list metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Deployments.
+ repeated Deployment items = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ // +optional
+ optional int32 replicas = 1;
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template describes the pods that will be created.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ // +optional
+ optional DeploymentStrategy strategy = 4;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 5;
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ optional int32 revisionHistoryLimit = 6;
+
+ // Indicates that the deployment is paused.
+ // +optional
+ optional bool paused = 7;
+
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Once autoRollback is
+ // implemented, the deployment controller will automatically rollback failed
+ // deployments. Note that progress will not be estimated during the time a
+ // deployment is paused. Defaults to 600s.
+ optional int32 progressDeadlineSeconds = 9;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+ // The generation observed by the deployment controller.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional
+ optional int32 replicas = 2;
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // +optional
+ optional int32 updatedReplicas = 3;
+
+ // Total number of ready pods targeted by this deployment.
+ // +optional
+ optional int32 readyReplicas = 7;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // +optional
+ optional int32 availableReplicas = 4;
+
+ // Total number of unavailable pods targeted by this deployment.
+ // +optional
+ optional int32 unavailableReplicas = 5;
+
+ // Represents the latest available observations of a deployment's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated DeploymentCondition conditions = 6;
+
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ optional int64 collisionCount = 8;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ReplicaSet represents the configuration of a ReplicaSet.
+message ReplicaSet {
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetSpec spec = 2;
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetStatus status = 3;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+ // Type of replica set condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ReplicaSets.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ repeated ReplicaSet items = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ optional int32 replicas = 1;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // Selector is a label query over pods that should match the replica count.
+ // If the selector is empty, it is defaulted to the labels present on the pod template.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ optional int32 fullyLabeledReplicas = 2;
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ optional int32 readyReplicas = 4;
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ optional int32 availableReplicas = 5;
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ optional int64 observedGeneration = 3;
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ReplicaSetCondition conditions = 6;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old RC
+ // can be scaled down further, followed by scaling up the new RC, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new RC can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new RC can be scaled up further, ensuring that total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ optional int32 partition = 1;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// Scale represents a scaling request for a resource.
+message Scale {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+ // +optional
+ optional ScaleSpec spec = 2;
+
+ // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+ // +optional
+ optional ScaleStatus status = 3;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ScaleSpec describes the attributes of a scale subresource
+message ScaleSpec {
+ // desired number of instances for the scaled object.
+ // +optional
+ optional int32 replicas = 1;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+ // actual number of observed instances of the scaled object.
+ optional int32 replicas = 1;
+
+ // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+ // +optional
+  map<string, string> selector = 2;
+
+  // label selector for pods that should match the replicas count. This is a serialized
+ // version of both map-based and more expressive set-based selectors. This is done to
+ // avoid introspection in the clients. The string will be in the same format as the
+ // query-param syntax. If the target type only supports map-based selectors, both this
+ // field and map-based selector field are populated.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ optional string targetSelector = 3;
+}
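For orientation, here is a minimal sketch of a Scale object as the three messages above describe it, assuming the v1beta2 Go package introduced elsewhere in this change is importable; all names and values are invented for illustration.

```go
package main

import (
	"fmt"

	"k8s.io/api/apps/v1beta2" // assumption: the package added in this change
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A scale request: spec carries the desired replica count, status is what the
	// server reports back, including the serialized targetSelector string.
	s := v1beta2.Scale{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec:       v1beta2.ScaleSpec{Replicas: 5},
		Status: v1beta2.ScaleStatus{
			Replicas:       3,
			Selector:       map[string]string{"app": "web"},
			TargetSelector: "app=web",
		},
	}
	fmt.Printf("want %d replicas, observed %d\n", s.Spec.Replicas, s.Status.Replicas)
}
```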
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+message StatefulSet {
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired identities of pods in this set.
+ // +optional
+ optional StatefulSetSpec spec = 2;
+
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ // +optional
+ optional StatefulSetStatus status = 3;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// StatefulSetList is a collection of StatefulSets.
+message StatefulSetList {
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated StatefulSet items = 2;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// A StatefulSetSpec is the specification of a StatefulSet.
+message StatefulSetSpec {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ optional int32 replicas = 1;
+
+ // selector is a label query over pods that should match the replica count.
+ // If empty, defaulted to labels on the pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ optional string serviceName = 5;
+
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ // +optional
+ optional string podManagementPolicy = 6;
+
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ optional StatefulSetUpdateStrategy updateStrategy = 7;
+
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ optional int32 revisionHistoryLimit = 8;
+}
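To make the serviceName comment concrete, here is a small sketch of the hostname a pod would receive under the documented pattern, where the pod-specific string for ordinal N of a StatefulSet named "name" is "name-N" (the names below are made up).

```go
package main

import "fmt"

// podDNS spells out the pattern quoted above:
// pod-specific-string.serviceName.<namespace>.svc.cluster.local,
// with the pod-specific string being "<statefulset-name>-<ordinal>".
func podDNS(setName, serviceName, namespace string, ordinal int) string {
	return fmt.Sprintf("%s-%d.%s.%s.svc.cluster.local", setName, ordinal, serviceName, namespace)
}

func main() {
	// e.g. pod 0 of a StatefulSet "web" governed by service "nginx" in "default"
	fmt.Println(podDNS("web", "nginx", "default", 0)) // web-0.nginx.default.svc.cluster.local
}
```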
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// StatefulSetStatus represents the current state of a StatefulSet.
+message StatefulSetStatus {
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // replicas is the number of Pods created by the StatefulSet controller.
+ optional int32 replicas = 2;
+
+ // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+ optional int32 readyReplicas = 3;
+
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ optional int32 currentReplicas = 4;
+
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ optional int32 updatedReplicas = 5;
+
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ optional string currentRevision = 6;
+
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ optional string updateRevision = 7;
+}
+
+// WIP: This is not ready to be used and we plan to make breaking changes to it.
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+message StatefulSetUpdateStrategy {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ optional string type = 1;
+
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
+}
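How updateStrategy, RollingUpdateStatefulSetStrategy.partition, and the revision fields in StatefulSetStatus fit together is easier to see in code. The sketch below uses the common interpretation that ordinals at or above the partition move to updateRevision while the rest stay on currentRevision; that rule is an assumption here, not spelled out verbatim in the comments above.

```go
package main

import "fmt"

// revisionForOrdinal sketches which revision a pod at the given ordinal is
// expected to run during a partitioned RollingUpdate. It mirrors the sequence
// comments in StatefulSetStatus: [0,currentReplicas) on currentRevision, the
// tail [replicas-updatedReplicas,replicas) on updateRevision.
func revisionForOrdinal(ordinal, partition int32, currentRevision, updateRevision string) string {
	if ordinal >= partition {
		return updateRevision
	}
	return currentRevision
}

func main() {
	// e.g. 5 replicas with partition=3: pods 3 and 4 act as canaries on the new revision.
	for ordinal := int32(0); ordinal < 5; ordinal++ {
		fmt.Printf("pod-%d -> %s\n", ordinal, revisionForOrdinal(ordinal, 3, "web-6b8f9c", "web-7d4e2a"))
	}
}
```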
+
diff --git a/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
new file mode 100644
index 00000000000..fc29ef341c9
--- /dev/null
+++ b/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_DaemonSet = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSet represents the configuration of a daemon set.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+ return map_DaemonSet
+}
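As a quick orientation for how these generated maps are consumed (for example by go-restful when assembling Swagger documentation), here is a hedged sketch that reads them back by JSON field name; the empty key holds the type-level description. It assumes the new package is on the import path.

```go
package main

import (
	"fmt"

	"k8s.io/api/apps/v1beta2" // assumption: the package added in this change
)

func main() {
	docs := v1beta2.DaemonSet{}.SwaggerDoc()
	fmt.Println(docs[""])     // type-level description (the "WIP: ..." sentence)
	fmt.Println(docs["spec"]) // per-field description, keyed by the JSON field name
}
```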
+
+var map_DaemonSetList = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSetList is a collection of daemon sets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "items": "A list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+ return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSetSpec is the specification of a daemon set.",
+ "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.",
+ "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+ "templateGeneration": "DEPRECATED. A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.",
+	"revisionHistoryLimit": "The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+ return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DaemonSetStatus represents the current status of a daemon set.",
+ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+ "observedGeneration": "The most recent generation observed by the daemon set controller.",
+	"updatedNumberScheduled": "The total number of nodes that are running the updated daemon pod",
+ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+ return map_DaemonSetStatus
+}
+
+var map_DaemonSetUpdateStrategy = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it.",
+ "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.",
+ "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+}
+
+func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
+ return map_DaemonSetUpdateStrategy
+}
+
+var map_Deployment = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. Deployment enables declarative updates for Pods and ReplicaSets.",
+ "metadata": "Standard object metadata.",
+ "spec": "Specification of the desired behavior of the Deployment.",
+ "status": "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+ return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentCondition describes the state of a deployment at a certain point.",
+ "type": "Type of deployment condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "The last time this condition was updated.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+ return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentList is a list of Deployments.",
+ "metadata": "Standard list metadata.",
+ "items": "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+ return map_DeploymentList
+}
+
+var map_DeploymentSpec = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentSpec is the specification of the desired behavior of the Deployment.",
+ "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+ "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
+ "template": "Template describes the pods that will be created.",
+ "strategy": "The deployment strategy to use to replace existing pods with new ones.",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+ "paused": "Indicates that the deployment is paused.",
+ "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+ return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentStatus is the most recently observed status of the Deployment.",
+ "observedGeneration": "The generation observed by the deployment controller.",
+ "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of ready pods targeted by this deployment.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.",
+ "conditions": "Represents the latest available observations of a deployment's current state.",
+ "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+ return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. DeploymentStrategy describes how to replace existing pods with new ones.",
+ "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+ "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+ return map_DeploymentStrategy
+}
+
+var map_ReplicaSet = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSet represents the configuration of a ReplicaSet.",
+ "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+ return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetCondition describes the state of a replica set at a certain point.",
+ "type": "Type of replica set condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+ return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetList is a collection of ReplicaSets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+ return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetSpec is the specification of a ReplicaSet.",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+ return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ReplicaSetStatus represents the current status of a ReplicaSet.",
+	"replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+ "readyReplicas": "The number of ready replicas for this replica set.",
+ "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+ "conditions": "Represents the latest available observations of a replica set's current state.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+ return map_ReplicaSetStatus
+}
+
+var map_RollingUpdateDaemonSet = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. Spec to control the desired behavior of daemon set rolling update.",
+ "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+}
+
+func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDaemonSet
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. Spec to control the desired behavior of rolling update.",
+ "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+	"maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+	"": "WIP: This is not ready to be used and we plan to make breaking changes to it. RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.",
+ "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+ return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_Scale = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. Scale represents a scaling request for a resource.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+ "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+ "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+ return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ScaleSpec describes the attributes of a scale subresource",
+ "replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+ return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. ScaleStatus represents the current status of a scale subresource.",
+ "replicas": "actual number of observed instances of the scaled object.",
+ "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
+	"targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+ return map_ScaleStatus
+}
+
+var map_StatefulSet = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+ "spec": "Spec defines the desired identities of pods in this set.",
+ "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+}
+
+func (StatefulSet) SwaggerDoc() map[string]string {
+ return map_StatefulSet
+}
+
+var map_StatefulSetList = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSetList is a collection of StatefulSets.",
+}
+
+func (StatefulSetList) SwaggerDoc() map[string]string {
+ return map_StatefulSetList
+}
+
+var map_StatefulSetSpec = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. A StatefulSetSpec is the specification of a StatefulSet.",
+ "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+ "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+ "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+ "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+ "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+}
+
+func (StatefulSetSpec) SwaggerDoc() map[string]string {
+ return map_StatefulSetSpec
+}
+
+var map_StatefulSetStatus = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSetStatus represents the current state of a StatefulSet.",
+ "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+ "replicas": "replicas is the number of Pods created by the StatefulSet controller.",
+ "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+ "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+ "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+}
+
+func (StatefulSetStatus) SwaggerDoc() map[string]string {
+ return map_StatefulSetStatus
+}
+
+var map_StatefulSetUpdateStrategy = map[string]string{
+ "": "WIP: This is not ready to be used and we plan to make breaking changes to it. StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+ "type": "Type indicates the type of the StatefulSetUpdateStrategy.",
+ "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+}
+
+func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
+ return map_StatefulSetUpdateStrategy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..bd8a1c08635
--- /dev/null
+++ b/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,938 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1beta2
+
+import (
+ core_v1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+ reflect "reflect"
+)
+
+func init() {
+ SchemeBuilder.Register(RegisterDeepCopies)
+}
+
+// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
+// to allow building arbitrary schemes.
+//
+// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
+func RegisterDeepCopies(scheme *runtime.Scheme) error {
+ return scheme.AddGeneratedDeepCopyFuncs(
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet))
+ return nil
+ }, InType: reflect.TypeOf(&DaemonSet{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList))
+ return nil
+ }, InType: reflect.TypeOf(&DaemonSetList{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec))
+ return nil
+ }, InType: reflect.TypeOf(&DaemonSetSpec{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus))
+ return nil
+ }, InType: reflect.TypeOf(&DaemonSetStatus{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy))
+ return nil
+ }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*Deployment).DeepCopyInto(out.(*Deployment))
+ return nil
+ }, InType: reflect.TypeOf(&Deployment{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition))
+ return nil
+ }, InType: reflect.TypeOf(&DeploymentCondition{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList))
+ return nil
+ }, InType: reflect.TypeOf(&DeploymentList{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec))
+ return nil
+ }, InType: reflect.TypeOf(&DeploymentSpec{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus))
+ return nil
+ }, InType: reflect.TypeOf(&DeploymentStatus{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy))
+ return nil
+ }, InType: reflect.TypeOf(&DeploymentStrategy{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet))
+ return nil
+ }, InType: reflect.TypeOf(&ReplicaSet{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition))
+ return nil
+ }, InType: reflect.TypeOf(&ReplicaSetCondition{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList))
+ return nil
+ }, InType: reflect.TypeOf(&ReplicaSetList{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec))
+ return nil
+ }, InType: reflect.TypeOf(&ReplicaSetSpec{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus))
+ return nil
+ }, InType: reflect.TypeOf(&ReplicaSetStatus{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet))
+ return nil
+ }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment))
+ return nil
+ }, InType: reflect.TypeOf(&RollingUpdateDeployment{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy))
+ return nil
+ }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*Scale).DeepCopyInto(out.(*Scale))
+ return nil
+ }, InType: reflect.TypeOf(&Scale{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec))
+ return nil
+ }, InType: reflect.TypeOf(&ScaleSpec{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus))
+ return nil
+ }, InType: reflect.TypeOf(&ScaleStatus{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet))
+ return nil
+ }, InType: reflect.TypeOf(&StatefulSet{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList))
+ return nil
+ }, InType: reflect.TypeOf(&StatefulSetList{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec))
+ return nil
+ }, InType: reflect.TypeOf(&StatefulSetSpec{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus))
+ return nil
+ }, InType: reflect.TypeOf(&StatefulSetStatus{})},
+ conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+ in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy))
+ return nil
+ }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})},
+ )
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
+func (in *DaemonSet) DeepCopy() *DaemonSet {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
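The DeepCopyInto/DeepCopy/DeepCopyObject trio above is the standard generated pattern; the sketch below (values invented) shows why the generated code re-allocates pointer fields such as Spec.RevisionHistoryLimit, and how DeepCopyObject lets code that only holds a runtime.Object take a safe copy.

```go
package main

import (
	"fmt"

	"k8s.io/api/apps/v1beta2" // assumption: the package added in this change
	"k8s.io/apimachinery/pkg/runtime"
)

// snapshot shows the point of DeepCopyObject: callers holding only a
// runtime.Object can still take an independent copy before mutating.
func snapshot(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}

func main() {
	limit := int32(10)
	ds := &v1beta2.DaemonSet{}
	ds.Spec.RevisionHistoryLimit = &limit

	cp := ds.DeepCopy()
	*cp.Spec.RevisionHistoryLimit = 1 // a fresh int32 was allocated by DeepCopyInto

	fmt.Println(*ds.Spec.RevisionHistoryLimit) // still 10: the original is untouched
	_ = snapshot(ds)
}
```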
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DaemonSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
+func (in *DaemonSetList) DeepCopy() *DaemonSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
+func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
+ *out = *in
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int64)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(RollingUpdateDaemonSet)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetUpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+ if in == nil {
+ return nil
+ }
+ out := new(Deployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Deployment, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ in.Strategy.DeepCopyInto(&out.Strategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ if in.ProgressDeadlineSeconds != nil {
+ in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]DeploymentCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int64)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(RollingUpdateDeployment)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
+func (in *ReplicaSet) DeepCopy() *ReplicaSet {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
+func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ReplicaSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
+func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ReplicaSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
+func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateDaemonSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ }
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
+ *out = *in
+ if in.Partition != nil {
+ in, out := &in.Partition, &out.Partition
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateStatefulSetStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scale) DeepCopyInto(out *Scale) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
+func (in *Scale) DeepCopy() *Scale {
+ if in == nil {
+ return nil
+ }
+ out := new(Scale)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scale) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
+func (in *ScaleSpec) DeepCopy() *ScaleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ScaleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
+func (in *ScaleStatus) DeepCopy() *ScaleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ScaleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
+func (in *StatefulSet) DeepCopy() *StatefulSet {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StatefulSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
+func (in *StatefulSetList) DeepCopy() *StatefulSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ } else {
+ return nil
+ }
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ if in.VolumeClaimTemplates != nil {
+ in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]core_v1.PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(int32)
+ **out = **in
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(RollingUpdateStatefulSetStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetUpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
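Aside for reviewers: the functions above are the usual deepcopy-gen output, so the only thing worth eyeballing is that pointer and slice fields get fresh allocations rather than aliases. A minimal sketch of that behaviour, assuming these functions land in k8s.io/api/apps/v1beta2 (the file path is not visible in this hunk) and that the module is importable in a scratch program:

package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	replicas := int32(3)
	orig := appsv1beta2.ReplicaSetSpec{Replicas: &replicas}

	// The generated DeepCopy allocates a new int32 for Replicas, so
	// writing through the copy must not touch the original spec.
	cp := orig.DeepCopy()
	*cp.Replicas = 5

	fmt.Println(*orig.Replicas, *cp.Replicas) // expected output: 3 5
}

If the two printed values ever matched, the generator would have produced a shallow copy, which is exactly the bug class these zz_generated functions exist to prevent.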
diff --git a/staging/src/k8s.io/api/extensions/v1beta1/generated.proto b/staging/src/k8s.io/api/extensions/v1beta1/generated.proto
index 2f386667557..91510a4a33e 100644
--- a/staging/src/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/staging/src/k8s.io/api/extensions/v1beta1/generated.proto
@@ -245,6 +245,7 @@ message DeploymentList {
repeated Deployment items = 2;
}
+// DEPRECATED.
// DeploymentRollback stores the information required to rollback a deployment.
message DeploymentRollback {
// Required: This must match the Name of a deployment.
@@ -293,6 +294,7 @@ message DeploymentSpec {
// +optional
optional bool paused = 7;
+ // DEPRECATED.
// The config this deployment is rolling back to. Will be cleared after rollback is done.
// +optional
optional RollbackConfig rollbackTo = 8;
@@ -852,6 +854,7 @@ message ReplicaSetStatus {
message ReplicationControllerDummy {
}
+// DEPRECATED.
message RollbackConfig {
// The revision to rollback to. If set to 0, rollback to the last revision.
// +optional
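For context on what these "// DEPRECATED." comments flag: the pattern being discouraged is the server-side rollback request, where a client sets spec.rollbackTo and lets the deployment controller perform the rollback. A hypothetical illustration of that usage against the extensions/v1beta1 Go types as they stood at the time of this change (import path assumed, not taken from this hunk):

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	// Deprecated usage: asking the controller to roll back by setting
	// spec.rollbackTo; Revision 0 means "previous revision".
	dep := extensionsv1beta1.Deployment{
		Spec: extensionsv1beta1.DeploymentSpec{
			RollbackTo: &extensionsv1beta1.RollbackConfig{Revision: 0},
		},
	}
	fmt.Println(dep.Spec.RollbackTo.Revision)
}

The comments added in this hunk do not change behaviour; they only mark the message and field as deprecated in the generated proto and docs.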
diff --git a/staging/src/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 7c5a8c56bf6..2a0259548c8 100644
--- a/staging/src/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/staging/src/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -152,7 +152,7 @@ func (DeploymentList) SwaggerDoc() map[string]string {
}
var map_DeploymentRollback = map[string]string{
- "": "DeploymentRollback stores the information required to rollback a deployment.",
+ "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
"name": "Required: This must match the Name of a deployment.",
"updatedAnnotations": "The annotations to be updated to a deployment",
"rollbackTo": "The config of this deployment rollback.",
@@ -171,7 +171,7 @@ var map_DeploymentSpec = map[string]string{
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.",
"paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.",
- "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
+ "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. This is not set by default.",
}
@@ -502,6 +502,7 @@ func (ReplicationControllerDummy) SwaggerDoc() map[string]string {
}
var map_RollbackConfig = map[string]string{
+ "": "DEPRECATED.",
"revision": "The revision to rollback to. If set to 0, rollback to the last revision.",
}