mirror of https://github.com/rancher/types.git synced 2025-06-27 05:56:50 +00:00

Merge pull request #16 from ibuildthecloud/work

Workload API additions
Darren Shepherd 2017-11-14 20:51:02 -08:00 committed by GitHub
commit c83ef8dde1
102 changed files with 15583 additions and 182 deletions

View File

@ -13,13 +13,13 @@ var (
Group: "cluster.cattle.io",
Path: "/v1-cluster",
SubContexts: map[string]bool{
"projects": true,
"clusters": true,
},
}
Schemas = commonmappers.Add(&Version, types.NewSchemas()).
AddMapperForType(&Version, v1.Cluster{}, m.NewObject(nil)).
AddMapperForType(&Version, v1.ClusterNode{}, m.NewObject(nil)).
AddMapperForType(&Version, v1.Cluster{}, m.NewObject()).
AddMapperForType(&Version, v1.ClusterNode{}, m.NewObject()).
MustImport(&Version, v1.Cluster{}).
MustImport(&Version, v1.ClusterNode{})
)

View File

@ -0,0 +1,29 @@
package mapper
import (
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/mapping/mapper"
)
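// NodeAddressMapper lifts the node's InternalIP and Hostname entries from the
// status addresses list onto the top-level ipAddress and hostname fields.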
type NodeAddressMapper struct {
}
func (n NodeAddressMapper) FromInternal(data map[string]interface{}) {
addresses, _ := mapper.GetSlice(data, "addresses")
for _, address := range addresses {
t := address["type"]
a := address["address"]
if t == "InternalIP" {
data["IpAddress"] = a
} else if t == "Hostname" {
data["hostname"] = a
}
}
}
func (n NodeAddressMapper) ToInternal(data map[string]interface{}) {
}
func (n NodeAddressMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
return nil
}
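For illustration, a self-contained sketch of the same transformation, with the norman GetSlice helper replaced by a plain type assertion; the address values are hypothetical.
package main

import "fmt"

func main() {
	// Hypothetical node status data, already converted to a generic map.
	data := map[string]interface{}{
		"addresses": []map[string]interface{}{
			{"type": "InternalIP", "address": "10.0.0.5"},
			{"type": "Hostname", "address": "worker-1"},
		},
	}
	// Same walk as NodeAddressMapper.FromInternal, inlined.
	for _, address := range data["addresses"].([]map[string]interface{}) {
		switch address["type"] {
		case "InternalIP":
			data["ipAddress"] = address["address"]
		case "Hostname":
			data["hostname"] = address["address"]
		}
	}
	fmt.Println(data["ipAddress"], data["hostname"]) // 10.0.0.5 worker-1
}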

View File

@ -0,0 +1,49 @@
package mapper
import (
"strings"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/norman/types/mapping/mapper"
)
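// OSInfo condenses the node's capacity and nodeInfo fields into the compact
// info block (cpu, memory, os and kubernetes) exposed by the API.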
type OSInfo struct {
}
func (o OSInfo) FromInternal(data map[string]interface{}) {
cpuInfo := map[string]interface{}{
"count": mapper.GetValueN(data, "capacity", "cpu"),
}
kib := strings.TrimSuffix(convert.ToString(mapper.GetValueN(data, "capacity", "memory")), "Ki")
memoryInfo := map[string]interface{}{}
kibNum, err := convert.ToNumber(kib)
if err == nil {
memoryInfo["memTotalKiB"] = kibNum
}
osInfo := map[string]interface{}{
"dockerVersion": strings.TrimPrefix(convert.ToString(mapper.GetValueN(data, "nodeInfo", "containerRuntimeVersion")), "docker://"),
"kernelVersion": mapper.GetValueN(data, "nodeInfo", "kernelVersion"),
"operatingSystem": mapper.GetValueN(data, "nodeInfo", "osImage"),
}
data["info"] = map[string]interface{}{
"cpu": cpuInfo,
"memory": memoryInfo,
"os": osInfo,
"kubernetes": map[string]interface{}{
"kubeletVersion": mapper.GetValueN(data, "nodeInfo", "kubeletVersion"),
"kubeProxyVersion": mapper.GetValueN(data, "nodeInfo", "kubeletVersion"),
},
}
}
func (o OSInfo) ToInternal(data map[string]interface{}) {
}
func (o OSInfo) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
return nil
}

View File

@ -7,37 +7,46 @@ import (
m "github.com/rancher/norman/types/mapping/mapper"
)
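// PivotMapper inverts one level of map nesting, e.g. {"limits": {"cpu": x}}
// becomes {"cpu": {"limit": x}}; the trailing "s" is only trimmed when Plural
// is set.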
type ResourceRequirementsMapper struct {
type PivotMapper struct {
Plural bool
}
func (r ResourceRequirementsMapper) FromInternal(data map[string]interface{}) {
func (r PivotMapper) FromInternal(data map[string]interface{}) {
for key, value := range data {
mapValue, ok := value.(map[string]interface{})
if !ok {
continue
}
for subKey, subValue := range mapValue {
m.PutValue(data, subValue, subKey, strings.TrimSuffix(key, "s"))
if r.Plural {
m.PutValue(data, subValue, subKey, strings.TrimSuffix(key, "s"))
} else {
m.PutValue(data, subValue, subKey, key)
}
}
delete(data, key)
}
}
func (r ResourceRequirementsMapper) ToInternal(data map[string]interface{}) {
func (r PivotMapper) ToInternal(data map[string]interface{}) {
for key, value := range data {
mapValue, ok := value.(map[string]interface{})
if !ok {
continue
}
for subKey, subValue := range mapValue {
m.PutValue(data, subValue, subKey, key+"s")
if r.Plural {
m.PutValue(data, subValue, subKey, key+"s")
} else {
m.PutValue(data, subValue, subKey, key)
}
}
delete(data, key)
}
}
func (r ResourceRequirementsMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
func (r PivotMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
return nil
}
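A self-contained sketch of the pivot applied to ResourceRequirements data, writing into a fresh map instead of mutating in place and replacing the norman PutValue helper with direct map writes; the resource values are hypothetical.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Internal (Kubernetes) form of resource requirements; values are examples only.
	data := map[string]interface{}{
		"limits":   map[string]interface{}{"cpu": "500m", "memory": "128Mi"},
		"requests": map[string]interface{}{"cpu": "250m"},
	}

	// Same walk as PivotMapper{Plural: true}.FromInternal, but collected into a
	// separate map so no keys are added while ranging over data.
	pivoted := map[string]interface{}{}
	for key, value := range data {
		mapValue, ok := value.(map[string]interface{})
		if !ok {
			pivoted[key] = value
			continue
		}
		for subKey, subValue := range mapValue {
			inner, _ := pivoted[subKey].(map[string]interface{})
			if inner == nil {
				inner = map[string]interface{}{}
				pivoted[subKey] = inner
			}
			inner[strings.TrimSuffix(key, "s")] = subValue
		}
	}

	fmt.Println(pivoted)
	// map[cpu:map[limit:500m request:250m] memory:map[limit:128Mi]]
}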

View File

@ -0,0 +1,18 @@
package mapper
import (
"github.com/rancher/norman/types"
)
type StatefulSetSpecMapper struct {
}
func (s StatefulSetSpecMapper) FromInternal(data map[string]interface{}) {
}
func (s StatefulSetSpecMapper) ToInternal(data map[string]interface{}) {
}
func (s StatefulSetSpecMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
return nil
}

View File

@ -6,19 +6,172 @@ import (
"github.com/rancher/types/apis/workload.cattle.io/v1/schema/mapper"
"github.com/rancher/types/commonmappers"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/staging/src/k8s.io/api/apps/v1beta2"
)
var (
Version = types.APIVersion{
Version: "v1",
Group: "workload.cattle.io",
Path: "/v1-app",
Path: "/v1-workload",
SubContexts: map[string]bool{
"projects": true,
"projects": true,
"namespaces": true,
},
}
Schemas = commonmappers.Add(&Version, types.NewSchemas()).
Init(podTypes).
Init(namespaceTypes).
Init(nodeTypes).
Init(replicaSetTypes).
Init(deploymentTypes).
Init(statefulSetTypes)
)
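// statefulSetTypes wires apps/v1beta2 StatefulSet into the schema: replicas,
// podManagementPolicy and revisionHistoryLimit fold into the shared deploy
// block, the update strategy's type/rollingUpdate become kind/rollingUpdateConfig,
// and volumeClaimTemplates become a map keyed by name.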
func statefulSetTypes(schemas *types.Schemas) *types.Schemas {
return schemas.
AddMapperForType(&Version, v1beta2.StatefulSetUpdateStrategy{}, &types.TypeMapper{
Mappers: []types.Mapper{
m.Enum{Field: "type",
Values: map[string][]string{
"RollingUpdate": {"rollingUpdate"},
"OnDelete": {"onDelete"},
},
},
m.Move{From: "type", To: "kind"},
m.Move{From: "rollingUpdate", To: "rollingUpdateConfig"},
},
}).
AddMapperForType(&Version, v1beta2.StatefulSetSpec{}, &types.TypeMapper{
Mappers: []types.Mapper{
&m.Move{
From: "replicas",
To: "deploy/scale",
},
m.Enum{Field: "podManagementPolicy",
Values: map[string][]string{
"OrderedReady": {"ordered"},
"Parallel": {"parallel"},
},
},
&m.Move{
From: "podManagementPolicy",
To: "deploy/strategy",
},
&m.Move{
From: "revisionHistoryLimit",
To: "deploy/maxRevisions",
},
m.Drop{"selector"},
m.SliceToMap{
Field: "volumeClaimTemplates",
Key: "name",
},
},
}).
AddMapperForType(&Version, v1beta2.StatefulSet{}, m.NewObject()).
MustImport(&Version, v1beta2.StatefulSetSpec{}, deployOverride{}).
MustImport(&Version, v1beta2.StatefulSet{})
}
func deploymentTypes(schemas *types.Schemas) *types.Schemas {
return schemas.
AddMapperForType(&Version, v1beta2.DeploymentStrategy{}, &types.TypeMapper{
Mappers: []types.Mapper{
m.Enum{Field: "type",
Values: map[string][]string{
"RollingUpdate": {"rollingUpdate"},
"Recreate": {"recreate"},
},
},
m.Move{From: "type", To: "kind"},
m.Move{From: "rollingUpdate", To: "rollingUpdateConfig"},
},
}).
AddMapperForType(&Version, v1beta2.DeploymentSpec{}, &types.TypeMapper{
Mappers: []types.Mapper{
&m.Move{
From: "replicas",
To: "deploy/scale",
},
&m.Move{
From: "minReadySeconds",
To: "deploy/minReadySeconds",
},
&m.Move{
From: "progressDeadlineSeconds",
To: "deploy/progressDeadlineSeconds",
},
&m.Move{
From: "revisionHistoryLimit",
To: "deploy/maxRevisions",
},
m.Drop{"selector"},
m.Move{From: "strategy", To: "updateStrategy"},
},
}).
AddMapperForType(&Version, v1beta2.Deployment{}, m.NewObject()).
MustImport(&Version, v1beta2.DeploymentSpec{}, deployOverride{}).
MustImport(&Version, v1beta2.Deployment{})
}
func replicaSetTypes(schemas *types.Schemas) *types.Schemas {
return schemas.
AddMapperForType(&Version, v1beta2.ReplicaSetSpec{}, &types.TypeMapper{
Mappers: []types.Mapper{
&m.Move{
From: "replicas",
To: "deploy/scale",
},
&m.Move{
From: "minReadySeconds",
To: "deploy/minReadySeconds",
},
m.Drop{"selector"},
},
}).
AddMapperForType(&Version, v1beta2.ReplicaSet{}, m.NewObject()).
MustImport(&Version, v1beta2.ReplicaSetSpec{}, deployOverride{}).
MustImport(&Version, v1beta2.ReplicaSet{})
}
func nodeTypes(schemas *types.Schemas) *types.Schemas {
return schemas.
AddMapperForType(&Version, v1.NodeStatus{}, &types.TypeMapper{
Mappers: []types.Mapper{
&mapper.NodeAddressMapper{},
&mapper.OSInfo{},
&m.Drop{"addresses"},
&m.Drop{"conditions"},
&m.Drop{"daemonEndpoints"},
&m.Drop{"images"},
&m.Drop{"nodeInfo"},
&m.SliceToMap{Field: "volumesAttached", Key: "devicePath"},
},
}).
AddMapperForType(&Version, v1.Node{}, m.NewObject()).
MustImport(&Version, v1.NodeStatus{}, nodeStatusOverride{}).
MustImport(&Version, v1.Node{})
}
func namespaceTypes(schemas *types.Schemas) *types.Schemas {
return schemas.
AddMapperForType(&Version, v1.NamespaceStatus{}, &types.TypeMapper{
Mappers: []types.Mapper{
&m.Drop{"phase"},
},
}).
AddMapperForType(&Version, v1.NamespaceSpec{}, &types.TypeMapper{
Mappers: []types.Mapper{
&m.Drop{"finalizers"},
},
}).
AddMapperForType(&Version, v1.Namespace{}, m.NewObject()).
MustImport(&Version, v1.Namespace{})
}
func podTypes(schemas *types.Schemas) *types.Schemas {
return schemas.
AddMapperForType(&Version, v1.Capabilities{}, &types.TypeMapper{
Mappers: []types.Mapper{
m.Move{From: "add", To: "capAdd"},
@ -79,6 +232,10 @@ var (
mapper.InitContainerMapper{},
mapper.SchedulingMapper{},
&m.Embed{Field: "securityContext"},
&m.SliceToMap{
Field: "volumes",
Key: "name",
},
&m.SliceToMap{
Field: "containers",
Key: "name",
@ -98,9 +255,10 @@ var (
}).
AddMapperForType(&Version, v1.ResourceRequirements{}, &types.TypeMapper{
Mappers: []types.Mapper{
mapper.ResourceRequirementsMapper{},
mapper.PivotMapper{Plural: true},
},
}).
// Must import handlers before Container
MustImport(&Version, v1.Handler{}, handlerOverride{}).
MustImport(&Version, v1.Probe{}, handlerOverride{}).
MustImport(&Version, v1.Container{}, struct {
@ -116,4 +274,4 @@ var (
IPC string
}{}).
MustImport(&Version, v1.Pod{})
)
}

View File

@ -3,6 +3,7 @@ package schema
import (
"github.com/rancher/norman/types"
m "github.com/rancher/norman/types/mapping/mapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
@ -28,6 +29,13 @@ var (
}
)
type ScalingGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec interface{} `json:"spec"`
Status interface{} `json:"status"`
}
type handlerOverride struct {
TCP bool
}
@ -77,3 +85,42 @@ type deployParams struct {
Ordered bool
QuorumSize int64
}
type nodeStatusOverride struct {
IPAddress string
Hostname string
Info NodeInfo
}
type NodeInfo struct {
CPU CPUInfo
Memory MemoryInfo
OS OSInfo
Kubernetes KubernetesInfo
}
type CPUInfo struct {
Count int64
}
type MemoryInfo struct {
MemTotalKiB int64
}
type OSInfo struct {
DockerVersion string
KernelVersion string
OperatingSystem string
}
type KubernetesInfo struct {
KubeletVersion string
KubeProxyVersion string
}
type DeployParams struct {
}
type deployOverride struct {
Deploy *DeployParams
}

View File

@ -9,15 +9,16 @@ const (
ClusterFieldAPIVersion = "apiVersion"
ClusterFieldAnnotations = "annotations"
ClusterFieldAzureKubernetesServiceConfig = "azureKubernetesServiceConfig"
ClusterFieldCreated = "created"
ClusterFieldCreationTimestamp = "creationTimestamp"
ClusterFieldDeletionTimestamp = "deletionTimestamp"
ClusterFieldGoogleKubernetesEngineConfig = "googleKubernetesEngineConfig"
ClusterFieldKind = "kind"
ClusterFieldLabels = "labels"
ClusterFieldName = "name"
ClusterFieldNamespace = "namespace"
ClusterFieldRancherKubernetesEngineConfig = "rancherKubernetesEngineConfig"
ClusterFieldRemoved = "removed"
ClusterFieldUuid = "uuid"
ClusterFieldStatus = "status"
ClusterFieldUID = "uid"
)
type Cluster struct {
@ -25,15 +26,16 @@ type Cluster struct {
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
AzureKubernetesServiceConfig *AzureKubernetesServiceConfig `json:"azureKubernetesServiceConfig,omitempty"`
Created string `json:"created,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
GoogleKubernetesEngineConfig *GoogleKubernetesEngineConfig `json:"googleKubernetesEngineConfig,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
RancherKubernetesEngineConfig *RancherKubernetesEngineConfig `json:"rancherKubernetesEngineConfig,omitempty"`
Removed string `json:"removed,omitempty"`
Uuid string `json:"uuid,omitempty"`
Status *ClusterStatus `json:"status,omitempty"`
UID string `json:"uid,omitempty"`
}
type ClusterCollection struct {
types.Collection

View File

@ -5,41 +5,43 @@ import (
)
const (
ClusterNodeType = "clusterNode"
ClusterNodeFieldAPIVersion = "apiVersion"
ClusterNodeFieldAnnotations = "annotations"
ClusterNodeFieldConfigSource = "configSource"
ClusterNodeFieldCreated = "created"
ClusterNodeFieldExternalID = "externalID"
ClusterNodeFieldKind = "kind"
ClusterNodeFieldLabels = "labels"
ClusterNodeFieldName = "name"
ClusterNodeFieldNamespace = "namespace"
ClusterNodeFieldPodCIDR = "podCIDR"
ClusterNodeFieldProviderID = "providerID"
ClusterNodeFieldRemoved = "removed"
ClusterNodeFieldTaints = "taints"
ClusterNodeFieldUnschedulable = "unschedulable"
ClusterNodeFieldUuid = "uuid"
ClusterNodeType = "clusterNode"
ClusterNodeFieldAPIVersion = "apiVersion"
ClusterNodeFieldAnnotations = "annotations"
ClusterNodeFieldConfigSource = "configSource"
ClusterNodeFieldCreationTimestamp = "creationTimestamp"
ClusterNodeFieldDeletionTimestamp = "deletionTimestamp"
ClusterNodeFieldExternalID = "externalID"
ClusterNodeFieldKind = "kind"
ClusterNodeFieldLabels = "labels"
ClusterNodeFieldName = "name"
ClusterNodeFieldNamespace = "namespace"
ClusterNodeFieldPodCIDR = "podCIDR"
ClusterNodeFieldProviderID = "providerID"
ClusterNodeFieldStatus = "status"
ClusterNodeFieldTaints = "taints"
ClusterNodeFieldUID = "uid"
ClusterNodeFieldUnschedulable = "unschedulable"
)
type ClusterNode struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
ConfigSource *NodeConfigSource `json:"configSource,omitempty"`
Created string `json:"created,omitempty"`
ExternalID string `json:"externalID,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
PodCIDR string `json:"podCIDR,omitempty"`
ProviderID string `json:"providerID,omitempty"`
Removed string `json:"removed,omitempty"`
Taints []Taint `json:"taints,omitempty"`
Unschedulable bool `json:"unschedulable,omitempty"`
Uuid string `json:"uuid,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
ConfigSource *NodeConfigSource `json:"configSource,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
ExternalID string `json:"externalID,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
PodCIDR string `json:"podCIDR,omitempty"`
ProviderID string `json:"providerID,omitempty"`
Status NodeStatus `json:"status,omitempty"`
Taints []Taint `json:"taints,omitempty"`
UID string `json:"uid,omitempty"`
Unschedulable bool `json:"unschedulable,omitempty"`
}
type ClusterNodeCollection struct {
types.Collection

View File

@ -1,22 +1,22 @@
package client
const (
ObjectMetaType = "objectMeta"
ObjectMetaFieldAnnotations = "annotations"
ObjectMetaFieldCreated = "created"
ObjectMetaFieldLabels = "labels"
ObjectMetaFieldName = "name"
ObjectMetaFieldNamespace = "namespace"
ObjectMetaFieldRemoved = "removed"
ObjectMetaFieldUuid = "uuid"
ObjectMetaType = "objectMeta"
ObjectMetaFieldAnnotations = "annotations"
ObjectMetaFieldCreationTimestamp = "creationTimestamp"
ObjectMetaFieldDeletionTimestamp = "deletionTimestamp"
ObjectMetaFieldLabels = "labels"
ObjectMetaFieldName = "name"
ObjectMetaFieldNamespace = "namespace"
ObjectMetaFieldUID = "uid"
)
type ObjectMeta struct {
Annotations map[string]string `json:"annotations,omitempty"`
Created string `json:"created,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
Removed string `json:"removed,omitempty"`
Uuid string `json:"uuid,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
UID string `json:"uid,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
AttachedVolumeType = "attachedVolume"
AttachedVolumeFieldDevicePath = "devicePath"
AttachedVolumeFieldName = "name"
)
type AttachedVolume struct {
DevicePath string `json:"devicePath,omitempty"`
Name string `json:"name,omitempty"`
}

View File

@ -1,12 +1,12 @@
package client
const (
CapabilitiesType = "capabilities"
CapabilitiesFieldCapAdd = "capAdd"
CapabilitiesFieldCapDrop = "capDrop"
CapabilitiesType = "capabilities"
CapabilitiesFieldAdd = "add"
CapabilitiesFieldDrop = "drop"
)
type Capabilities struct {
CapAdd []string `json:"capAdd,omitempty"`
CapDrop []string `json:"capDrop,omitempty"`
Add []string `json:"add,omitempty"`
Drop []string `json:"drop,omitempty"`
}

View File

@ -7,7 +7,13 @@ import (
type Client struct {
clientbase.APIBaseClient
Pod PodOperations
Pod PodOperations
Namespace NamespaceOperations
Node NodeOperations
ReplicaSet ReplicaSetOperations
Deployment DeploymentOperations
PersistentVolumeClaim PersistentVolumeClaimOperations
StatefulSet StatefulSetOperations
}
func NewClient(opts *clientbase.ClientOpts) (*Client, error) {
@ -21,6 +27,12 @@ func NewClient(opts *clientbase.ClientOpts) (*Client, error) {
}
client.Pod = newPodClient(client)
client.Namespace = newNamespaceClient(client)
client.Node = newNodeClient(client)
client.ReplicaSet = newReplicaSetClient(client)
client.Deployment = newDeploymentClient(client)
client.PersistentVolumeClaim = newPersistentVolumeClaimClient(client)
client.StatefulSet = newStatefulSetClient(client)
return client, nil
}

View File

@ -2,65 +2,65 @@ package client
const (
ContainerType = "container"
ContainerFieldAdd = "add"
ContainerFieldAllowPrivilegeEscalation = "allowPrivilegeEscalation"
ContainerFieldCapAdd = "capAdd"
ContainerFieldCapDrop = "capDrop"
ContainerFieldArgs = "args"
ContainerFieldCommand = "command"
ContainerFieldEntrypoint = "entrypoint"
ContainerFieldDrop = "drop"
ContainerFieldEnvironment = "environment"
ContainerFieldEnvironmentFrom = "environmentFrom"
ContainerFieldHealthcheck = "healthcheck"
ContainerFieldImage = "image"
ContainerFieldImagePullPolicy = "imagePullPolicy"
ContainerFieldInitContainer = "initContainer"
ContainerFieldLivenessProbe = "livenessProbe"
ContainerFieldName = "name"
ContainerFieldPorts = "ports"
ContainerFieldPostStart = "postStart"
ContainerFieldPreStop = "preStop"
ContainerFieldPrivileged = "privileged"
ContainerFieldPullPolicy = "pullPolicy"
ContainerFieldReadOnly = "readOnly"
ContainerFieldReadycheck = "readycheck"
ContainerFieldReadOnlyRootFilesystem = "readOnlyRootFilesystem"
ContainerFieldReadinessProbe = "readinessProbe"
ContainerFieldResources = "resources"
ContainerFieldRunAsNonRoot = "runAsNonRoot"
ContainerFieldRunAsUser = "runAsUser"
ContainerFieldScheduling = "scheduling"
ContainerFieldStdin = "stdin"
ContainerFieldStdinOnce = "stdinOnce"
ContainerFieldTTY = "tty"
ContainerFieldTerminationMessagePath = "terminationMessagePath"
ContainerFieldTerminationMessagePolicy = "terminationMessagePolicy"
ContainerFieldUid = "uid"
ContainerFieldVolumeMounts = "volumeMounts"
ContainerFieldWorkingDir = "workingDir"
)
type Container struct {
Add []string `json:"add,omitempty"`
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"`
CapAdd []string `json:"capAdd,omitempty"`
CapDrop []string `json:"capDrop,omitempty"`
Args []string `json:"args,omitempty"`
Command []string `json:"command,omitempty"`
Entrypoint []string `json:"entrypoint,omitempty"`
Drop []string `json:"drop,omitempty"`
Environment map[string]string `json:"environment,omitempty"`
EnvironmentFrom []EnvironmentFrom `json:"environmentFrom,omitempty"`
Healthcheck *Probe `json:"healthcheck,omitempty"`
Image string `json:"image,omitempty"`
ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
InitContainer bool `json:"initContainer,omitempty"`
LivenessProbe *Probe `json:"livenessProbe,omitempty"`
Name string `json:"name,omitempty"`
Ports []ContainerPort `json:"ports,omitempty"`
PostStart *Handler `json:"postStart,omitempty"`
PreStop *Handler `json:"preStop,omitempty"`
Privileged *bool `json:"privileged,omitempty"`
PullPolicy string `json:"pullPolicy,omitempty"`
ReadOnly *bool `json:"readOnly,omitempty"`
Readycheck *Probe `json:"readycheck,omitempty"`
ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"`
ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
Resources *Resources `json:"resources,omitempty"`
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
RunAsUser *int64 `json:"runAsUser,omitempty"`
Scheduling *Scheduling `json:"scheduling,omitempty"`
Stdin bool `json:"stdin,omitempty"`
StdinOnce bool `json:"stdinOnce,omitempty"`
TTY bool `json:"tty,omitempty"`
TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
TerminationMessagePolicy string `json:"terminationMessagePolicy,omitempty"`
Uid *int64 `json:"uid,omitempty"`
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"`
WorkingDir string `json:"workingDir,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
ContainerImageType = "containerImage"
ContainerImageFieldNames = "names"
ContainerImageFieldSizeBytes = "sizeBytes"
)
type ContainerImage struct {
Names []string `json:"names,omitempty"`
SizeBytes int64 `json:"sizeBytes,omitempty"`
}

View File

@ -0,0 +1,10 @@
package client
const (
CPUInfoType = "cpuInfo"
CPUInfoFieldCount = "count"
)
type CPUInfo struct {
Count int64 `json:"count,omitempty"`
}

View File

@ -0,0 +1,10 @@
package client
const (
DaemonEndpointType = "daemonEndpoint"
DaemonEndpointFieldPort = "port"
)
type DaemonEndpoint struct {
Port int64 `json:"port,omitempty"`
}

View File

@ -0,0 +1,18 @@
package client
const (
DeployParamsType = "deployParams"
DeployParamsFieldMinReadySeconds = "minReadySeconds"
DeployParamsFieldPodManagementPolicy = "podManagementPolicy"
DeployParamsFieldProgressDeadlineSeconds = "progressDeadlineSeconds"
DeployParamsFieldReplicas = "replicas"
DeployParamsFieldRevisionHistoryLimit = "revisionHistoryLimit"
)
type DeployParams struct {
MinReadySeconds int64 `json:"minReadySeconds,omitempty"`
PodManagementPolicy string `json:"podManagementPolicy,omitempty"`
ProgressDeadlineSeconds *int64 `json:"progressDeadlineSeconds,omitempty"`
Replicas *int64 `json:"replicas,omitempty"`
RevisionHistoryLimit *int64 `json:"revisionHistoryLimit,omitempty"`
}

View File

@ -0,0 +1,103 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
DeploymentType = "deployment"
DeploymentFieldAPIVersion = "apiVersion"
DeploymentFieldAnnotations = "annotations"
DeploymentFieldCreationTimestamp = "creationTimestamp"
DeploymentFieldDeletionTimestamp = "deletionTimestamp"
DeploymentFieldDeploy = "deploy"
DeploymentFieldKind = "kind"
DeploymentFieldLabels = "labels"
DeploymentFieldName = "name"
DeploymentFieldNamespace = "namespace"
DeploymentFieldPaused = "paused"
DeploymentFieldStatus = "status"
DeploymentFieldStrategy = "strategy"
DeploymentFieldTemplate = "template"
DeploymentFieldUID = "uid"
)
type Deployment struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
Deploy *DeployParams `json:"deploy,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
Paused bool `json:"paused,omitempty"`
Status DeploymentStatus `json:"status,omitempty"`
Strategy DeploymentStrategy `json:"strategy,omitempty"`
Template PodTemplateSpec `json:"template,omitempty"`
UID string `json:"uid,omitempty"`
}
type DeploymentCollection struct {
types.Collection
Data []Deployment `json:"data,omitempty"`
client *DeploymentClient
}
type DeploymentClient struct {
apiClient *Client
}
type DeploymentOperations interface {
List(opts *types.ListOpts) (*DeploymentCollection, error)
Create(opts *Deployment) (*Deployment, error)
Update(existing *Deployment, updates interface{}) (*Deployment, error)
ByID(id string) (*Deployment, error)
Delete(container *Deployment) error
}
func newDeploymentClient(apiClient *Client) *DeploymentClient {
return &DeploymentClient{
apiClient: apiClient,
}
}
func (c *DeploymentClient) Create(container *Deployment) (*Deployment, error) {
resp := &Deployment{}
err := c.apiClient.Ops.DoCreate(DeploymentType, container, resp)
return resp, err
}
func (c *DeploymentClient) Update(existing *Deployment, updates interface{}) (*Deployment, error) {
resp := &Deployment{}
err := c.apiClient.Ops.DoUpdate(DeploymentType, &existing.Resource, updates, resp)
return resp, err
}
func (c *DeploymentClient) List(opts *types.ListOpts) (*DeploymentCollection, error) {
resp := &DeploymentCollection{}
err := c.apiClient.Ops.DoList(DeploymentType, opts, resp)
resp.client = c
return resp, err
}
func (cc *DeploymentCollection) Next() (*DeploymentCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &DeploymentCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *DeploymentClient) ByID(id string) (*Deployment, error) {
resp := &Deployment{}
err := c.apiClient.Ops.DoByID(DeploymentType, id, resp)
return resp, err
}
func (c *DeploymentClient) Delete(container *Deployment) error {
return c.apiClient.Ops.DoResourceDelete(DeploymentType, &container.Resource)
}
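For orientation, a minimal usage sketch of the generated workload client; the import path, endpoint URL and credentials are placeholders/assumptions rather than values taken from this commit.
package main

import (
	"fmt"
	"log"

	"github.com/rancher/norman/clientbase"
	"github.com/rancher/norman/types"
	// Assumed import path for the generated workload client package.
	client "github.com/rancher/types/client/workload/v1"
)

func main() {
	// Placeholder endpoint and credentials; ClientOpts fields assumed from norman's clientbase.
	c, err := client.NewClient(&clientbase.ClientOpts{
		URL:       "https://rancher.example.com/v1-workload",
		AccessKey: "token-xxxxx",
		SecretKey: "secret",
	})
	if err != nil {
		log.Fatal(err)
	}

	// List deployments through the generated DeploymentOperations interface.
	deployments, err := c.Deployment.List(&types.ListOpts{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range deployments.Data {
		fmt.Println(d.Namespace, d.Name, d.Status.AvailableReplicas)
	}
}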

View File

@ -0,0 +1,20 @@
package client
const (
DeploymentConditionType = "deploymentCondition"
DeploymentConditionFieldLastTransitionTime = "lastTransitionTime"
DeploymentConditionFieldLastUpdateTime = "lastUpdateTime"
DeploymentConditionFieldMessage = "message"
DeploymentConditionFieldReason = "reason"
DeploymentConditionFieldStatus = "status"
DeploymentConditionFieldType = "type"
)
type DeploymentCondition struct {
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
LastUpdateTime string `json:"lastUpdateTime,omitempty"`
Message string `json:"message,omitempty"`
Reason string `json:"reason,omitempty"`
Status string `json:"status,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,16 @@
package client
const (
DeploymentSpecType = "deploymentSpec"
DeploymentSpecFieldDeploy = "deploy"
DeploymentSpecFieldPaused = "paused"
DeploymentSpecFieldStrategy = "strategy"
DeploymentSpecFieldTemplate = "template"
)
type DeploymentSpec struct {
Deploy *DeployParams `json:"deploy,omitempty"`
Paused bool `json:"paused,omitempty"`
Strategy DeploymentStrategy `json:"strategy,omitempty"`
Template PodTemplateSpec `json:"template,omitempty"`
}

View File

@ -0,0 +1,24 @@
package client
const (
DeploymentStatusType = "deploymentStatus"
DeploymentStatusFieldAvailableReplicas = "availableReplicas"
DeploymentStatusFieldCollisionCount = "collisionCount"
DeploymentStatusFieldConditions = "conditions"
DeploymentStatusFieldObservedGeneration = "observedGeneration"
DeploymentStatusFieldReadyReplicas = "readyReplicas"
DeploymentStatusFieldReplicas = "replicas"
DeploymentStatusFieldUnavailableReplicas = "unavailableReplicas"
DeploymentStatusFieldUpdatedReplicas = "updatedReplicas"
)
type DeploymentStatus struct {
AvailableReplicas int64 `json:"availableReplicas,omitempty"`
CollisionCount *int64 `json:"collisionCount,omitempty"`
Conditions []DeploymentCondition `json:"conditions,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
ReadyReplicas int64 `json:"readyReplicas,omitempty"`
Replicas int64 `json:"replicas,omitempty"`
UnavailableReplicas int64 `json:"unavailableReplicas,omitempty"`
UpdatedReplicas int64 `json:"updatedReplicas,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
DeploymentStrategyType = "deploymentStrategy"
DeploymentStrategyFieldRollingUpdate = "rollingUpdate"
DeploymentStrategyFieldType = "type"
)
type DeploymentStrategy struct {
RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
KubernetesInfoType = "kubernetesInfo"
KubernetesInfoFieldKubeProxyVersion = "kubeProxyVersion"
KubernetesInfoFieldKubeletVersion = "kubeletVersion"
)
type KubernetesInfo struct {
KubeProxyVersion string `json:"kubeProxyVersion,omitempty"`
KubeletVersion string `json:"kubeletVersion,omitempty"`
}

View File

@ -0,0 +1,10 @@
package client
const (
MemoryInfoType = "memoryInfo"
MemoryInfoFieldMemTotalKiB = "memTotalKiB"
)
type MemoryInfo struct {
MemTotalKiB int64 `json:"memTotalKiB,omitempty"`
}

View File

@ -0,0 +1,95 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
NamespaceType = "namespace"
NamespaceFieldAPIVersion = "apiVersion"
NamespaceFieldAnnotations = "annotations"
NamespaceFieldCreationTimestamp = "creationTimestamp"
NamespaceFieldDeletionTimestamp = "deletionTimestamp"
NamespaceFieldKind = "kind"
NamespaceFieldLabels = "labels"
NamespaceFieldName = "name"
NamespaceFieldNamespace = "namespace"
NamespaceFieldStatus = "status"
NamespaceFieldUID = "uid"
)
type Namespace struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
Status NamespaceStatus `json:"status,omitempty"`
UID string `json:"uid,omitempty"`
}
type NamespaceCollection struct {
types.Collection
Data []Namespace `json:"data,omitempty"`
client *NamespaceClient
}
type NamespaceClient struct {
apiClient *Client
}
type NamespaceOperations interface {
List(opts *types.ListOpts) (*NamespaceCollection, error)
Create(opts *Namespace) (*Namespace, error)
Update(existing *Namespace, updates interface{}) (*Namespace, error)
ByID(id string) (*Namespace, error)
Delete(container *Namespace) error
}
func newNamespaceClient(apiClient *Client) *NamespaceClient {
return &NamespaceClient{
apiClient: apiClient,
}
}
func (c *NamespaceClient) Create(container *Namespace) (*Namespace, error) {
resp := &Namespace{}
err := c.apiClient.Ops.DoCreate(NamespaceType, container, resp)
return resp, err
}
func (c *NamespaceClient) Update(existing *Namespace, updates interface{}) (*Namespace, error) {
resp := &Namespace{}
err := c.apiClient.Ops.DoUpdate(NamespaceType, &existing.Resource, updates, resp)
return resp, err
}
func (c *NamespaceClient) List(opts *types.ListOpts) (*NamespaceCollection, error) {
resp := &NamespaceCollection{}
err := c.apiClient.Ops.DoList(NamespaceType, opts, resp)
resp.client = c
return resp, err
}
func (cc *NamespaceCollection) Next() (*NamespaceCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &NamespaceCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *NamespaceClient) ByID(id string) (*Namespace, error) {
resp := &Namespace{}
err := c.apiClient.Ops.DoByID(NamespaceType, id, resp)
return resp, err
}
func (c *NamespaceClient) Delete(container *Namespace) error {
return c.apiClient.Ops.DoResourceDelete(NamespaceType, &container.Resource)
}

View File

@ -0,0 +1,8 @@
package client
const (
NamespaceSpecType = "namespaceSpec"
)
type NamespaceSpec struct {
}

View File

@ -0,0 +1,8 @@
package client
const (
NamespaceStatusType = "namespaceStatus"
)
type NamespaceStatus struct {
}

View File

@ -0,0 +1,107 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
NodeType = "node"
NodeFieldAPIVersion = "apiVersion"
NodeFieldAnnotations = "annotations"
NodeFieldConfigSource = "configSource"
NodeFieldCreationTimestamp = "creationTimestamp"
NodeFieldDeletionTimestamp = "deletionTimestamp"
NodeFieldExternalID = "externalID"
NodeFieldKind = "kind"
NodeFieldLabels = "labels"
NodeFieldName = "name"
NodeFieldNamespace = "namespace"
NodeFieldPodCIDR = "podCIDR"
NodeFieldProviderID = "providerID"
NodeFieldStatus = "status"
NodeFieldTaints = "taints"
NodeFieldUID = "uid"
NodeFieldUnschedulable = "unschedulable"
)
type Node struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
ConfigSource *NodeConfigSource `json:"configSource,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
ExternalID string `json:"externalID,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
PodCIDR string `json:"podCIDR,omitempty"`
ProviderID string `json:"providerID,omitempty"`
Status NodeStatus `json:"status,omitempty"`
Taints []Taint `json:"taints,omitempty"`
UID string `json:"uid,omitempty"`
Unschedulable bool `json:"unschedulable,omitempty"`
}
type NodeCollection struct {
types.Collection
Data []Node `json:"data,omitempty"`
client *NodeClient
}
type NodeClient struct {
apiClient *Client
}
type NodeOperations interface {
List(opts *types.ListOpts) (*NodeCollection, error)
Create(opts *Node) (*Node, error)
Update(existing *Node, updates interface{}) (*Node, error)
ByID(id string) (*Node, error)
Delete(container *Node) error
}
func newNodeClient(apiClient *Client) *NodeClient {
return &NodeClient{
apiClient: apiClient,
}
}
func (c *NodeClient) Create(container *Node) (*Node, error) {
resp := &Node{}
err := c.apiClient.Ops.DoCreate(NodeType, container, resp)
return resp, err
}
func (c *NodeClient) Update(existing *Node, updates interface{}) (*Node, error) {
resp := &Node{}
err := c.apiClient.Ops.DoUpdate(NodeType, &existing.Resource, updates, resp)
return resp, err
}
func (c *NodeClient) List(opts *types.ListOpts) (*NodeCollection, error) {
resp := &NodeCollection{}
err := c.apiClient.Ops.DoList(NodeType, opts, resp)
resp.client = c
return resp, err
}
func (cc *NodeCollection) Next() (*NodeCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &NodeCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *NodeClient) ByID(id string) (*Node, error) {
resp := &Node{}
err := c.apiClient.Ops.DoByID(NodeType, id, resp)
return resp, err
}
func (c *NodeClient) Delete(container *Node) error {
return c.apiClient.Ops.DoResourceDelete(NodeType, &container.Resource)
}

View File

@ -0,0 +1,12 @@
package client
const (
NodeAddressType = "nodeAddress"
NodeAddressFieldAddress = "address"
NodeAddressFieldType = "type"
)
type NodeAddress struct {
Address string `json:"address,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,20 @@
package client
const (
NodeConditionType = "nodeCondition"
NodeConditionFieldLastHeartbeatTime = "lastHeartbeatTime"
NodeConditionFieldLastTransitionTime = "lastTransitionTime"
NodeConditionFieldMessage = "message"
NodeConditionFieldReason = "reason"
NodeConditionFieldStatus = "status"
NodeConditionFieldType = "type"
)
type NodeCondition struct {
LastHeartbeatTime string `json:"lastHeartbeatTime,omitempty"`
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
Message string `json:"message,omitempty"`
Reason string `json:"reason,omitempty"`
Status string `json:"status,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,14 @@
package client
const (
NodeConfigSourceType = "nodeConfigSource"
NodeConfigSourceFieldAPIVersion = "apiVersion"
NodeConfigSourceFieldConfigMapRef = "configMapRef"
NodeConfigSourceFieldKind = "kind"
)
type NodeConfigSource struct {
APIVersion string `json:"apiVersion,omitempty"`
ConfigMapRef *ObjectReference `json:"configMapRef,omitempty"`
Kind string `json:"kind,omitempty"`
}

View File

@ -0,0 +1,10 @@
package client
const (
NodeDaemonEndpointsType = "nodeDaemonEndpoints"
NodeDaemonEndpointsFieldKubeletEndpoint = "kubeletEndpoint"
)
type NodeDaemonEndpoints struct {
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty"`
}

View File

@ -0,0 +1,16 @@
package client
const (
NodeInfoType = "nodeInfo"
NodeInfoFieldCPU = "cpu"
NodeInfoFieldKubernetes = "kubernetes"
NodeInfoFieldMemory = "memory"
NodeInfoFieldOS = "os"
)
type NodeInfo struct {
CPU CPUInfo `json:"cpu,omitempty"`
Kubernetes KubernetesInfo `json:"kubernetes,omitempty"`
Memory MemoryInfo `json:"memory,omitempty"`
OS OSInfo `json:"os,omitempty"`
}

View File

@ -0,0 +1,20 @@
package client
const (
NodeSpecType = "nodeSpec"
NodeSpecFieldConfigSource = "configSource"
NodeSpecFieldExternalID = "externalID"
NodeSpecFieldPodCIDR = "podCIDR"
NodeSpecFieldProviderID = "providerID"
NodeSpecFieldTaints = "taints"
NodeSpecFieldUnschedulable = "unschedulable"
)
type NodeSpec struct {
ConfigSource *NodeConfigSource `json:"configSource,omitempty"`
ExternalID string `json:"externalID,omitempty"`
PodCIDR string `json:"podCIDR,omitempty"`
ProviderID string `json:"providerID,omitempty"`
Taints []Taint `json:"taints,omitempty"`
Unschedulable bool `json:"unschedulable,omitempty"`
}

View File

@ -0,0 +1,24 @@
package client
const (
NodeStatusType = "nodeStatus"
NodeStatusFieldAllocatable = "allocatable"
NodeStatusFieldCapacity = "capacity"
NodeStatusFieldHostname = "hostname"
NodeStatusFieldIPAddress = "ipAddress"
NodeStatusFieldInfo = "info"
NodeStatusFieldPhase = "phase"
NodeStatusFieldVolumesAttached = "volumesAttached"
NodeStatusFieldVolumesInUse = "volumesInUse"
)
type NodeStatus struct {
Allocatable map[string]string `json:"allocatable,omitempty"`
Capacity map[string]string `json:"capacity,omitempty"`
Hostname string `json:"hostname,omitempty"`
IPAddress string `json:"ipAddress,omitempty"`
Info NodeInfo `json:"info,omitempty"`
Phase string `json:"phase,omitempty"`
VolumesAttached map[string]AttachedVolume `json:"volumesAttached,omitempty"`
VolumesInUse []string `json:"volumesInUse,omitempty"`
}

View File

@ -0,0 +1,28 @@
package client
const (
NodeSystemInfoType = "nodeSystemInfo"
NodeSystemInfoFieldArchitecture = "architecture"
NodeSystemInfoFieldBootID = "bootID"
NodeSystemInfoFieldContainerRuntimeVersion = "containerRuntimeVersion"
NodeSystemInfoFieldKernelVersion = "kernelVersion"
NodeSystemInfoFieldKubeProxyVersion = "kubeProxyVersion"
NodeSystemInfoFieldKubeletVersion = "kubeletVersion"
NodeSystemInfoFieldMachineID = "machineID"
NodeSystemInfoFieldOSImage = "osImage"
NodeSystemInfoFieldOperatingSystem = "operatingSystem"
NodeSystemInfoFieldSystemUUID = "systemUUID"
)
type NodeSystemInfo struct {
Architecture string `json:"architecture,omitempty"`
BootID string `json:"bootID,omitempty"`
ContainerRuntimeVersion string `json:"containerRuntimeVersion,omitempty"`
KernelVersion string `json:"kernelVersion,omitempty"`
KubeProxyVersion string `json:"kubeProxyVersion,omitempty"`
KubeletVersion string `json:"kubeletVersion,omitempty"`
MachineID string `json:"machineID,omitempty"`
OSImage string `json:"osImage,omitempty"`
OperatingSystem string `json:"operatingSystem,omitempty"`
SystemUUID string `json:"systemUUID,omitempty"`
}

View File

@ -1,22 +1,22 @@
package client
const (
ObjectMetaType = "objectMeta"
ObjectMetaFieldAnnotations = "annotations"
ObjectMetaFieldCreated = "created"
ObjectMetaFieldLabels = "labels"
ObjectMetaFieldName = "name"
ObjectMetaFieldNamespace = "namespace"
ObjectMetaFieldRemoved = "removed"
ObjectMetaFieldUuid = "uuid"
ObjectMetaType = "objectMeta"
ObjectMetaFieldAnnotations = "annotations"
ObjectMetaFieldCreationTimestamp = "creationTimestamp"
ObjectMetaFieldDeletionTimestamp = "deletionTimestamp"
ObjectMetaFieldLabels = "labels"
ObjectMetaFieldName = "name"
ObjectMetaFieldNamespace = "namespace"
ObjectMetaFieldUID = "uid"
)
type ObjectMeta struct {
Annotations map[string]string `json:"annotations,omitempty"`
Created string `json:"created,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
Removed string `json:"removed,omitempty"`
Uuid string `json:"uuid,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
UID string `json:"uid,omitempty"`
}

View File

@ -0,0 +1,22 @@
package client
const (
ObjectReferenceType = "objectReference"
ObjectReferenceFieldAPIVersion = "apiVersion"
ObjectReferenceFieldFieldPath = "fieldPath"
ObjectReferenceFieldKind = "kind"
ObjectReferenceFieldName = "name"
ObjectReferenceFieldNamespace = "namespace"
ObjectReferenceFieldResourceVersion = "resourceVersion"
ObjectReferenceFieldUID = "uid"
)
type ObjectReference struct {
APIVersion string `json:"apiVersion,omitempty"`
FieldPath string `json:"fieldPath,omitempty"`
Kind string `json:"kind,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty"`
UID string `json:"uid,omitempty"`
}

View File

@ -0,0 +1,14 @@
package client
const (
OSInfoType = "osInfo"
OSInfoFieldDockerVersion = "dockerVersion"
OSInfoFieldKernelVersion = "kernelVersion"
OSInfoFieldOperatingSystem = "operatingSystem"
)
type OSInfo struct {
DockerVersion string `json:"dockerVersion,omitempty"`
KernelVersion string `json:"kernelVersion,omitempty"`
OperatingSystem string `json:"operatingSystem,omitempty"`
}

View File

@ -0,0 +1,85 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
PersistentVolumeClaimType = "persistentVolumeClaim"
PersistentVolumeClaimFieldAPIVersion = "apiVersion"
PersistentVolumeClaimFieldKind = "kind"
PersistentVolumeClaimFieldObjectMeta = "objectMeta"
PersistentVolumeClaimFieldSpec = "spec"
PersistentVolumeClaimFieldStatus = "status"
)
type PersistentVolumeClaim struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Kind string `json:"kind,omitempty"`
ObjectMeta ObjectMeta `json:"objectMeta,omitempty"`
Spec PersistentVolumeClaimSpec `json:"spec,omitempty"`
Status PersistentVolumeClaimStatus `json:"status,omitempty"`
}
type PersistentVolumeClaimCollection struct {
types.Collection
Data []PersistentVolumeClaim `json:"data,omitempty"`
client *PersistentVolumeClaimClient
}
type PersistentVolumeClaimClient struct {
apiClient *Client
}
type PersistentVolumeClaimOperations interface {
List(opts *types.ListOpts) (*PersistentVolumeClaimCollection, error)
Create(opts *PersistentVolumeClaim) (*PersistentVolumeClaim, error)
Update(existing *PersistentVolumeClaim, updates interface{}) (*PersistentVolumeClaim, error)
ByID(id string) (*PersistentVolumeClaim, error)
Delete(container *PersistentVolumeClaim) error
}
func newPersistentVolumeClaimClient(apiClient *Client) *PersistentVolumeClaimClient {
return &PersistentVolumeClaimClient{
apiClient: apiClient,
}
}
func (c *PersistentVolumeClaimClient) Create(container *PersistentVolumeClaim) (*PersistentVolumeClaim, error) {
resp := &PersistentVolumeClaim{}
err := c.apiClient.Ops.DoCreate(PersistentVolumeClaimType, container, resp)
return resp, err
}
func (c *PersistentVolumeClaimClient) Update(existing *PersistentVolumeClaim, updates interface{}) (*PersistentVolumeClaim, error) {
resp := &PersistentVolumeClaim{}
err := c.apiClient.Ops.DoUpdate(PersistentVolumeClaimType, &existing.Resource, updates, resp)
return resp, err
}
func (c *PersistentVolumeClaimClient) List(opts *types.ListOpts) (*PersistentVolumeClaimCollection, error) {
resp := &PersistentVolumeClaimCollection{}
err := c.apiClient.Ops.DoList(PersistentVolumeClaimType, opts, resp)
resp.client = c
return resp, err
}
func (cc *PersistentVolumeClaimCollection) Next() (*PersistentVolumeClaimCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &PersistentVolumeClaimCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *PersistentVolumeClaimClient) ByID(id string) (*PersistentVolumeClaim, error) {
resp := &PersistentVolumeClaim{}
err := c.apiClient.Ops.DoByID(PersistentVolumeClaimType, id, resp)
return resp, err
}
func (c *PersistentVolumeClaimClient) Delete(container *PersistentVolumeClaim) error {
return c.apiClient.Ops.DoResourceDelete(PersistentVolumeClaimType, &container.Resource)
}

View File

@ -0,0 +1,20 @@
package client
const (
PersistentVolumeClaimConditionType = "persistentVolumeClaimCondition"
PersistentVolumeClaimConditionFieldLastProbeTime = "lastProbeTime"
PersistentVolumeClaimConditionFieldLastTransitionTime = "lastTransitionTime"
PersistentVolumeClaimConditionFieldMessage = "message"
PersistentVolumeClaimConditionFieldReason = "reason"
PersistentVolumeClaimConditionFieldStatus = "status"
PersistentVolumeClaimConditionFieldType = "type"
)
type PersistentVolumeClaimCondition struct {
LastProbeTime string `json:"lastProbeTime,omitempty"`
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
Message string `json:"message,omitempty"`
Reason string `json:"reason,omitempty"`
Status string `json:"status,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,18 @@
package client
const (
PersistentVolumeClaimSpecType = "persistentVolumeClaimSpec"
PersistentVolumeClaimSpecFieldAccessModes = "accessModes"
PersistentVolumeClaimSpecFieldResources = "resources"
PersistentVolumeClaimSpecFieldSelector = "selector"
PersistentVolumeClaimSpecFieldStorageClassName = "storageClassName"
PersistentVolumeClaimSpecFieldVolumeName = "volumeName"
)
type PersistentVolumeClaimSpec struct {
AccessModes []string `json:"accessModes,omitempty"`
Resources ResourceRequirements `json:"resources,omitempty"`
Selector *LabelSelector `json:"selector,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
VolumeName string `json:"volumeName,omitempty"`
}

View File

@ -0,0 +1,16 @@
package client
const (
PersistentVolumeClaimStatusType = "persistentVolumeClaimStatus"
PersistentVolumeClaimStatusFieldAccessModes = "accessModes"
PersistentVolumeClaimStatusFieldCapacity = "capacity"
PersistentVolumeClaimStatusFieldConditions = "conditions"
PersistentVolumeClaimStatusFieldPhase = "phase"
)
type PersistentVolumeClaimStatus struct {
AccessModes []string `json:"accessModes,omitempty"`
Capacity map[string]string `json:"capacity,omitempty"`
Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty"`
Phase string `json:"phase,omitempty"`
}

View File

@ -11,14 +11,15 @@ const (
PodFieldAnnotations = "annotations"
PodFieldAutomountServiceAccountToken = "automountServiceAccountToken"
PodFieldContainers = "containers"
PodFieldCreated = "created"
PodFieldCreationTimestamp = "creationTimestamp"
PodFieldDNSPolicy = "dnsPolicy"
PodFieldDeletionTimestamp = "deletionTimestamp"
PodFieldDeprecatedServiceAccount = "deprecatedServiceAccount"
PodFieldFsgid = "fsgid"
PodFieldGids = "gids"
PodFieldFSGroup = "fsGroup"
PodFieldHostAliases = "hostAliases"
PodFieldHostname = "hostname"
PodFieldIPC = "ipc"
PodFieldImagePullSecrets = "imagePullSecrets"
PodFieldKind = "kind"
PodFieldLabels = "labels"
PodFieldName = "name"
@ -28,17 +29,16 @@ const (
PodFieldPID = "pid"
PodFieldPriority = "priority"
PodFieldPriorityClassName = "priorityClassName"
PodFieldPullSecrets = "pullSecrets"
PodFieldRemoved = "removed"
PodFieldRestart = "restart"
PodFieldRestartPolicy = "restartPolicy"
PodFieldRunAsNonRoot = "runAsNonRoot"
PodFieldRunAsUser = "runAsUser"
PodFieldSchedulerName = "schedulerName"
PodFieldServiceAccountName = "serviceAccountName"
PodFieldSubdomain = "subdomain"
PodFieldSupplementalGroups = "supplementalGroups"
PodFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds"
PodFieldTolerations = "tolerations"
PodFieldUid = "uid"
PodFieldUuid = "uuid"
PodFieldUID = "uid"
PodFieldVolumes = "volumes"
)
@ -49,14 +49,15 @@ type Pod struct {
Annotations map[string]string `json:"annotations,omitempty"`
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
Containers map[string]Container `json:"containers,omitempty"`
Created string `json:"created,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DNSPolicy string `json:"dnsPolicy,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
DeprecatedServiceAccount string `json:"deprecatedServiceAccount,omitempty"`
Fsgid *int64 `json:"fsgid,omitempty"`
Gids []int64 `json:"gids,omitempty"`
FSGroup *int64 `json:"fsGroup,omitempty"`
HostAliases map[string]HostAlias `json:"hostAliases,omitempty"`
Hostname string `json:"hostname,omitempty"`
IPC string `json:"ipc,omitempty"`
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
@ -66,18 +67,17 @@ type Pod struct {
PID string `json:"pid,omitempty"`
Priority *int64 `json:"priority,omitempty"`
PriorityClassName string `json:"priorityClassName,omitempty"`
PullSecrets []LocalObjectReference `json:"pullSecrets,omitempty"`
Removed string `json:"removed,omitempty"`
Restart string `json:"restart,omitempty"`
RestartPolicy string `json:"restartPolicy,omitempty"`
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
RunAsUser *int64 `json:"runAsUser,omitempty"`
SchedulerName string `json:"schedulerName,omitempty"`
ServiceAccountName string `json:"serviceAccountName,omitempty"`
Subdomain string `json:"subdomain,omitempty"`
SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
Tolerations []Toleration `json:"tolerations,omitempty"`
Uid *int64 `json:"uid,omitempty"`
Uuid string `json:"uuid,omitempty"`
Volumes []Volume `json:"volumes,omitempty"`
UID string `json:"uid,omitempty"`
Volumes map[string]Volume `json:"volumes,omitempty"`
}
type PodCollection struct {
types.Collection

View File

@ -1,16 +1,16 @@
package client
const (
PodSecurityContextType = "podSecurityContext"
PodSecurityContextFieldFsgid = "fsgid"
PodSecurityContextFieldGids = "gids"
PodSecurityContextFieldRunAsNonRoot = "runAsNonRoot"
PodSecurityContextFieldUid = "uid"
PodSecurityContextType = "podSecurityContext"
PodSecurityContextFieldFSGroup = "fsGroup"
PodSecurityContextFieldRunAsNonRoot = "runAsNonRoot"
PodSecurityContextFieldRunAsUser = "runAsUser"
PodSecurityContextFieldSupplementalGroups = "supplementalGroups"
)
type PodSecurityContext struct {
Fsgid *int64 `json:"fsgid,omitempty"`
Gids []int64 `json:"gids,omitempty"`
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
Uid *int64 `json:"uid,omitempty"`
FSGroup *int64 `json:"fsGroup,omitempty"`
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
RunAsUser *int64 `json:"runAsUser,omitempty"`
SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
}

View File

@ -7,25 +7,25 @@ const (
PodSpecFieldContainers = "containers"
PodSpecFieldDNSPolicy = "dnsPolicy"
PodSpecFieldDeprecatedServiceAccount = "deprecatedServiceAccount"
PodSpecFieldFsgid = "fsgid"
PodSpecFieldGids = "gids"
PodSpecFieldFSGroup = "fsGroup"
PodSpecFieldHostAliases = "hostAliases"
PodSpecFieldHostname = "hostname"
PodSpecFieldIPC = "ipc"
PodSpecFieldImagePullSecrets = "imagePullSecrets"
PodSpecFieldNet = "net"
PodSpecFieldNodeName = "nodeName"
PodSpecFieldPID = "pid"
PodSpecFieldPriority = "priority"
PodSpecFieldPriorityClassName = "priorityClassName"
PodSpecFieldPullSecrets = "pullSecrets"
PodSpecFieldRestart = "restart"
PodSpecFieldRestartPolicy = "restartPolicy"
PodSpecFieldRunAsNonRoot = "runAsNonRoot"
PodSpecFieldRunAsUser = "runAsUser"
PodSpecFieldSchedulerName = "schedulerName"
PodSpecFieldServiceAccountName = "serviceAccountName"
PodSpecFieldSubdomain = "subdomain"
PodSpecFieldSupplementalGroups = "supplementalGroups"
PodSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds"
PodSpecFieldTolerations = "tolerations"
PodSpecFieldUid = "uid"
PodSpecFieldVolumes = "volumes"
)
@ -35,24 +35,24 @@ type PodSpec struct {
Containers map[string]Container `json:"containers,omitempty"`
DNSPolicy string `json:"dnsPolicy,omitempty"`
DeprecatedServiceAccount string `json:"deprecatedServiceAccount,omitempty"`
Fsgid *int64 `json:"fsgid,omitempty"`
Gids []int64 `json:"gids,omitempty"`
FSGroup *int64 `json:"fsGroup,omitempty"`
HostAliases map[string]HostAlias `json:"hostAliases,omitempty"`
Hostname string `json:"hostname,omitempty"`
IPC string `json:"ipc,omitempty"`
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"`
Net string `json:"net,omitempty"`
NodeName string `json:"nodeName,omitempty"`
PID string `json:"pid,omitempty"`
Priority *int64 `json:"priority,omitempty"`
PriorityClassName string `json:"priorityClassName,omitempty"`
PullSecrets []LocalObjectReference `json:"pullSecrets,omitempty"`
Restart string `json:"restart,omitempty"`
RestartPolicy string `json:"restartPolicy,omitempty"`
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
RunAsUser *int64 `json:"runAsUser,omitempty"`
SchedulerName string `json:"schedulerName,omitempty"`
ServiceAccountName string `json:"serviceAccountName,omitempty"`
Subdomain string `json:"subdomain,omitempty"`
SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
Tolerations []Toleration `json:"tolerations,omitempty"`
Uid *int64 `json:"uid,omitempty"`
Volumes []Volume `json:"volumes,omitempty"`
Volumes map[string]Volume `json:"volumes,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
PodTemplateSpecType = "podTemplateSpec"
PodTemplateSpecFieldObjectMeta = "objectMeta"
PodTemplateSpecFieldSpec = "spec"
)
type PodTemplateSpec struct {
ObjectMeta ObjectMeta `json:"objectMeta,omitempty"`
Spec PodSpec `json:"spec,omitempty"`
}

View File

@ -0,0 +1,99 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
ReplicaSetType = "replicaSet"
ReplicaSetFieldAPIVersion = "apiVersion"
ReplicaSetFieldAnnotations = "annotations"
ReplicaSetFieldCreationTimestamp = "creationTimestamp"
ReplicaSetFieldDeletionTimestamp = "deletionTimestamp"
ReplicaSetFieldDeploy = "deploy"
ReplicaSetFieldKind = "kind"
ReplicaSetFieldLabels = "labels"
ReplicaSetFieldName = "name"
ReplicaSetFieldNamespace = "namespace"
ReplicaSetFieldStatus = "status"
ReplicaSetFieldTemplate = "template"
ReplicaSetFieldUID = "uid"
)
type ReplicaSet struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
Deploy *DeployParams `json:"deploy,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
Status ReplicaSetStatus `json:"status,omitempty"`
Template PodTemplateSpec `json:"template,omitempty"`
UID string `json:"uid,omitempty"`
}
type ReplicaSetCollection struct {
types.Collection
Data []ReplicaSet `json:"data,omitempty"`
client *ReplicaSetClient
}
type ReplicaSetClient struct {
apiClient *Client
}
type ReplicaSetOperations interface {
List(opts *types.ListOpts) (*ReplicaSetCollection, error)
Create(opts *ReplicaSet) (*ReplicaSet, error)
Update(existing *ReplicaSet, updates interface{}) (*ReplicaSet, error)
ByID(id string) (*ReplicaSet, error)
Delete(container *ReplicaSet) error
}
func newReplicaSetClient(apiClient *Client) *ReplicaSetClient {
return &ReplicaSetClient{
apiClient: apiClient,
}
}
func (c *ReplicaSetClient) Create(container *ReplicaSet) (*ReplicaSet, error) {
resp := &ReplicaSet{}
err := c.apiClient.Ops.DoCreate(ReplicaSetType, container, resp)
return resp, err
}
func (c *ReplicaSetClient) Update(existing *ReplicaSet, updates interface{}) (*ReplicaSet, error) {
resp := &ReplicaSet{}
err := c.apiClient.Ops.DoUpdate(ReplicaSetType, &existing.Resource, updates, resp)
return resp, err
}
func (c *ReplicaSetClient) List(opts *types.ListOpts) (*ReplicaSetCollection, error) {
resp := &ReplicaSetCollection{}
err := c.apiClient.Ops.DoList(ReplicaSetType, opts, resp)
resp.client = c
return resp, err
}
func (cc *ReplicaSetCollection) Next() (*ReplicaSetCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &ReplicaSetCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *ReplicaSetClient) ByID(id string) (*ReplicaSet, error) {
resp := &ReplicaSet{}
err := c.apiClient.Ops.DoByID(ReplicaSetType, id, resp)
return resp, err
}
func (c *ReplicaSetClient) Delete(container *ReplicaSet) error {
return c.apiClient.Ops.DoResourceDelete(ReplicaSetType, &container.Resource)
}
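The generated ReplicaSet client follows the usual Norman pattern: every operation goes through the shared Ops field of the parent Client, and the collection keeps a back-reference to its client for pagination. A minimal usage sketch, assuming an already-configured *Client (its construction is not part of this change) and written from inside the client package, since newReplicaSetClient is unexported:
package client
import (
	"fmt"

	"github.com/rancher/norman/types"
)
// listReplicaSets is a sketch only: apiClient is assumed to be a configured
// *Client whose Ops field can reach the API server.
func listReplicaSets(apiClient *Client) error {
	replicaSets := newReplicaSetClient(apiClient)

	collection, err := replicaSets.List(&types.ListOpts{})
	if err != nil {
		return err
	}
	for _, rs := range collection.Data {
		// Name, Namespace and Status are the flattened fields defined above.
		fmt.Println(rs.Namespace, rs.Name, rs.Status.ReadyReplicas)
	}
	return nil
}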

View File

@ -0,0 +1,18 @@
package client
const (
ReplicaSetConditionType = "replicaSetCondition"
ReplicaSetConditionFieldLastTransitionTime = "lastTransitionTime"
ReplicaSetConditionFieldMessage = "message"
ReplicaSetConditionFieldReason = "reason"
ReplicaSetConditionFieldStatus = "status"
ReplicaSetConditionFieldType = "type"
)
type ReplicaSetCondition struct {
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
Message string `json:"message,omitempty"`
Reason string `json:"reason,omitempty"`
Status string `json:"status,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
ReplicaSetSpecType = "replicaSetSpec"
ReplicaSetSpecFieldDeploy = "deploy"
ReplicaSetSpecFieldTemplate = "template"
)
type ReplicaSetSpec struct {
Deploy *DeployParams `json:"deploy,omitempty"`
Template PodTemplateSpec `json:"template,omitempty"`
}

View File

@ -0,0 +1,20 @@
package client
const (
ReplicaSetStatusType = "replicaSetStatus"
ReplicaSetStatusFieldAvailableReplicas = "availableReplicas"
ReplicaSetStatusFieldConditions = "conditions"
ReplicaSetStatusFieldFullyLabeledReplicas = "fullyLabeledReplicas"
ReplicaSetStatusFieldObservedGeneration = "observedGeneration"
ReplicaSetStatusFieldReadyReplicas = "readyReplicas"
ReplicaSetStatusFieldReplicas = "replicas"
)
type ReplicaSetStatus struct {
AvailableReplicas int64 `json:"availableReplicas,omitempty"`
Conditions []ReplicaSetCondition `json:"conditions,omitempty"`
FullyLabeledReplicas int64 `json:"fullyLabeledReplicas,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
ReadyReplicas int64 `json:"readyReplicas,omitempty"`
Replicas int64 `json:"replicas,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
RollingUpdateDeploymentType = "rollingUpdateDeployment"
RollingUpdateDeploymentFieldMaxSurge = "maxSurge"
RollingUpdateDeploymentFieldMaxUnavailable = "maxUnavailable"
)
type RollingUpdateDeployment struct {
MaxSurge string `json:"maxSurge,omitempty"`
MaxUnavailable string `json:"maxUnavailable,omitempty"`
}

View File

@ -0,0 +1,10 @@
package client
const (
RollingUpdateStatefulSetStrategyType = "rollingUpdateStatefulSetStrategy"
RollingUpdateStatefulSetStrategyFieldPartition = "partition"
)
type RollingUpdateStatefulSetStrategy struct {
Partition *int64 `json:"partition,omitempty"`
}

View File

@ -2,21 +2,21 @@ package client
const (
SecurityContextType = "securityContext"
SecurityContextFieldAdd = "add"
SecurityContextFieldAllowPrivilegeEscalation = "allowPrivilegeEscalation"
SecurityContextFieldCapAdd = "capAdd"
SecurityContextFieldCapDrop = "capDrop"
SecurityContextFieldDrop = "drop"
SecurityContextFieldPrivileged = "privileged"
SecurityContextFieldReadOnly = "readOnly"
SecurityContextFieldReadOnlyRootFilesystem = "readOnlyRootFilesystem"
SecurityContextFieldRunAsNonRoot = "runAsNonRoot"
SecurityContextFieldUid = "uid"
SecurityContextFieldRunAsUser = "runAsUser"
)
type SecurityContext struct {
Add []string `json:"add,omitempty"`
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"`
CapAdd []string `json:"capAdd,omitempty"`
CapDrop []string `json:"capDrop,omitempty"`
Drop []string `json:"drop,omitempty"`
Privileged *bool `json:"privileged,omitempty"`
ReadOnly *bool `json:"readOnly,omitempty"`
ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"`
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
Uid *int64 `json:"uid,omitempty"`
RunAsUser *int64 `json:"runAsUser,omitempty"`
}

View File

@ -0,0 +1,105 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
StatefulSetType = "statefulSet"
StatefulSetFieldAPIVersion = "apiVersion"
StatefulSetFieldAnnotations = "annotations"
StatefulSetFieldCreationTimestamp = "creationTimestamp"
StatefulSetFieldDeletionTimestamp = "deletionTimestamp"
StatefulSetFieldDeploy = "deploy"
StatefulSetFieldKind = "kind"
StatefulSetFieldLabels = "labels"
StatefulSetFieldName = "name"
StatefulSetFieldNamespace = "namespace"
StatefulSetFieldServiceName = "serviceName"
StatefulSetFieldStatus = "status"
StatefulSetFieldTemplate = "template"
StatefulSetFieldUID = "uid"
StatefulSetFieldUpdateStrategy = "updateStrategy"
StatefulSetFieldVolumeClaimTemplates = "volumeClaimTemplates"
)
type StatefulSet struct {
types.Resource
APIVersion string `json:"apiVersion,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
CreationTimestamp string `json:"creationTimestamp,omitempty"`
DeletionTimestamp string `json:"deletionTimestamp,omitempty"`
Deploy *DeployParams `json:"deploy,omitempty"`
Kind string `json:"kind,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
ServiceName string `json:"serviceName,omitempty"`
Status StatefulSetStatus `json:"status,omitempty"`
Template PodTemplateSpec `json:"template,omitempty"`
UID string `json:"uid,omitempty"`
UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"`
VolumeClaimTemplates map[string]PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
}
type StatefulSetCollection struct {
types.Collection
Data []StatefulSet `json:"data,omitempty"`
client *StatefulSetClient
}
type StatefulSetClient struct {
apiClient *Client
}
type StatefulSetOperations interface {
List(opts *types.ListOpts) (*StatefulSetCollection, error)
Create(opts *StatefulSet) (*StatefulSet, error)
Update(existing *StatefulSet, updates interface{}) (*StatefulSet, error)
ByID(id string) (*StatefulSet, error)
Delete(container *StatefulSet) error
}
func newStatefulSetClient(apiClient *Client) *StatefulSetClient {
return &StatefulSetClient{
apiClient: apiClient,
}
}
func (c *StatefulSetClient) Create(container *StatefulSet) (*StatefulSet, error) {
resp := &StatefulSet{}
err := c.apiClient.Ops.DoCreate(StatefulSetType, container, resp)
return resp, err
}
func (c *StatefulSetClient) Update(existing *StatefulSet, updates interface{}) (*StatefulSet, error) {
resp := &StatefulSet{}
err := c.apiClient.Ops.DoUpdate(StatefulSetType, &existing.Resource, updates, resp)
return resp, err
}
func (c *StatefulSetClient) List(opts *types.ListOpts) (*StatefulSetCollection, error) {
resp := &StatefulSetCollection{}
err := c.apiClient.Ops.DoList(StatefulSetType, opts, resp)
resp.client = c
return resp, err
}
func (cc *StatefulSetCollection) Next() (*StatefulSetCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &StatefulSetCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *StatefulSetClient) ByID(id string) (*StatefulSet, error) {
resp := &StatefulSet{}
err := c.apiClient.Ops.DoByID(StatefulSetType, id, resp)
return resp, err
}
func (c *StatefulSetClient) Delete(container *StatefulSet) error {
return c.apiClient.Ops.DoResourceDelete(StatefulSetType, &container.Resource)
}
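The StatefulSet collection uses the same cursor-style pagination as the other generated clients: Next follows Pagination.Next and returns (nil, nil) once there are no further pages. A hedged sketch of draining every page, under the same assumption of an already-configured *Client:
package client
import "github.com/rancher/norman/types"
// allStatefulSets is a sketch only; it walks every page of results by
// following Pagination.Next until Next returns nil.
func allStatefulSets(apiClient *Client) ([]StatefulSet, error) {
	ssClient := newStatefulSetClient(apiClient)

	var all []StatefulSet
	page, err := ssClient.List(&types.ListOpts{})
	for err == nil && page != nil {
		all = append(all, page.Data...)
		page, err = page.Next()
	}
	return all, err
}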

View File

@ -0,0 +1,18 @@
package client
const (
StatefulSetSpecType = "statefulSetSpec"
StatefulSetSpecFieldDeploy = "deploy"
StatefulSetSpecFieldServiceName = "serviceName"
StatefulSetSpecFieldTemplate = "template"
StatefulSetSpecFieldUpdateStrategy = "updateStrategy"
StatefulSetSpecFieldVolumeClaimTemplates = "volumeClaimTemplates"
)
type StatefulSetSpec struct {
Deploy *DeployParams `json:"deploy,omitempty"`
ServiceName string `json:"serviceName,omitempty"`
Template PodTemplateSpec `json:"template,omitempty"`
UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"`
VolumeClaimTemplates map[string]PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
}

View File

@ -0,0 +1,24 @@
package client
const (
StatefulSetStatusType = "statefulSetStatus"
StatefulSetStatusFieldCollisionCount = "collisionCount"
StatefulSetStatusFieldCurrentReplicas = "currentReplicas"
StatefulSetStatusFieldCurrentRevision = "currentRevision"
StatefulSetStatusFieldObservedGeneration = "observedGeneration"
StatefulSetStatusFieldReadyReplicas = "readyReplicas"
StatefulSetStatusFieldReplicas = "replicas"
StatefulSetStatusFieldUpdateRevision = "updateRevision"
StatefulSetStatusFieldUpdatedReplicas = "updatedReplicas"
)
type StatefulSetStatus struct {
CollisionCount *int64 `json:"collisionCount,omitempty"`
CurrentReplicas int64 `json:"currentReplicas,omitempty"`
CurrentRevision string `json:"currentRevision,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
ReadyReplicas int64 `json:"readyReplicas,omitempty"`
Replicas int64 `json:"replicas,omitempty"`
UpdateRevision string `json:"updateRevision,omitempty"`
UpdatedReplicas int64 `json:"updatedReplicas,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
StatefulSetUpdateStrategyType = "statefulSetUpdateStrategy"
StatefulSetUpdateStrategyFieldRollingUpdate = "rollingUpdate"
StatefulSetUpdateStrategyFieldType = "type"
)
type StatefulSetUpdateStrategy struct {
RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,16 @@
package client
const (
TaintType = "taint"
TaintFieldEffect = "effect"
TaintFieldKey = "key"
TaintFieldTimeAdded = "timeAdded"
TaintFieldValue = "value"
)
type Taint struct {
Effect string `json:"effect,omitempty"`
Key string `json:"key,omitempty"`
TimeAdded string `json:"timeAdded,omitempty"`
Value string `json:"value,omitempty"`
}

View File

@ -3,4 +3,4 @@ github.com/rancher/types
k8s.io/kubernetes v1.8.3 transitive=true,staging=true
bitbucket.org/ww/goautoneg a547fc61f48d567d5b4ec6f8aee5573d8efce11d https://github.com/rancher/goautoneg.git
github.com/rancher/norman cad01ba487d6c071911c619babc45ae80c252229
github.com/rancher/norman 80024df69414f7cce0847ea72b0557f14edbc852

View File

@ -35,6 +35,13 @@ func NewObjectClient(namespace string, restClient rest.Interface, apiResource *m
}
}
func (p *ObjectClient) getAPIPrefix() string {
if p.gvk.Group == "" {
return "api"
}
return "apis"
}
func (p *ObjectClient) Create(o runtime.Object) (runtime.Object, error) {
ns := p.ns
if obj, ok := o.(metav1.Object); ok && obj.GetNamespace() != "" {
@ -42,7 +49,7 @@ func (p *ObjectClient) Create(o runtime.Object) (runtime.Object, error) {
}
result := p.Factory.Object()
err := p.restClient.Post().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
NamespaceIfScoped(ns, p.resource.Namespaced).
Resource(p.resource.Name).
Body(o).
@ -54,7 +61,7 @@ func (p *ObjectClient) Create(o runtime.Object) (runtime.Object, error) {
func (p *ObjectClient) Get(name string, opts metav1.GetOptions) (runtime.Object, error) {
result := p.Factory.Object()
err := p.restClient.Get().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
NamespaceIfScoped(p.ns, p.resource.Namespaced).
Resource(p.resource.Name).
VersionedParams(&opts, dynamic.VersionedParameterEncoderWithV1Fallback).
@ -74,7 +81,7 @@ func (p *ObjectClient) Update(name string, o runtime.Object) (runtime.Object, er
return result, errors.New("object missing name")
}
err := p.restClient.Put().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
NamespaceIfScoped(ns, p.resource.Namespaced).
Resource(p.resource.Name).
Name(name).
@ -86,7 +93,7 @@ func (p *ObjectClient) Update(name string, o runtime.Object) (runtime.Object, er
func (p *ObjectClient) Delete(name string, opts *metav1.DeleteOptions) error {
return p.restClient.Delete().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
NamespaceIfScoped(p.ns, p.resource.Namespaced).
Resource(p.resource.Name).
Name(name).
@ -98,7 +105,7 @@ func (p *ObjectClient) Delete(name string, opts *metav1.DeleteOptions) error {
func (p *ObjectClient) List(opts metav1.ListOptions) (runtime.Object, error) {
result := p.Factory.List()
return result, p.restClient.Get().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
NamespaceIfScoped(p.ns, p.resource.Namespaced).
Resource(p.resource.Name).
VersionedParams(&opts, dynamic.VersionedParameterEncoderWithV1Fallback).
@ -108,7 +115,7 @@ func (p *ObjectClient) List(opts metav1.ListOptions) (runtime.Object, error) {
func (p *ObjectClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
r, err := p.restClient.Get().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
Prefix("watch").
Namespace(p.ns).
NamespaceIfScoped(p.ns, p.resource.Namespaced).
@ -127,7 +134,7 @@ func (p *ObjectClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
func (p *ObjectClient) DeleteCollection(deleteOptions *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
return p.restClient.Delete().
Prefix("apis", p.gvk.Group, p.gvk.Version).
Prefix(p.getAPIPrefix(), p.gvk.Group, p.gvk.Version).
NamespaceIfScoped(p.ns, p.resource.Namespaced).
Resource(p.resource.Name).
VersionedParams(&listOptions, dynamic.VersionedParameterEncoderWithV1Fallback).
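The new getAPIPrefix helper is what lets a single ObjectClient address both legacy core resources, served under /api/&lt;version&gt;, and group resources, served under /apis/&lt;group&gt;/&lt;version&gt;. A small illustrative sketch of the same decision; the GroupVersionKind values below are examples, not taken from this change:
package main
import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)
// apiPrefixFor mirrors the getAPIPrefix logic above: the core group has an
// empty Group and lives under /api, every named group lives under /apis.
func apiPrefixFor(gvk schema.GroupVersionKind) string {
	if gvk.Group == "" {
		return "api"
	}
	return "apis"
}
func main() {
	fmt.Println(apiPrefixFor(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}))                            // api
	fmt.Println(apiPrefixFor(schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"})) // apis
}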

View File

@ -55,6 +55,10 @@ func getTypeString(nullable bool, typeName string, schema *types.Schema, schemas
name = "float64"
case "int":
name = "int64"
case "multiline":
return "string"
case "masked":
return "string"
case "password":
return "string"
case "date":

View File

@ -8,6 +8,7 @@ import (
type Embed struct {
Field string
ReadOnly bool
ignoreOverride bool
embeddedFields []string
}
@ -56,6 +57,10 @@ func (e *Embed) ModifySchema(schema *types.Schema, schemas *types.Schemas) error
e.Field, schema.ID, name)
}
}
if e.ReadOnly {
field.Create = false
field.Update = false
}
schema.ResourceFields[name] = field
e.embeddedFields = append(e.embeddedFields, name)
}

View File

@ -3,6 +3,8 @@ package mapper
import (
"fmt"
"strings"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
)
@ -12,39 +14,69 @@ type Move struct {
}
func (m Move) FromInternal(data map[string]interface{}) {
if v, ok := data[m.From]; ok {
delete(data, m.From)
data[m.To] = v
if v, ok := RemoveValue(data, strings.Split(m.From, "/")...); ok {
PutValue(data, v, strings.Split(m.To, "/")...)
}
}
func (m Move) ToInternal(data map[string]interface{}) {
if v, ok := data[m.To]; ok {
delete(data, m.To)
data[m.From] = v
if v, ok := RemoveValue(data, strings.Split(m.To, "/")...); ok {
PutValue(data, v, strings.Split(m.From, "/")...)
}
}
func (m Move) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
internalSchema, err := getInternal(schema)
func (m Move) ModifySchema(s *types.Schema, schemas *types.Schemas) error {
internalSchema, err := getInternal(s)
if err != nil {
return err
}
field, ok := internalSchema.ResourceFields[m.From]
_, _, fromInternalField, ok, err := getField(internalSchema, schemas, m.From)
if err != nil {
return err
}
if !ok {
return fmt.Errorf("missing field %s on internal schema %s", m.From, internalSchema.ID)
}
_, ok = schema.ResourceFields[m.To]
if ok {
return fmt.Errorf("field %s already exists on schema %s", m.From, internalSchema.ID)
fromSchema, _, _, _, err := getField(s, schemas, m.From)
if err != nil {
return err
}
delete(schema.ResourceFields, m.From)
toSchema, toFieldName, toField, ok, err := getField(s, schemas, m.To)
if err != nil {
return err
}
_, ok = toSchema.ResourceFields[toFieldName]
if ok && !strings.Contains(m.To, "/") {
return fmt.Errorf("field %s already exists on schema %s", m.To, s.ID)
}
field.CodeName = convert.Capitalize(m.To)
schema.ResourceFields[m.To] = field
delete(fromSchema.ResourceFields, m.From)
toField.CodeName = convert.Capitalize(toFieldName)
toSchema.ResourceFields[toFieldName] = fromInternalField
return nil
}
func getField(schema *types.Schema, schemas *types.Schemas, target string) (*types.Schema, string, types.Field, bool, error) {
parts := strings.Split(target, "/")
for i, part := range parts {
if i == len(parts)-1 {
continue
}
subSchema := schemas.Schema(&schema.Version, schema.ResourceFields[part].Type)
if subSchema == nil {
return nil, "", types.Field{}, false, fmt.Errorf("failed to find field or schema for %s on %s", part, schema.ID)
}
schema = subSchema
}
name := parts[len(parts)-1]
f, ok := schema.ResourceFields[name]
return schema, name, f, ok, nil
}
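Move now accepts slash-separated paths on both sides, so a value can be pulled out of a nested map rather than only renamed at the top level. A data-only sketch of FromInternal (the schema rewiring happens separately in ModifySchema); the field names here are illustrative:
package mapper
import "fmt"
// moveExample is a sketch only: it relocates a nested value to the top level
// using the new slash-separated path handling (RemoveValue + PutValue).
func moveExample() {
	m := Move{From: "nodeInfo/kubeletVersion", To: "kubeletVersion"}

	data := map[string]interface{}{
		"nodeInfo": map[string]interface{}{
			"kubeletVersion": "v1.8.3",
			"kernelVersion":  "4.4.0-98-generic",
		},
	}

	m.FromInternal(data)

	fmt.Println(data["kubeletVersion"]) // "v1.8.3"
	fmt.Println(data["nodeInfo"])       // only kernelVersion is left
}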

View File

@ -6,14 +6,14 @@ type Object struct {
types.TypeMapper
}
func NewObject(mappers []types.Mapper) *Object {
func NewObject(mappers ...types.Mapper) *Object {
return &Object{
TypeMapper: types.TypeMapper{
Mappers: append(mappers,
&Drop{"status"},
Mappers: append([]types.Mapper{
&Embed{Field: "metadata"},
&Embed{Field: "spec"},
),
&ReadOnly{"status"},
}, mappers...),
},
}
}
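NewObject now takes its extra mappers variadically and runs them after the defaults, and the status field is kept but made read-only instead of being dropped. A hedged sketch of composing a type-specific mapper on top of those defaults; the extra mappers and field names are illustrative only:
package mapper
import "github.com/rancher/norman/types"
// newNodeObject is a sketch only: the mappers passed here are appended after
// NewObject's defaults (Embed "metadata", Embed "spec", ReadOnly "status").
func newNodeObject() types.Mapper {
	return NewObject(
		Move{From: "nodeInfo/kubeletVersion", To: "kubeletVersion"},
		&ReadOnly{Field: "capacity"},
	)
}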

View File

@ -0,0 +1,30 @@
package mapper
import (
"fmt"
"github.com/rancher/norman/types"
)
type ReadOnly struct {
Field string
}
func (r *ReadOnly) FromInternal(data map[string]interface{}) {
}
func (r *ReadOnly) ToInternal(data map[string]interface{}) {
}
func (r *ReadOnly) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
field, ok := schema.ResourceFields[r.Field]
if !ok {
return fmt.Errorf("failed to find field %s on schema %s", r.Field, schema.ID)
}
field.Create = false
field.Update = false
schema.ResourceFields[r.Field] = field
return nil
}

View File

@ -24,7 +24,9 @@ func (s SliceToMap) FromInternal(data map[string]interface{}) {
}
}
data[s.Field] = result
if len(result) > 0 {
data[s.Field] = result
}
}
func (s SliceToMap) ToInternal(data map[string]interface{}) {
@ -39,7 +41,9 @@ func (s SliceToMap) ToInternal(data map[string]interface{}) {
}
}
data[s.Field] = result
if len(result) > 0 {
data[s.Field] = result
}
}
func (s SliceToMap) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {

View File

@ -13,6 +13,7 @@ import (
var (
resourceType = reflect.TypeOf(Resource{})
typeType = reflect.TypeOf(metav1.TypeMeta{})
metaType = reflect.TypeOf(metav1.ObjectMeta{})
blacklistNames = map[string]bool{
"links": true,
@ -114,6 +115,9 @@ func (s *Schemas) readFields(schema *Schema, t reflect.Type) error {
schema.ResourceMethods = []string{"GET", "PUT", "DELETE"}
}
hasType := false
hasMeta := false
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
@ -128,6 +132,14 @@ func (s *Schemas) readFields(schema *Schema, t reflect.Type) error {
continue
}
if field.Anonymous && jsonName == "" && field.Type == typeType {
hasType = true
}
if field.Anonymous && jsonName == "metadata" && field.Type == metaType {
hasMeta = true
}
if field.Anonymous && jsonName == "" {
t := field.Type
if t.Kind() == reflect.Ptr {
@ -177,15 +189,15 @@ func (s *Schemas) readFields(schema *Schema, t reflect.Type) error {
schemaField.Type = inferedType
}
if field.Type == metaType {
schema.CollectionMethods = []string{"GET", "POST"}
schema.ResourceMethods = []string{"GET", "PUT", "DELETE"}
}
logrus.Debugf("Setting field %s.%s: %#v", schema.ID, fieldName, schemaField)
schema.ResourceFields[fieldName] = schemaField
}
if hasType && hasMeta {
schema.CollectionMethods = []string{"GET", "POST"}
schema.ResourceMethods = []string{"GET", "PUT", "DELETE"}
}
return nil
}
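Collection and resource methods are now attached only when a struct looks like a top-level Kubernetes object, i.e. it anonymously embeds metav1.TypeMeta and carries metav1.ObjectMeta under the json name "metadata"; a sub-struct that merely reuses ObjectMeta, such as PodTemplateSpec, no longer picks them up. An illustrative type that would qualify under these rules (the Widget names are hypothetical):
package example
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Widget is a sketch only. Embedding TypeMeta anonymously and ObjectMeta as
// "metadata" makes readFields set CollectionMethods to GET/POST and
// ResourceMethods to GET/PUT/DELETE for the generated schema.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec,omitempty"`
	Status WidgetStatus `json:"status,omitempty"`
}
type WidgetSpec struct {
	Replicas int64 `json:"replicas,omitempty"`
}
type WidgetStatus struct {
	Ready int64 `json:"ready,omitempty"`
}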

View File

@ -13,6 +13,8 @@ type SchemaCollection struct {
Data []Schema
}
type SchemaInitFunc func(*Schemas) *Schemas
type Schemas struct {
schemasByPath map[string]map[string]*Schema
mappers map[string]map[string]Mapper
@ -28,6 +30,10 @@ func NewSchemas() *Schemas {
}
}
func (s *Schemas) Init(initFunc SchemaInitFunc) *Schemas {
return initFunc(s)
}
func (s *Schemas) Err() error {
return NewErrors(s.errors)
}
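SchemaInitFunc and Init let schema registration be split into small chained functions instead of one long builder expression. A hedged sketch of the intended usage, modeled on the schema packages touched in this change; the example version and type are hypothetical:
package schema
import "github.com/rancher/norman/types"
var exampleVersion = types.APIVersion{
	Group: "example.cattle.io",
	Path:  "/v1-example",
}
// Widget is a placeholder type for this sketch.
type Widget struct {
	Name string `json:"name,omitempty"`
}
func widgetTypes(schemas *types.Schemas) *types.Schemas {
	return schemas.MustImport(&exampleVersion, Widget{})
}
// Each Init call hands the *Schemas to one SchemaInitFunc and returns it,
// so registration reads as a chain.
var schemas = types.NewSchemas().
	Init(widgetTypes)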

1
vendor/k8s.io/kubernetes/.bazelrc generated vendored Symbolic link
View File

@ -0,0 +1 @@
build/root/.bazelrc

30
vendor/k8s.io/kubernetes/.generated_files generated vendored Normal file
View File

@ -0,0 +1,30 @@
# Files that should be ignored by tools which do not want to consider generated
# code.
#
# https://github.com/kubernetes/contrib/blob/master/mungegithub/mungers/size.go
#
# This file is a series of lines, each of the form:
# <type> <name>
#
# Type can be:
# path - an exact path to a single file
# file-name - an exact leaf filename, regardless of path
# path-prefix - a prefix match on the file path
# file-prefix - a prefix match of the leaf filename (no path)
# paths-from-repo - read a file from the repo and load file paths
#
file-prefix zz_generated.
file-name BUILD
file-name types.generated.go
file-name generated.pb.go
file-name generated.proto
file-name types_swagger_doc_generated.go
path-prefix Godeps/
path-prefix vendor/
path-prefix api/swagger-spec/
path-prefix pkg/generated/
paths-from-repo docs/.generated_docs

10
vendor/k8s.io/kubernetes/.gitattributes generated vendored Normal file
View File

@ -0,0 +1,10 @@
hack/verify-flags/known-flags.txt merge=union
test/test_owners.csv merge=union
**/zz_generated.*.go -diff
**/types.generated.go -diff
**/generated.pb.go -diff
**/generated.proto -diff
**/types_swagger_doc_generated.go -diff
docs/api-reference/** -diff
federation/docs/api-reference/** -diff

127
vendor/k8s.io/kubernetes/.gitignore generated vendored Normal file
View File

@ -0,0 +1,127 @@
# OSX leaves these everywhere on SMB shares
._*
# OSX trash
.DS_Store
# Eclipse files
.classpath
.project
.settings/**
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
.idea/
*.iml
# Vscode files
.vscode
# This is where the result of the go build goes
/output*/
/_output*/
/_output
# Emacs save files
*~
\#*\#
.\#*
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# cscope-related files
cscope.*
# Go test binaries
*.test
/hack/.test-cmd-auth
# JUnit test output from ginkgo e2e tests
/junit*.xml
# Mercurial files
**/.hg
**/.hg*
# Vagrant
.vagrant
network_closure.sh
# Local cluster env variables
/cluster/env.sh
# Compiled binaries in third_party
/third_party/pkg
# Also ignore etcd installed by hack/install-etcd.sh
/third_party/etcd*
/default.etcd
# User cluster configs
.kubeconfig
.tags*
# Version file for dockerized build
.dockerized-kube-version-defs
# Web UI
/www/master/node_modules/
/www/master/npm-debug.log
/www/master/shared/config/development.json
# Karma output
/www/test_out
# precommit temporary directories created by ./hack/verify-generated-docs.sh and ./hack/lib/util.sh
/_tmp/
/doc_tmp/
# Test artifacts produced by Jenkins jobs
/_artifacts/
# Go dependencies installed on Jenkins
/_gopath/
# Config directories created by gcloud and gsutil on Jenkins
/.config/gcloud*/
/.gsutil/
# CoreOS stuff
/cluster/libvirt-coreos/coreos_*.img
# Juju Stuff
/cluster/juju/charms/*
/cluster/juju/bundles/local.yaml
# Downloaded Kubernetes binary release
/kubernetes/
# direnv .envrc files
.envrc
# Downloaded kubernetes binary release tar ball
kubernetes.tar.gz
# generated files in any directory
# TODO(thockin): uncomment this when we stop committing the generated files.
#zz_generated.*
zz_generated.openapi.go
# make-related metadata
/.make/
# Just in time generated data in the source, should never be commited
/test/e2e/generated/bindata.go
# This file used by some vendor repos (e.g. github.com/go-openapi/...) to store secret variables and should not be ignored
!\.drone\.sec
# Godeps workspace
/Godeps/_workspace
/bazel-*
*.pyc

1
vendor/k8s.io/kubernetes/.kazelcfg.json generated vendored Symbolic link
View File

@ -0,0 +1 @@
build/root/.kazelcfg.json

1
vendor/k8s.io/kubernetes/BUILD.bazel generated vendored Symbolic link
View File

@ -0,0 +1 @@
build/root/BUILD.root

2112
vendor/k8s.io/kubernetes/CHANGELOG-1.8.md generated vendored Normal file

File diff suppressed because it is too large

9
vendor/k8s.io/kubernetes/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,9 @@
# Contributing
Information about contributing to the
[kubernetes code repo](README.md) lives in the
[kubernetes community repo](https://github.com/kubernetes/community)
(it's a big topic).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CONTRIBUTING.md?pixel)]()

202
vendor/k8s.io/kubernetes/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
vendor/k8s.io/kubernetes/Makefile generated vendored Symbolic link
View File

@ -0,0 +1 @@
build/root/Makefile

1
vendor/k8s.io/kubernetes/Makefile.generated_files generated vendored Symbolic link
View File

@ -0,0 +1 @@
build/root/Makefile.generated_files

17
vendor/k8s.io/kubernetes/OWNERS generated vendored Normal file
View File

@ -0,0 +1,17 @@
reviewers:
- brendandburns
- dchen1107
- jbeda
- lavalamp
- smarterclayton
- thockin
approvers:
- bgrant0607
- brendandburns
- dchen1107
- jbeda
- monopole # To move code per kubernetes/community#598
- lavalamp
- smarterclayton
- thockin
- wojtek-t

177
vendor/k8s.io/kubernetes/OWNERS_ALIASES generated vendored Normal file
View File

@ -0,0 +1,177 @@
aliases:
sig-scheduling-maintainers:
- bsalamat
- davidopp
- k82cn
- timothysc
- wojtek-t
sig-scheduling:
- bsalamat
- davidopp
- jayunit100
- k82cn
- resouer
- timothysc
- wojtek-t
sig-cli-maintainers:
- adohe
- brendandburns
- deads2k
- fabianofranz
- janetkuo
- liggitt
- pwittrock
- smarterclayton
sig-cli:
- adohe
- deads2k
- derekwaynecarr
- dims
- dshulyak
- eparis
- ericchiang
- fabianofranz
- ghodss
- mengqiy
- rootfs
- shiywang
- smarterclayton
- soltysh
- sttts
sig-testing-reviewers:
- fejta
- ixdy
- rmmh
- spiffxp
- spxtr
sig-testing-approvers:
- fejta
- ixdy
- rmmh
- spiffxp
- spxtr
sig-node-reviewers:
- Random-Liu
- dashpole
- dchen1107
- derekwaynecarr
- dims
- feiskyer
- mtaufen
- ncdc
- pmorie
- resouer
- sjpotter
- tallclair
- tmrts
- vishh
- yifan-gu
- yujuhong
sig-network-approvers:
- bowei
- caseydavenport
- danwinship
- dcbw
- dnardo
- freehan
- mrhohn
- nicksardo
- thockin
sig-network-reviewers:
- bowei
- caseydavenport
- danwinship
- dcbw
- dnardo
- freehan
- mrhohn
- nicksardo
- thockin
sig-apps-reviewers:
- enisoc
- erictune
- foxish
- janetkuo
- kow3ns
- lukaszo
- mfojtik
- smarterclayton
- soltysh
- tnozicka
sig-apps-api-approvers:
- erictune
- smarterclayton
milestone-maintainers:
- lavalamp
- deads2k
- michelleN
- mattfarina
- prydonius
- bgrant0607
- jdumars
- ericchiang
- liggitt
- deads2k
- mwielgus
- directxman12
- justinsb
- kris-nova
- chrislovecnm
- mfburnett
- slack
- colemickens
- foxish
- fabianofranz
- pwittrock
- AdoHe
- lukemarsden
- jbeda
- roberthbailey
- zehicle
- jdumars
- grodrigues3
- Phillels
- devin-donnelly
- jaredbhatti
- csbell
- quinton-hoole
- piosz
- fabxc
- thockin
- dcbw
- caseydavenport
- dchen1107
- derekwaynecarr
- zen
- marcoceppi
- dghubble
- idvoretskyi
- xsgordon
- apsinha
- idvoretskyi
- calebamiles
- pwittrock
- calebamiles
- wojtek-t
- countspongebob
- jbeda
- davidopp
- timothysc
- pmorie
- arschles
- vaikas-google
- duglin
- saad-ali
- childsb
- spiffxp
- fejta
- timothysc
- danielromlein
- floreks
- michmike
- abgworrall
- krzyzacy
- steveperry-53
- radhikpac
- jpbetz

86
vendor/k8s.io/kubernetes/README.md generated vendored Normal file
View File

@ -0,0 +1,86 @@
# Kubernetes
[![Submit Queue Widget]][Submit Queue] [![GoDoc Widget]][GoDoc]
<img src="https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png" width="100">
----
Kubernetes is an open source system for managing [containerized applications]
across multiple hosts, providing basic mechanisms for deployment, maintenance,
and scaling of applications.
Kubernetes builds upon a decade and a half of experience at Google running
production workloads at scale using a system called [Borg],
combined with best-of-breed ideas and practices from the community.
Kubernetes is hosted by the Cloud Native Computing Foundation ([CNCF]).
If you are a company that wants to help shape the evolution of
technologies that are container-packaged, dynamically-scheduled
and microservices-oriented, consider joining the CNCF.
For details about who's involved and how Kubernetes plays a role,
read the CNCF [announcement].
----
## To start using Kubernetes
See our documentation on [kubernetes.io].
Try our [interactive tutorial].
Take a free course on [Scalable Microservices with Kubernetes].
## To start developing Kubernetes
The [community repository] hosts all information about
building Kubernetes from source, how to contribute code
and documentation, who to contact about what, etc.
If you want to build Kubernetes right away there are two options:
##### You have a working [Go environment].
```
$ go get -d k8s.io/kubernetes
$ cd $GOPATH/src/k8s.io/kubernetes
$ make
```
##### You have a working [Docker environment].
```
$ git clone https://github.com/kubernetes/kubernetes
$ cd kubernetes
$ make quick-release
```
If you are less impatient, head over to the [developer's documentation].
## Support
If you need support, start with the [troubleshooting guide]
and work your way through the process that we've outlined.
That said, if you have questions, reach out to us
[one way or another][communication].
[announcement]: https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container
[Borg]: https://research.google.com/pubs/pub43438.html
[CNCF]: https://www.cncf.io/about
[communication]: https://github.com/kubernetes/community/blob/master/communication.md
[community repository]: https://github.com/kubernetes/community
[containerized applications]: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/
[developer's documentation]: https://github.com/kubernetes/community/tree/master/contributors/devel
[Docker environment]: https://docs.docker.com/engine
[Go environment]: https://golang.org/doc/install
[GoDoc]: https://godoc.org/k8s.io/kubernetes
[GoDoc Widget]: https://godoc.org/k8s.io/kubernetes?status.svg
[interactive tutorial]: http://kubernetes.io/docs/tutorials/kubernetes-basics
[kubernetes.io]: http://kubernetes.io
[Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615
[Submit Queue]: http://submit-queue.k8s.io/#/ci
[Submit Queue Widget]: http://submit-queue.k8s.io/health.svg?v=1
[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]()

39
vendor/k8s.io/kubernetes/SUPPORT.md generated vendored Normal file
View File

@ -0,0 +1,39 @@
## Support for deploying and using Kubernetes
Welcome to Kubernetes! We use GitHub for tracking bugs and feature requests.
This isn't the right place to get support for using Kubernetes, but the following
resources are available below, thanks for understanding.
### Stack Overflow
The Kubernetes Community is active on Stack Overflow, you can post your questions there:
* [Kubernetes on Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
* Here are some tips for [about how to ask good questions](http://stackoverflow.com/help/how-to-ask).
* Don't forget to check to see [what's on topic](http://stackoverflow.com/help/on-topic).
### Documentation
* [User Documentation](https://kubernetes.io/docs/)
* [Troubleshooting Guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/)
### Real-time Chat
* [Slack](https://kubernetes.slack.com) ([registration](http://slack.k8s.io)):
The `#kubernetes-users` and `#kubernetes-novice` channels are usual places where
people offer support.
* Also check out the
[Slack Archive](http://kubernetes.slackarchive.io/) of past conversations.
### Mailing Lists/Groups
* [Kubernetes-users group](https://groups.google.com/forum/#!forum/kubernetes-users)
<!---
Derived from https://github.com/kubernetes/community/blob/master/contributors/devel/on-call-user-support.md
-->

325
vendor/k8s.io/kubernetes/Vagrantfile generated vendored Normal file
View File

@ -0,0 +1,325 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# Require a recent version of vagrant otherwise some have reported errors setting host names on boxes
Vagrant.require_version ">= 1.7.4"
if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true'
raise Vagrant::Errors::VagrantError.new, <<END
Calling 'vagrant up' directly is not supported. Instead, please run the following:
export KUBERNETES_PROVIDER=vagrant
export VAGRANT_DEFAULT_PROVIDER=providername
./cluster/kube-up.sh
END
end
# The number of nodes to provision
$num_node = (ENV['NUM_NODES'] || 1).to_i
# ip configuration
$master_ip = ENV['MASTER_IP']
$node_ip_base = ENV['NODE_IP_BASE'] || ""
$node_ips = $num_node.times.collect { |n| $node_ip_base + "#{n+3}" }
# Determine the OS platform to use
$kube_os = ENV['KUBERNETES_OS'] || "fedora"
# Determine whether vagrant should use nfs to sync folders
$use_nfs = ENV['KUBERNETES_VAGRANT_USE_NFS'] == 'true'
# Determine whether vagrant should use rsync to sync folders
$use_rsync = ENV['KUBERNETES_VAGRANT_USE_RSYNC'] == 'true'
# To override the vagrant provider, use (e.g.):
# KUBERNETES_PROVIDER=vagrant VAGRANT_DEFAULT_PROVIDER=... .../cluster/kube-up.sh
# To override the box, use (e.g.):
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... .../cluster/kube-up.sh
# You can specify a box version:
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_VERSION=... .../cluster/kube-up.sh
# You can specify a box location:
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_URL=... .../cluster/kube-up.sh
# KUBERNETES_BOX_URL and KUBERNETES_BOX_VERSION will be ignored unless
# KUBERNETES_BOX_NAME is set
# Default OS platform to provider/box information
$kube_provider_boxes = {
:parallels => {
'fedora' => {
# :box_url and :box_version are optional (and mutually exclusive);
# if :box_url is omitted the box will be retrieved by :box_name (and
# :box_version if provided) from
# http://atlas.hashicorp.com/boxes/search (formerly
# http://vagrantcloud.com/); this allows you override :box_name with
# your own value so long as you provide :box_url; for example, the
# "official" name of this box is "rickard-von-essen/
# opscode_fedora-20", but by providing the URL and our own name, we
# make it appear as yet another provider under the "kube-fedora22"
# box
:box_name => 'kube-fedora23',
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/parallels/opscode_fedora-23_chef-provisionerless.box'
}
},
:virtualbox => {
'fedora' => {
:box_name => 'kube-fedora23',
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-23_chef-provisionerless.box'
}
},
:libvirt => {
'fedora' => {
:box_name => 'kube-fedora23',
:box_url => 'https://dl.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box'
}
},
:vmware_desktop => {
'fedora' => {
:box_name => 'kube-fedora23',
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-23_chef-provisionerless.box'
}
},
:vsphere => {
'fedora' => {
:box_name => 'vsphere-dummy',
:box_url => 'https://github.com/deromka/vagrant-vsphere/blob/master/vsphere-dummy.box?raw=true'
}
}
}
# Give access to all physical cpu cores
# Previously cargo-culted from here:
# http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
# Rewritten to actually determine the number of hardware cores instead of assuming
# that the host has hyperthreading enabled.
host = RbConfig::CONFIG['host_os']
if host =~ /darwin/
$vm_cpus = `sysctl -n hw.physicalcpu`.to_i
elsif host =~ /linux/
#This should work on most processors, however it will fail on ones without the core id field.
#So far i have only seen this on a raspberry pi. which you probably don't want to run vagrant on anyhow...
#But just in case we'll default to the result of nproc if we get 0 just to be safe.
$vm_cpus = `cat /proc/cpuinfo | grep 'core id' | sort -u | wc -l`.to_i
if $vm_cpus < 1
$vm_cpus = `nproc`.to_i
end
else # sorry Windows folks, I can't help you
$vm_cpus = 2
end
# Give VM 1024MB of RAM by default
# In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation.
# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 2048).to_i
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
if Vagrant.has_plugin?("vagrant-proxyconf")
$http_proxy = ENV['KUBERNETES_HTTP_PROXY'] || ""
$https_proxy = ENV['KUBERNETES_HTTPS_PROXY'] || ""
$no_proxy = ENV['KUBERNETES_NO_PROXY'] || "127.0.0.1"
config.proxy.http = $http_proxy
config.proxy.https = $https_proxy
config.proxy.no_proxy = $no_proxy
end
# this corrects a bug in 1.8.5 where an invalid SSH key is inserted.
if Vagrant::VERSION == "1.8.5"
config.ssh.insert_key = false
end
def setvmboxandurl(config, provider)
if ENV['KUBERNETES_BOX_NAME'] then
config.vm.box = ENV['KUBERNETES_BOX_NAME']
if ENV['KUBERNETES_BOX_URL'] then
config.vm.box_url = ENV['KUBERNETES_BOX_URL']
end
if ENV['KUBERNETES_BOX_VERSION'] then
config.vm.box_version = ENV['KUBERNETES_BOX_VERSION']
end
else
config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name]
if $kube_provider_boxes[provider][$kube_os][:box_url] then
config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url]
end
if $kube_provider_boxes[provider][$kube_os][:box_version] then
config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version]
end
end
end
def customize_vm(config, vm_mem)
if $use_nfs then
config.vm.synced_folder ".", "/vagrant", nfs: true
elsif $use_rsync then
opts = {}
if ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'] then
opts[:rsync__args] = ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'].split(" ")
end
if ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'] then
opts[:rsync__exclude] = ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'].split(" ")
end
config.vm.synced_folder ".", "/vagrant", opts
end
# Try VMWare Fusion first (see
# https://docs.vagrantup.com/v2/providers/basic_usage.html)
config.vm.provider :vmware_fusion do |v, override|
setvmboxandurl(override, :vmware_desktop)
v.vmx['memsize'] = vm_mem
v.vmx['numvcpus'] = $vm_cpus
end
# configure libvirt provider
config.vm.provider :libvirt do |v, override|
setvmboxandurl(override, :libvirt)
v.memory = vm_mem
v.cpus = $vm_cpus
v.nested = true
v.volume_cache = 'none'
end
# Then try VMWare Workstation
config.vm.provider :vmware_workstation do |v, override|
setvmboxandurl(override, :vmware_desktop)
v.vmx['memsize'] = vm_mem
v.vmx['numvcpus'] = $vm_cpus
end
# Then try Parallels
config.vm.provider :parallels do |v, override|
setvmboxandurl(override, :parallels)
v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem]
v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus]
# Don't attempt to update the Parallels tools on the image (this can
# be done manually if necessary)
v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
# Set up Parallels folder sharing to behave like VirtualBox (i.e.,
# mount the current directory as /vagrant and that's it)
v.customize ['set', :id, '--shf-guest', 'off']
v.customize ['set', :id, '--shf-guest-automount', 'off']
v.customize ['set', :id, '--shf-host', 'on']
# Synchronize VM clocks to host clock (Avoid certificate invalid issue)
v.customize ['set', :id, '--time-sync', 'on']
# Remove all auto-mounted "shared folders"; the result seems to
# persist between runs (i.e., vagrant halt && vagrant up)
override.vm.provision :shell, :inline => (%q{
set -ex
if [ -d /media/psf ]; then
for i in /media/psf/*; do
if [ -d "${i}" ]; then
umount "${i}" || true
rmdir -v "${i}"
fi
done
rmdir -v /media/psf
fi
exit
}).strip
end
# Then try vsphere
config.vm.provider :vsphere do |vsphere, override|
setvmboxandurl(override, :vsphere)
#config.vm.hostname = ENV['MASTER_NAME']
config.ssh.username = ENV['MASTER_USER']
config.ssh.password = ENV['MASTER_PASSWD']
config.ssh.pty = true
config.ssh.insert_key = true
#config.ssh.private_key_path = '~/.ssh/id_rsa_vsphere'
# Don't attempt to update the tools on the image (this can
# be done manually if necessary)
# vsphere.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
# The vSphere host we're going to connect to
vsphere.host = ENV['VAGRANT_VSPHERE_URL']
# The ESX host for the new VM
vsphere.compute_resource_name = ENV['VAGRANT_VSPHERE_RESOURCE_POOL']
# The resource pool for the new VM
#vsphere.resource_pool_name = 'Comp'
# path to folder where new VM should be created, if not specified template's parent folder will be used
vsphere.vm_base_path = ENV['VAGRANT_VSPHERE_BASE_PATH']
# The template we're going to clone
vsphere.template_name = ENV['VAGRANT_VSPHERE_TEMPLATE_NAME']
# The name of the new machine
#vsphere.name = ENV['MASTER_NAME']
# vSphere login
vsphere.user = ENV['VAGRANT_VSPHERE_USERNAME']
# vSphere password
vsphere.password = ENV['VAGRANT_VSPHERE_PASSWORD']
# cpu count
vsphere.cpu_count = $vm_cpus
# memory in MB
vsphere.memory_mb = vm_mem
# If you don't have SSL configured correctly, set this to 'true'
vsphere.insecure = ENV['VAGRANT_VSPHERE_INSECURE']
end
# Don't attempt to update Virtualbox Guest Additions (requires gcc)
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
end
# Finally, fall back to VirtualBox
config.vm.provider :virtualbox do |v, override|
setvmboxandurl(override, :virtualbox)
v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem]
v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus]
# Use faster paravirtualized networking
v.customize ["modifyvm", :id, "--nictype1", "virtio"]
v.customize ["modifyvm", :id, "--nictype2", "virtio"]
end
end
# Kubernetes master
config.vm.define "master" do |c|
customize_vm c, $vm_master_mem
if ENV['KUBE_TEMP'] then
script = "#{ENV['KUBE_TEMP']}/master-start.sh"
c.vm.provision "shell", run: "always", path: script
end
c.vm.network "private_network", ip: "#{$master_ip}"
end
# Kubernetes node
$num_node.times do |n|
node_vm_name = "node-#{n+1}"
config.vm.define node_vm_name do |node|
customize_vm node, $vm_node_mem
node_ip = $node_ips[n]
if ENV['KUBE_TEMP'] then
script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh"
node.vm.provision "shell", run: "always", path: script
end
node.vm.network "private_network", ip: "#{node_ip}"
end
end
end

1
vendor/k8s.io/kubernetes/WORKSPACE generated vendored Symbolic link
View File

@ -0,0 +1 @@
build/root/WORKSPACE

5
vendor/k8s.io/kubernetes/code-of-conduct.md generated vendored Normal file
View File

@ -0,0 +1,5 @@
## Kubernetes Community Code of Conduct
Kubernetes follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]()

370
vendor/k8s.io/kubernetes/labels.yaml generated vendored Normal file
View File

@ -0,0 +1,370 @@
# Scanned and autogenerated by https://github.com/tonglil/labeler
---
repo: kubernetes/kubernetes
labels:
- name: approved
color: 0ffa16
- name: approved-for-milestone
color: fef2c0
- name: area/admin
color: 0052cc
- name: area/admission-control
color: 0052cc
- name: area/api
color: 0052cc
- name: area/apiserver
color: 0052cc
- name: area/app-lifecycle
color: 0052cc
- name: area/batch
color: 0052cc
- name: area/build-release
color: 0052cc
- name: area/cadvisor
color: 0052cc
- name: area/client-libraries
color: 0052cc
- name: area/cloudprovider
color: 0052cc
- name: area/configmap-api
color: 0052cc
- name: area/controller-manager
color: 0052cc
- name: area/declarative-configuration
color: 0052cc
- name: area/dns
color: 0052cc
- name: area/docker
color: 0052cc
- name: area/downward-api
color: 0052cc
- name: area/ecosystem
color: 0052cc
- name: area/etcd
color: 0052cc
- name: area/example
color: 0052cc
- name: area/example/cassandra
color: 0052cc
- name: area/extensibility
color: 0052cc
- name: area/HA
color: 0052cc
- name: area/hw-accelerators
color: 0052cc
- name: area/images-registry
color: 0052cc
- name: area/ingress
color: 0052cc
- name: area/introspection
color: 0052cc
- name: area/ipv6
color: 0052cc
- name: area/isolation
color: 0052cc
- name: area/kube-proxy
color: 0052cc
- name: area/kubeadm
color: 0052cc
- name: area/kubectl
color: 0052cc
- name: area/kubelet
color: 0052cc
- name: area/kubelet-api
color: 0052cc
- name: area/logging
color: 0052cc
- name: area/monitoring
color: 0052cc
- name: area/node-e2e
color: 0052cc
- name: area/node-lifecycle
color: 0052cc
- name: area/nodecontroller
color: 0052cc
- name: area/os/coreos
color: d4c5f9
- name: area/os/fedora
color: d4c5f9
- name: area/os/gci
color: d4c5f9
- name: area/os/ubuntu
color: d4c5f9
- name: area/platform/aws
color: d4c5f9
- name: area/platform/azure
color: d4c5f9
- name: area/platform/gce
color: d4c5f9
- name: area/platform/gke
color: d4c5f9
- name: area/platform/mesos
color: d4c5f9
- name: area/platform/vagrant
color: d4c5f9
- name: area/platform/vsphere
color: d4c5f9
- name: area/release-infra
color: 0052cc
- name: area/reliability
color: 0052cc
- name: area/rkt
color: 0052cc
- name: area/secret-api
color: 0052cc
- name: area/security
color: d93f0b
- name: area/stateful-apps
color: 0052cc
- name: area/swagger
color: 0052cc
- name: area/system-requirement
color: 0052cc
- name: area/teardown
color: 0052cc
- name: area/test
color: 0052cc
- name: area/test-infra
color: 0052cc
- name: area/third-party-resource
color: 0052cc
- name: area/ui
color: 0052cc
- name: area/upgrade
color: 0052cc
- name: area/usability
color: 0052cc
- name: area/workload-api/cronjob
color: 0052cc
- name: area/workload-api/daemonset
color: 0052cc
- name: area/workload-api/deployment
color: 0052cc
- name: area/workload-api/job
color: 0052cc
- name: area/workload-api/replicaset
color: 0052cc
- name: beta-blocker
color: d93f0b
- name: cherrypick-approved
color: fef2c0
- name: cherrypick-candidate
color: fef2c0
- name: 'cla: human-approved'
color: bfe5bf
- name: 'cla: no'
color: e11d21
- name: 'cla: yes'
color: bfe5bf
- name: 'cncf-cla: no'
color: e11d21
- name: 'cncf-cla: yes'
color: bfe5bf
- name: do-not-merge
color: e11d21
- name: do-not-merge/work-in-progress
color: e11d21
- name: do-not-merge/hold
color: e11d21
- name: do-not-merge/cherry-pick-not-approved
color: e11d21
- name: do-not-merge/release-note-label-needed
color: e11d21
- name: do-not-merge/blocked-paths
color: e11d21
- name: flake-has-meta
color: fbca04
- name: for-new-contributors
color: 006b75
- name: help-wanted
color: 006b75
- name: keep-open
color: fbca04
- name: kind/api-change
color: c7def8
- name: kind/bug
color: e11d21
- name: kind/cleanup
color: c7def8
- name: kind/design
color: c7def8
- name: kind/documentation
color: c7def8
- name: kind/enhancement
color: c7def8
- name: kind/feature
color: c7def8
- name: kind/flake
color: f7c6c7
- name: kind/friction
color: c7def8
- name: kind/mesos-flake
color: f7c6c7
- name: kind/new-api
color: c7def8
- name: kind/old-docs
color: c7def8
- name: kind/postmortem
color: bfe5bf
- name: kind/support
color: eb6420
- name: kind/technical-debt
color: c7def8
- name: kind/upgrade-test-failure
color: fbca04
- name: lgtm
color: 15dd18
- name: milestone-labels-complete
color: 77bb00
- name: milestone-labels-incomplete
color: e11d21
- name: needs-ok-to-merge
color: ededed
- name: needs-ok-to-test
color: b60205
- name: needs-rebase
color: BDBDBD
- name: needs-sig
color: ededed
- name: non-release-blocker
color: 0e8a16
- name: ok-to-merge
color: fbca04
- name: priority/awaiting-more-evidence
color: fef2c0
- name: priority/backlog
color: fbca04
- name: priority/critical-urgent
color: e11d21
- name: priority/failing-test
color: e11d21
- name: priority/important-longterm
color: eb6420
- name: priority/important-soon
color: eb6420
- name: priority/P0
color: ff0000
- name: priority/P1
color: ededed
- name: priority/P2
color: ededed
- name: priority/P3
color: ededed
- name: queue/blocks-others
color: ffaa00
- name: queue/critical-fix
color: ffaa00
- name: queue/fix
color: ffaa00
- name: queue/multiple-rebases
color: ffaa00
- name: release-blocker
color: d93f0b
- name: release-note
color: c2e0c6
- name: release-note-action-required
color: c2e0c6
- name: release-note-label-needed
color: db5a64
- name: release-note-none
color: c2e0c6
- name: requires-release-czar-attention
color: d93f0b
- name: retest-not-required
color: eb6420
- name: retest-not-required-docs-only
color: fbca04
- name: sig/api-machinery
color: d2b48c
- name: sig/apps
color: d2b48c
- name: sig/architecture
color: d2b48c
- name: sig/auth
color: d2b48c
- name: sig/autoscaling
color: d2b48c
- name: sig/aws
color: d2b48c
- name: sig/azure
color: d2b48c
- name: sig/big-data
color: d2b48c
- name: sig/cli
color: d2b48c
- name: sig/cluster-lifecycle
color: d2b48c
- name: sig/cluster-ops
color: d2b48c
- name: sig/contributor-experience
color: d2b48c
- name: sig/docs
color: d2b48c
- name: sig/federation
color: d2b48c
- name: sig/instrumentation
color: d2b48c
- name: sig/network
color: d2b48c
- name: sig/node
color: d2b48c
- name: sig/onprem
color: d2b48c
- name: sig/openstack
color: d2b48c
- name: sig/release
color: d2b48c
- name: sig/rktnetes
color: d2b48c
- name: sig/scalability
color: d2b48c
- name: sig/scheduling
color: d2b48c
- name: sig/service-catalog
color: d2b48c
- name: sig/storage
color: d2b48c
- name: sig/testing
color: d2b48c
- name: sig/ui
color: d2b48c
- name: sig/windows
color: d2b48c
- name: size/L
color: ee9900
- name: size/M
color: eebb00
- name: size/S
color: 77bb00
- name: size/XL
color: ee5500
- name: size/XS
color: "009900"
- name: size/XXL
color: ee0000
- name: stale
color: "795548"
- name: status/in-progress
color: fef2c0
- name: status/in-review
color: fef2c0
- name: team/api (deprecated - do not use)
color: ededed
- name: team/cluster (deprecated - do not use)
color: ededed
- name: team/control-plane (deprecated - do not use)
color: ededed
- name: team/gke
color: d2b48c
- name: team/huawei
color: d2b48c
- name: team/mesosphere
color: d2b48c
- name: team/redhat
color: d2b48c
- name: team/test-infra
color: ededed
- name: team/ux (deprecated - do not use)
color: ededed
- name: triaged
color: d455d0

220
vendor/k8s.io/kubernetes/staging/BUILD generated vendored Normal file
View File

@ -0,0 +1,220 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "staging",
library = ":go_default_library",
)
go_library(
name = "go_default_library",
srcs = ["godeps-json-updater.go"],
deps = ["//vendor/github.com/spf13/pflag:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/api/admission/v1alpha1:all-srcs",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:all-srcs",
"//staging/src/k8s.io/api/apps/v1beta1:all-srcs",
"//staging/src/k8s.io/api/apps/v1beta2:all-srcs",
"//staging/src/k8s.io/api/authentication/v1:all-srcs",
"//staging/src/k8s.io/api/authentication/v1beta1:all-srcs",
"//staging/src/k8s.io/api/authorization/v1:all-srcs",
"//staging/src/k8s.io/api/authorization/v1beta1:all-srcs",
"//staging/src/k8s.io/api/autoscaling/v1:all-srcs",
"//staging/src/k8s.io/api/autoscaling/v2beta1:all-srcs",
"//staging/src/k8s.io/api/batch/v1:all-srcs",
"//staging/src/k8s.io/api/batch/v1beta1:all-srcs",
"//staging/src/k8s.io/api/batch/v2alpha1:all-srcs",
"//staging/src/k8s.io/api/certificates/v1beta1:all-srcs",
"//staging/src/k8s.io/api/core/v1:all-srcs",
"//staging/src/k8s.io/api/extensions/v1beta1:all-srcs",
"//staging/src/k8s.io/api/imagepolicy/v1alpha1:all-srcs",
"//staging/src/k8s.io/api/networking/v1:all-srcs",
"//staging/src/k8s.io/api/policy/v1beta1:all-srcs",
"//staging/src/k8s.io/api/rbac/v1:all-srcs",
"//staging/src/k8s.io/api/rbac/v1alpha1:all-srcs",
"//staging/src/k8s.io/api/rbac/v1beta1:all-srcs",
"//staging/src/k8s.io/api/scheduling/v1alpha1:all-srcs",
"//staging/src/k8s.io/api/settings/v1alpha1:all-srcs",
"//staging/src/k8s.io/api/storage/v1:all-srcs",
"//staging/src/k8s.io/api/storage/v1beta1:all-srcs",
"//staging/src/k8s.io/apiextensions-apiserver:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/api/testing:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/api/validation:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/apimachinery:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/fuzzer:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/apis/testapigroup:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/conversion:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/fields:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/labels:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/runtime:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/selection:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/test:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/types:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/cache:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/framer:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/httpstream:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/initialization:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/json:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/net:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/proxy:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/remotecommand:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/version:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/watch:all-srcs",
"//staging/src/k8s.io/apimachinery/third_party/forked/golang/json:all-srcs",
"//staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil:all-srcs",
"//staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/admission:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/apis/apiserver:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/apis/example:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/audit:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/group:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/union:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/websocket:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/x509:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/token/cache:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/token/union:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/authorization/union:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/endpoints:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/features:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/registry:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/server:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/storage:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/feature:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/flag:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/flushwriter:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/logs:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/proxy:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/trace:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/webhook:all-srcs",
"//staging/src/k8s.io/apiserver/pkg/util/wsstream:all-srcs",
"//staging/src/k8s.io/apiserver/plugin/pkg/audit:all-srcs",
"//staging/src/k8s.io/apiserver/plugin/pkg/authenticator:all-srcs",
"//staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook:all-srcs",
"//staging/src/k8s.io/client-go/discovery:all-srcs",
"//staging/src/k8s.io/client-go/dynamic:all-srcs",
"//staging/src/k8s.io/client-go/examples/create-update-delete-deployment:all-srcs",
"//staging/src/k8s.io/client-go/examples/in-cluster-client-configuration:all-srcs",
"//staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration:all-srcs",
"//staging/src/k8s.io/client-go/examples/workqueue:all-srcs",
"//staging/src/k8s.io/client-go/informers:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes:all-srcs",
"//staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/listers/apps/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/apps/v1beta2:all-srcs",
"//staging/src/k8s.io/client-go/listers/authentication/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/authentication/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/authorization/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/authorization/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/autoscaling/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/autoscaling/v2beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/batch/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/batch/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/batch/v2alpha1:all-srcs",
"//staging/src/k8s.io/client-go/listers/certificates/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/core/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/extensions/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/listers/networking/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/rbac/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/rbac/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/listers/rbac/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/listers/scheduling/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/listers/settings/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/listers/storage/v1:all-srcs",
"//staging/src/k8s.io/client-go/listers/storage/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/pkg/version:all-srcs",
"//staging/src/k8s.io/client-go/plugin/pkg/auth/authenticator/token/oidc/testing:all-srcs",
"//staging/src/k8s.io/client-go/plugin/pkg/client/auth:all-srcs",
"//staging/src/k8s.io/client-go/rest:all-srcs",
"//staging/src/k8s.io/client-go/testing:all-srcs",
"//staging/src/k8s.io/client-go/third_party/forked/golang/template:all-srcs",
"//staging/src/k8s.io/client-go/tools/auth:all-srcs",
"//staging/src/k8s.io/client-go/tools/cache:all-srcs",
"//staging/src/k8s.io/client-go/tools/clientcmd:all-srcs",
"//staging/src/k8s.io/client-go/tools/leaderelection:all-srcs",
"//staging/src/k8s.io/client-go/tools/metrics:all-srcs",
"//staging/src/k8s.io/client-go/tools/pager:all-srcs",
"//staging/src/k8s.io/client-go/tools/portforward:all-srcs",
"//staging/src/k8s.io/client-go/tools/record:all-srcs",
"//staging/src/k8s.io/client-go/tools/reference:all-srcs",
"//staging/src/k8s.io/client-go/tools/remotecommand:all-srcs",
"//staging/src/k8s.io/client-go/transport:all-srcs",
"//staging/src/k8s.io/client-go/util/cert:all-srcs",
"//staging/src/k8s.io/client-go/util/exec:all-srcs",
"//staging/src/k8s.io/client-go/util/flowcontrol:all-srcs",
"//staging/src/k8s.io/client-go/util/homedir:all-srcs",
"//staging/src/k8s.io/client-go/util/integer:all-srcs",
"//staging/src/k8s.io/client-go/util/jsonpath:all-srcs",
"//staging/src/k8s.io/client-go/util/retry:all-srcs",
"//staging/src/k8s.io/client-go/util/testing:all-srcs",
"//staging/src/k8s.io/client-go/util/workqueue:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/client-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/conversion-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/deepcopy-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/defaulter-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/go-to-protobuf:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/import-boss:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/informer-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/lister-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/openapi-gen:all-srcs",
"//staging/src/k8s.io/code-generator/cmd/set-gen:all-srcs",
"//staging/src/k8s.io/code-generator/third_party/forked/golang/reflect:all-srcs",
"//staging/src/k8s.io/kube-aggregator:all-srcs",
"//staging/src/k8s.io/metrics/pkg/apis/custom_metrics:all-srcs",
"//staging/src/k8s.io/metrics/pkg/apis/metrics:all-srcs",
"//staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset:all-srcs",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics:all-srcs",
"//staging/src/k8s.io/sample-apiserver:all-srcs",
],
tags = ["automanaged"],
)

14
vendor/k8s.io/kubernetes/staging/OWNERS generated vendored Normal file
View File

@ -0,0 +1,14 @@
approvers:
- lavalamp
- smarterclayton
reviewers:
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- sttts
- krousey

40
vendor/k8s.io/kubernetes/staging/README.md generated vendored Normal file
View File

@ -0,0 +1,40 @@
# External Repository Staging Area
This directory is the staging area for packages that have been split to their
own repository. The content here will be periodically published to respective
top-level k8s.io repositories.
Repositories currently staged here:
- [`k8s.io/apiextensions-apiserver`](https://github.com/kubernetes/apiextensions-apiserver)
- [`k8s.io/api`](https://github.com/kubernetes/api)
- [`k8s.io/apimachinery`](https://github.com/kubernetes/apimachinery)
- [`k8s.io/apiserver`](https://github.com/kubernetes/apiserver)
- [`k8s.io/client-go`](https://github.com/kubernetes/client-go)
- [`k8s.io/kube-aggregator`](https://github.com/kubernetes/kube-aggregator)
- [`k8s.io/code-generator`](https://github.com/kubernetes/code-generator) (about to be published)
- [`k8s.io/metrics`](https://github.com/kubernetes/metrics)
- [`k8s.io/sample-apiserver`](https://github.com/kubernetes/sample-apiserver)
The code in the staging/ directory is authoritative, i.e. the only copy of the
code. You can directly modify such code.
## Using staged repositories from Kubernetes code
Kubernetes code uses the repositories in this directory via symlinks in the
`vendor/k8s.io` directory into this staging area. For example, when
Kubernetes code imports a package from the `k8s.io/client-go` repository, that
import is resolved to `staging/src/k8s.io/client-go` relative to the project
root:
```go
// pkg/example/some_code.go
package example
import (
"k8s.io/client-go/dynamic" // resolves to staging/src/k8s.io/client-go/dynamic
)
```
Once the change-over to external repositories is complete, these repositories
will actually be vendored from `k8s.io/<package-name>`.

51
vendor/k8s.io/kubernetes/staging/src/k8s.io/api/OWNERS generated vendored Normal file
View File

@ -0,0 +1,51 @@
approvers:
- erictune
- lavalamp
- smarterclayton
- thockin
- liggitt
# - bgrant0607 # manual escalations only
reviewers:
- brendandburns
- caesarxuchao
- davidopp
- dchen1107
- deads2k
- derekwaynecarr
- dims
- eparis
- erictune
- errordeveloper
- feiskyer
- gmarek
- janetkuo
- jbeda
- jsafrane
- jszczepkowski
- justinsb
- krousey
- lavalamp
- liggitt
- luxas
- madhusudancs
- mikedanese
- mwielgus
- ncdc
- nikhiljindal
- piosz
- pmorie
- pwittrock
- roberthbailey
- rootfs
- saad-ali
- smarterclayton
- soltysh
- sttts
- tallclair
- thockin
- timothysc
- vishh
- wojtek-t
- yifan-gu
- yujuhong
- zmerlynn

20
vendor/k8s.io/kubernetes/staging/src/k8s.io/api/apps/OWNERS generated vendored Executable file
View File

@ -0,0 +1,20 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- deads2k
- caesarxuchao
- pmorie
- sttts
- saad-ali
- ncdc
- tallclair
- timothysc
- dims
- errordeveloper
- mml
- m1093782566
- mbohlool
- david-mcmahon
- kevin-wangzefeng
- jianhuiz

View File

@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated.pb.go",
"register.go",
"types.go",
"types_swagger_doc_generated.go",
"zz_generated.deepcopy.go",
],
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
filegroup(
name = "go_default_library_protos",
srcs = ["generated.proto"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,20 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
package v1beta2 // import "k8s.io/api/apps/v1beta2"

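The `+k8s:deepcopy-gen=package,register` marker above is what produces the `zz_generated.deepcopy.go` file listed in the BUILD `srcs`. As a minimal sketch (not part of this diff, and assuming only the generated DeepCopy helpers on the vendored `k8s.io/api/apps/v1beta2` types), this is what the marker buys callers:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	replicas := int32(3)
	orig := &appsv1beta2.StatefulSet{
		Spec: appsv1beta2.StatefulSetSpec{Replicas: &replicas},
	}

	// DeepCopy is generated by deepcopy-gen; the copy shares no pointers with orig,
	// so mutating the copy's Replicas pointer does not touch the original.
	cp := orig.DeepCopy()
	*cp.Spec.Replicas = 5

	fmt.Println(*orig.Spec.Replicas, *cp.Spec.Replicas) // prints: 3 5
}
```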
File diff suppressed because it is too large

View File

@ -0,0 +1,691 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.apps.v1beta2;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/api/policy/v1beta1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1beta2";
// ControllerRevision implements an immutable snapshot of state data. Clients
// are responsible for serializing and deserializing the objects that contain
// their internal state.
// Once a ControllerRevision has been successfully created, it can not be updated.
// The API Server will fail validation of all requests that attempt to mutate
// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
// it may be subject to name and representation changes in future releases, and clients should not
// depend on its stability. It is primarily for internal use by controllers.
message ControllerRevision {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Data is the serialized representation of the state.
optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
// Revision indicates the revision of the state represented by Data.
optional int64 revision = 3;
}
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
message ControllerRevisionList {
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of ControllerRevisions
repeated ControllerRevision items = 2;
}
// DaemonSet represents the configuration of a daemon set.
message DaemonSet {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional DaemonSetSpec spec = 2;
// The current status of this daemon set. This data may be
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional DaemonSetStatus status = 3;
}
// DaemonSetList is a collection of daemon sets.
message DaemonSetList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// A list of daemon sets.
repeated DaemonSet items = 2;
}
// DaemonSetSpec is the specification of a daemon set.
message DaemonSetSpec {
// A label query over pods that are managed by the daemon set.
// Must match in order to be controlled.
// If empty, defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
optional DaemonSetUpdateStrategy updateStrategy = 3;
// The minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
optional int32 minReadySeconds = 4;
// The number of old history entries to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
optional int32 revisionHistoryLimit = 6;
}
// DaemonSetStatus represents the current status of a daemon set.
message DaemonSetStatus {
// The number of nodes that are running at least 1
// daemon pod and are supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
optional int32 currentNumberScheduled = 1;
// The number of nodes that are running the daemon pod, but are
// not supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
optional int32 numberMisscheduled = 2;
// The total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
optional int32 desiredNumberScheduled = 3;
// The number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
optional int32 numberReady = 4;
// The most recent generation observed by the daemon set controller.
// +optional
optional int64 observedGeneration = 5;
// The total number of nodes that are running an updated daemon pod
// +optional
optional int32 updatedNumberScheduled = 6;
// The number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least spec.minReadySeconds)
// +optional
optional int32 numberAvailable = 7;
// The number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least spec.minReadySeconds)
// +optional
optional int32 numberUnavailable = 8;
// Count of hash collisions for the DaemonSet. The DaemonSet controller
// uses this field as a collision avoidance mechanism when it needs to
// create the name for the newest ControllerRevision.
// +optional
optional int32 collisionCount = 9;
}
// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
message DaemonSetUpdateStrategy {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
// +optional
optional string type = 1;
// Rolling update config params. Present only if type = "RollingUpdate".
// ---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as Deployment `strategy.rollingUpdate`.
// See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
optional RollingUpdateDaemonSet rollingUpdate = 2;
}
// Deployment enables declarative updates for Pods and ReplicaSets.
message Deployment {
// Standard object metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Specification of the desired behavior of the Deployment.
// +optional
optional DeploymentSpec spec = 2;
// Most recently observed status of the Deployment.
// +optional
optional DeploymentStatus status = 3;
}
// DeploymentCondition describes the state of a deployment at a certain point.
message DeploymentCondition {
// Type of deployment condition.
optional string type = 1;
// Status of the condition, one of True, False, Unknown.
optional string status = 2;
// The last time this condition was updated.
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
// Last time the condition transitioned from one status to another.
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
// The reason for the condition's last transition.
optional string reason = 4;
// A human readable message indicating details about the transition.
optional string message = 5;
}
// DeploymentList is a list of Deployments.
message DeploymentList {
// Standard list metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of Deployments.
repeated Deployment items = 2;
}
// DeploymentSpec is the specification of the desired behavior of the Deployment.
message DeploymentSpec {
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
optional int32 replicas = 1;
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
// Template describes the pods that will be created.
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
// The deployment strategy to use to replace existing pods with new ones.
// +optional
optional DeploymentStrategy strategy = 4;
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
optional int32 minReadySeconds = 5;
// The number of old ReplicaSets to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
optional int32 revisionHistoryLimit = 6;
// Indicates that the deployment is paused.
// +optional
optional bool paused = 7;
// The maximum time in seconds for a deployment to make progress before it
// is considered to be failed. The deployment controller will continue to
// process failed deployments and a condition with a ProgressDeadlineExceeded
// reason will be surfaced in the deployment status. Note that progress will
// not be estimated during the time a deployment is paused. Defaults to 600s.
optional int32 progressDeadlineSeconds = 9;
}
// DeploymentStatus is the most recently observed status of the Deployment.
message DeploymentStatus {
// The generation observed by the deployment controller.
// +optional
optional int64 observedGeneration = 1;
// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
// +optional
optional int32 replicas = 2;
// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
// +optional
optional int32 updatedReplicas = 3;
// Total number of ready pods targeted by this deployment.
// +optional
optional int32 readyReplicas = 7;
// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
optional int32 availableReplicas = 4;
// Total number of unavailable pods targeted by this deployment. This is the total number of
// pods that are still required for the deployment to have 100% available capacity. They may
// either be pods that are running but not yet available or pods that still have not been created.
// +optional
optional int32 unavailableReplicas = 5;
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
repeated DeploymentCondition conditions = 6;
// Count of hash collisions for the Deployment. The Deployment controller uses this
// field as a collision avoidance mechanism when it needs to create the name for the
// newest ReplicaSet.
// +optional
optional int32 collisionCount = 8;
}
// DeploymentStrategy describes how to replace existing pods with new ones.
message DeploymentStrategy {
// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
// +optional
optional string type = 1;
// Rolling update config params. Present only if DeploymentStrategyType =
// RollingUpdate.
// ---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be.
// +optional
optional RollingUpdateDeployment rollingUpdate = 2;
}
// ReplicaSet represents the configuration of a ReplicaSet.
message ReplicaSet {
// If the Labels of a ReplicaSet are empty, they are defaulted to
// be the same as the Pod(s) that the ReplicaSet manages.
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines the specification of the desired behavior of the ReplicaSet.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional ReplicaSetSpec spec = 2;
// Status is the most recently observed status of the ReplicaSet.
// This data may be out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional ReplicaSetStatus status = 3;
}
// ReplicaSetCondition describes the state of a replica set at a certain point.
message ReplicaSetCondition {
// Type of replica set condition.
optional string type = 1;
// Status of the condition, one of True, False, Unknown.
optional string status = 2;
// The last time the condition transitioned from one status to another.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
// The reason for the condition's last transition.
// +optional
optional string reason = 4;
// A human readable message indicating details about the transition.
// +optional
optional string message = 5;
}
// ReplicaSetList is a collection of ReplicaSets.
message ReplicaSetList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of ReplicaSets.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
repeated ReplicaSet items = 2;
}
// ReplicaSetSpec is the specification of a ReplicaSet.
message ReplicaSetSpec {
// Replicas is the number of desired replicas.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
// +optional
optional int32 replicas = 1;
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
optional int32 minReadySeconds = 4;
// Selector is a label query over pods that should match the replica count.
// If the selector is empty, it is defaulted to the labels present on the pod template.
// Label keys and values that must match in order to be controlled by this replica set.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
// +optional
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
message ReplicaSetStatus {
// Replicas is the most recently observed number of replicas.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
optional int32 replicas = 1;
// The number of pods that have labels matching the labels of the pod template of the replicaset.
// +optional
optional int32 fullyLabeledReplicas = 2;
// The number of ready replicas for this replica set.
// +optional
optional int32 readyReplicas = 4;
// The number of available replicas (ready for at least minReadySeconds) for this replica set.
// +optional
optional int32 availableReplicas = 5;
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
optional int64 observedGeneration = 3;
// Represents the latest available observations of a replica set's current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
repeated ReplicaSetCondition conditions = 6;
}
// Spec to control the desired behavior of daemon set rolling update.
message RollingUpdateDaemonSet {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
// This cannot be 0.
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have their pods stopped for an update at any given
// time. The update starts by stopping at most 30% of those DaemonSet pods
// and then brings up new DaemonSet pods in their place. Once the new pods
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
// that at least 70% of the original number of DaemonSet pods are available at
// all times during the update.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
}
// Spec to control the desired behavior of rolling update.
message RollingUpdateDeployment {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding down.
// This can not be 0 if MaxSurge is 0.
// Defaults to 25%.
// Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
// immediately when the rolling update starts. Once new pods are ready, old RC
// can be scaled down further, followed by scaling up the new RC, ensuring
// that the total number of pods available at all times during the update is at
// least 70% of desired pods.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
// The maximum number of pods that can be scheduled above the desired number of
// pods.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// This can not be 0 if MaxUnavailable is 0.
// Absolute number is calculated from percentage by rounding up.
// Defaults to 25%.
// Example: when this is set to 30%, the new RC can be scaled up immediately when
// the rolling update starts, such that the total number of old and new pods does not exceed
// 130% of desired pods. Once old pods have been killed,
// new RC can be scaled up further, ensuring that the total number of pods running
// at any time during the update is at most 130% of desired pods.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}
// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
message RollingUpdateStatefulSetStrategy {
// Partition indicates the ordinal at which the StatefulSet should be
// partitioned.
// Default value is 0.
// +optional
optional int32 partition = 1;
}
// Scale represents a scaling request for a resource.
message Scale {
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
optional ScaleSpec spec = 2;
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
// +optional
optional ScaleStatus status = 3;
}
// ScaleSpec describes the attributes of a scale subresource
message ScaleSpec {
// desired number of instances for the scaled object.
// +optional
optional int32 replicas = 1;
}
// ScaleStatus represents the current status of a scale subresource.
message ScaleStatus {
// actual number of observed instances of the scaled object.
optional int32 replicas = 1;
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
map<string, string> selector = 2;
// label selector for pods that should match the replicas count. This is a serialized
// version of both map-based and more expressive set-based selectors. This is done to
// avoid introspection in the clients. The string will be in the same format as the
// query-param syntax. If the target type only supports map-based selectors, both this
// field and map-based selector field are populated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
optional string targetSelector = 3;
}
// StatefulSet represents a set of pods with consistent identities.
// Identities are defined as:
// - Network: A single stable DNS and hostname.
// - Storage: As many VolumeClaims as requested.
// The StatefulSet guarantees that a given network identity will always
// map to the same storage identity.
message StatefulSet {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines the desired identities of pods in this set.
// +optional
optional StatefulSetSpec spec = 2;
// Status is the current status of Pods in this StatefulSet. This data
// may be out of date by some window of time.
// +optional
optional StatefulSetStatus status = 3;
}
// StatefulSetList is a collection of StatefulSets.
message StatefulSetList {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
repeated StatefulSet items = 2;
}
// A StatefulSetSpec is the specification of a StatefulSet.
message StatefulSetSpec {
// replicas is the desired number of replicas of the given Template.
// These are replicas in the sense that they are instantiations of the
// same Template, but individual replicas also have a consistent identity.
// If unspecified, defaults to 1.
// TODO: Consider a rename of this field.
// +optional
optional int32 replicas = 1;
// selector is a label query over pods that should match the replica count.
// If empty, defaulted to labels on the pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
// template is the object that describes the pod that will be created if
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
// will fulfill this Template, but have a unique identity from the rest
// of the StatefulSet.
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
// volumeClaimTemplates is a list of claims that pods are allowed to reference.
// The StatefulSet controller is responsible for mapping network identities to
// claims in a way that maintains the identity of a pod. Every claim in
// this list must have at least one matching (by name) volumeMount in one
// container in the template. A claim in this list takes precedence over
// any volumes in the template, with the same name.
// TODO: Define the behavior if a claim already exists with the same name.
// +optional
repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
// serviceName is the name of the service that governs this StatefulSet.
// This service must exist before the StatefulSet, and is responsible for
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
optional string serviceName = 5;
// podManagementPolicy controls how pods are created during initial scale up,
// when replacing pods on nodes, or when scaling down. The default policy is
// `OrderedReady`, where pods are created in increasing order (pod-0, then
// pod-1, etc) and the controller will wait until each pod is ready before
// continuing. When scaling down, the pods are removed in the opposite order.
// The alternative policy is `Parallel` which will create pods in parallel
// to match the desired scale without waiting, and on scale down will delete
// all pods at once.
// +optional
optional string podManagementPolicy = 6;
// updateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
optional StatefulSetUpdateStrategy updateStrategy = 7;
// revisionHistoryLimit is the maximum number of revisions that will
// be maintained in the StatefulSet's revision history. The revision history
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
optional int32 revisionHistoryLimit = 8;
}
// StatefulSetStatus represents the current state of a StatefulSet.
message StatefulSetStatus {
// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
// StatefulSet's generation, which is updated on mutation by the API Server.
// +optional
optional int64 observedGeneration = 1;
// replicas is the number of Pods created by the StatefulSet controller.
optional int32 replicas = 2;
// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
optional int32 readyReplicas = 3;
// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
// indicated by currentRevision.
optional int32 currentReplicas = 4;
// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
// indicated by updateRevision.
optional int32 updatedReplicas = 5;
// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
// sequence [0,currentReplicas).
optional string currentRevision = 6;
// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
// [replicas-updatedReplicas,replicas)
optional string updateRevision = 7;
// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
// uses this field as a collision avoidance mechanism when it needs to create the name for the
// newest ControllerRevision.
// +optional
optional int32 collisionCount = 9;
}
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
// controller will use to perform updates. It includes any additional parameters
// necessary to perform the update for the indicated strategy.
message StatefulSetUpdateStrategy {
// Type indicates the type of the StatefulSetUpdateStrategy.
// Default is RollingUpdate.
// +optional
optional string type = 1;
// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
// +optional
optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
}

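The strategy messages above (`DeploymentStrategy`, `RollingUpdateDeployment`) map one-to-one onto the Go types generated from this proto. A minimal sketch, assuming the vendored `k8s.io/api/apps/v1beta2`, `k8s.io/api/core/v1`, and `k8s.io/apimachinery` packages added in this diff; the name, labels, and image below are placeholders for illustration only:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	// Allow at most 25% of desired pods to be unavailable and at most one
	// extra pod to be scheduled above the desired count during a rollout.
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromInt(1)

	dep := appsv1beta2.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "example"}, // placeholder name
		Spec: appsv1beta2.DeploymentSpec{
			Replicas: int32Ptr(3),
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
			Strategy: appsv1beta2.DeploymentStrategy{
				Type: appsv1beta2.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &appsv1beta2.RollingUpdateDeployment{
					MaxUnavailable: &maxUnavailable,
					MaxSurge:       &maxSurge,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "example"}},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "web", Image: "nginx:1.13"}}, // placeholder image
				},
			},
		},
	}
	fmt.Println(dep.Spec.Strategy.Type)
}
```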
View File

@ -0,0 +1,61 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "apps"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Deployment{},
&DeploymentList{},
&Scale{},
&StatefulSet{},
&StatefulSetList{},
&DaemonSet{},
&DaemonSetList{},
&ReplicaSet{},
&ReplicaSetList{},
&ControllerRevision{},
&ControllerRevisionList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

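Downstream code typically consumes this registration through the exported `AddToScheme`. A minimal sketch, assuming only the vendored `k8s.io/api/apps/v1beta2` and `k8s.io/apimachinery/pkg/runtime` packages from this diff:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build an empty scheme and register every type listed in addKnownTypes above.
	scheme := runtime.NewScheme()
	if err := appsv1beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme now knows the apps/v1beta2 kinds and can report which
	// GroupVersionKinds a given object maps to.
	gvks, _, err := scheme.ObjectKinds(&appsv1beta2.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // e.g. [apps/v1beta2, Kind=Deployment]
}
```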
View File

@ -0,0 +1,823 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
ControllerRevisionHashLabelKey = "controller-revision-hash"
StatefulSetRevisionLabel = ControllerRevisionHashLabelKey
DeprecatedRollbackTo = "deprecated.deployment.rollback.to"
DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation"
)
// ScaleSpec describes the attributes of a scale subresource
type ScaleSpec struct {
// desired number of instances for the scaled object.
// +optional
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
// ScaleStatus represents the current status of a scale subresource.
type ScaleStatus struct {
// actual number of observed instances of the scaled object.
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// label selector for pods that should match the replicas count. This is a serialized
// version of both map-based and more expressive set-based selectors. This is done to
// avoid introspection in the clients. The string will be in the same format as the
// query-param syntax. If the target type only supports map-based selectors, both this
// field and map-based selector field are populated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}
// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Scale represents a scaling request for a resource.
type Scale struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StatefulSet represents a set of pods with consistent identities.
// Identities are defined as:
// - Network: A single stable DNS and hostname.
// - Storage: As many VolumeClaims as requested.
// The StatefulSet guarantees that a given network identity will always
// map to the same storage identity.
type StatefulSet struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired identities of pods in this set.
// +optional
Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status is the current status of Pods in this StatefulSet. This data
// may be out of date by some window of time.
// +optional
Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PodManagementPolicyType defines the policy for creating pods under a stateful set.
type PodManagementPolicyType string
const (
// OrderedReadyPodManagement will create pods in strictly increasing order on
// scale up and strictly decreasing order on scale down, progressing only when
// the previous pod is ready or terminated. At most one pod will be changed
// at any time.
OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
// ParallelPodManagement will create and delete pods as soon as the stateful set
// replica count is changed, and will not wait for pods to be ready or complete
// termination.
ParallelPodManagement = "Parallel"
)
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
// controller will use to perform updates. It includes any additional parameters
// necessary to perform the update for the indicated strategy.
type StatefulSetUpdateStrategy struct {
// Type indicates the type of the StatefulSetUpdateStrategy.
// Default is RollingUpdate.
// +optional
Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
// +optional
RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
}
// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
// all possible update strategies for the StatefulSet controller.
type StatefulSetUpdateStrategyType string
const (
// RollingUpdateStatefulSetStrategyType indicates that update will be
// applied to all Pods in the StatefulSet with respect to the StatefulSet
// ordering constraints. When a scale operation is performed with this
// strategy, new Pods will be created from the specification version indicated
// by the StatefulSet's updateRevision.
RollingUpdateStatefulSetStrategyType = "RollingUpdate"
// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
// tracking and ordered rolling restarts are disabled. Pods are recreated
// from the StatefulSetSpec when they are manually deleted. When a scale
// operation is performed with this strategy, new Pods will be created from the
// specification version indicated by the StatefulSet's currentRevision.
OnDeleteStatefulSetStrategyType = "OnDelete"
)
// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
type RollingUpdateStatefulSetStrategy struct {
// Partition indicates the ordinal at which the StatefulSet should be
// partitioned.
// Default value is 0.
// +optional
Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
}
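// Illustrative sketch (hypothetical values, not part of the upstream API): a
// partitioned rolling update in which only Pods with an ordinal greater than or
// equal to 2 are moved to the new revision, while ordinals 0 and 1 stay on the
// current revision.
var (
	examplePartition = int32(2)

	examplePartitionedUpdate = StatefulSetUpdateStrategy{
		Type:          RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &RollingUpdateStatefulSetStrategy{Partition: &examplePartition},
	}
)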
// A StatefulSetSpec is the specification of a StatefulSet.
type StatefulSetSpec struct {
// replicas is the desired number of replicas of the given Template.
// These are replicas in the sense that they are instantiations of the
// same Template, but individual replicas also have a consistent identity.
// If unspecified, defaults to 1.
// TODO: Consider a rename of this field.
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// selector is a label query over pods that should match the replica count.
// If empty, defaulted to labels on the pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
// template is the object that describes the pod that will be created if
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
// will fulfill this Template, but have a unique identity from the rest
// of the StatefulSet.
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
// volumeClaimTemplates is a list of claims that pods are allowed to reference.
// The StatefulSet controller is responsible for mapping network identities to
// claims in a way that maintains the identity of a pod. Every claim in
// this list must have at least one matching (by name) volumeMount in one
// container in the template. A claim in this list takes precedence over
// any volumes in the template, with the same name.
// TODO: Define the behavior if a claim already exists with the same name.
// +optional
VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
// serviceName is the name of the service that governs this StatefulSet.
// This service must exist before the StatefulSet, and is responsible for
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
// podManagementPolicy controls how pods are created during initial scale up,
// when replacing pods on nodes, or when scaling down. The default policy is
// `OrderedReady`, where pods are created in increasing order (pod-0, then
// pod-1, etc) and the controller will wait until each pod is ready before
// continuing. When scaling down, the pods are removed in the opposite order.
// The alternative policy is `Parallel` which will create pods in parallel
// to match the desired scale without waiting, and on scale down will delete
// all pods at once.
// +optional
PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
// updateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
// revisionHistoryLimit is the maximum number of revisions that will
// be maintained in the StatefulSet's revision history. The revision history
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
}
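// Illustrative sketch (hypothetical names and image, not part of the upstream API):
// a minimal StatefulSetSpec for a three-replica set governed by a headless Service
// called "web", with Pods created in parallel rather than in ordinal order. metav1
// and v1 are the packages already imported by this file.
var (
	exampleStatefulSetReplicas = int32(3)

	exampleStatefulSetSpec = StatefulSetSpec{
		Replicas:            &exampleStatefulSetReplicas,
		ServiceName:         "web",
		PodManagementPolicy: ParallelPodManagement,
		Selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
			Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "web", Image: "nginx:1.13"}}},
		},
	}
)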
// StatefulSetStatus represents the current state of a StatefulSet.
type StatefulSetStatus struct {
// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
// StatefulSet's generation, which is updated on mutation by the API Server.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
// replicas is the number of Pods created by the StatefulSet controller.
Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
// indicated by currentRevision.
CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
// indicated by updateRevision.
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
// sequence [0,currentReplicas).
CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
// [replicas-updatedReplicas,replicas)
UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
// uses this field as a collision avoidance mechanism when it needs to create the name for the
// newest ControllerRevision.
// +optional
CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StatefulSetList is a collection of StatefulSets.
type StatefulSetList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Deployment enables declarative updates for Pods and ReplicaSets.
type Deployment struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the Deployment.
// +optional
Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the Deployment.
// +optional
Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// DeploymentSpec is the specification of the desired behavior of the Deployment.
type DeploymentSpec struct {
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
// Template describes the pods that will be created.
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
// The number of old ReplicaSets to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
// Indicates that the deployment is paused.
// +optional
Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
// The maximum time in seconds for a deployment to make progress before it
// is considered to be failed. The deployment controller will continue to
// process failed deployments and a condition with a ProgressDeadlineExceeded
// reason will be surfaced in the deployment status. Note that progress will
// not be estimated during the time a deployment is paused. Defaults to 600s.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
}
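// Illustrative sketch (hypothetical names and values, not part of the upstream API):
// a DeploymentSpec that keeps three replicas, waits 10 seconds of readiness before
// counting a Pod as available, and reports ProgressDeadlineExceeded if no progress
// is made for 600 seconds.
var (
	exampleDeploymentReplicas      = int32(3)
	exampleProgressDeadlineSeconds = int32(600)

	exampleDeploymentSpec = DeploymentSpec{
		Replicas:                &exampleDeploymentReplicas,
		MinReadySeconds:         10,
		ProgressDeadlineSeconds: &exampleProgressDeadlineSeconds,
		Selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"app": "api"}},
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "api"}},
			Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "api", Image: "example/api:1.0"}}},
		},
	}
)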
const (
// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
// to existing RCs (and label key that is added to its pods) to prevent the existing RCs
	// to select new pods (and old pods being selected by new RC).
DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
)
// DeploymentStrategy describes how to replace existing pods with new ones.
type DeploymentStrategy struct {
// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
// +optional
Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
// Rolling update config params. Present only if DeploymentStrategyType =
// RollingUpdate.
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be.
// +optional
RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
}
type DeploymentStrategyType string
const (
// Kill all existing pods before creating new ones.
RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
	// Replace the old RCs by new ones using rolling update, i.e. gradually scale down the old RCs and scale up the new one.
RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
)
// Spec to control the desired behavior of rolling update.
type RollingUpdateDeployment struct {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding down.
// This can not be 0 if MaxSurge is 0.
// Defaults to 25%.
// Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
// immediately when the rolling update starts. Once new pods are ready, old RC
// can be scaled down further, followed by scaling up the new RC, ensuring
// that the total number of pods available at all times during the update is at
// least 70% of desired pods.
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
// The maximum number of pods that can be scheduled above the desired number of
// pods.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// This can not be 0 if MaxUnavailable is 0.
// Absolute number is calculated from percentage by rounding up.
// Defaults to 25%.
// Example: when this is set to 30%, the new RC can be scaled up immediately when
// the rolling update starts, such that the total number of old and new pods do not exceed
// 130% of desired pods. Once old pods have been killed,
// new RC can be scaled up further, ensuring that total number of pods running
	// at any time during the update is at most 130% of desired pods.
// +optional
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}
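// Illustrative sketch (hypothetical values, not part of the upstream API): a rolling
// update that never drops below the desired replica count (MaxUnavailable 0) and
// allows up to 25% extra Pods to be scheduled while new Pods replace old ones.
// intstr is the package already imported by this file.
var (
	exampleMaxSurge       = intstr.FromString("25%")
	exampleMaxUnavailable = intstr.FromInt(0)

	exampleDeploymentStrategy = DeploymentStrategy{
		Type: RollingUpdateDeploymentStrategyType,
		RollingUpdate: &RollingUpdateDeployment{
			MaxSurge:       &exampleMaxSurge,
			MaxUnavailable: &exampleMaxUnavailable,
		},
	}
)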
// DeploymentStatus is the most recently observed status of the Deployment.
type DeploymentStatus struct {
// The generation observed by the deployment controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
// Total number of ready pods targeted by this deployment.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
// Total number of unavailable pods targeted by this deployment. This is the total number of
// pods that are still required for the deployment to have 100% available capacity. They may
// either be pods that are running but not yet available or pods that still have not been created.
// +optional
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
// Count of hash collisions for the Deployment. The Deployment controller uses this
// field as a collision avoidance mechanism when it needs to create the name for the
// newest ReplicaSet.
// +optional
CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
}
type DeploymentConditionType string
// These are valid conditions of a deployment.
const (
	// Available means the deployment is available, i.e. at least the minimum available
// replicas required are up and running for at least minReadySeconds.
DeploymentAvailable DeploymentConditionType = "Available"
// Progressing means the deployment is progressing. Progress for a deployment is
// considered when a new replica set is created or adopted, and when new pods scale
// up or old pods scale down. Progress is not estimated for paused deployments or
// when progressDeadlineSeconds is not specified.
DeploymentProgressing DeploymentConditionType = "Progressing"
// ReplicaFailure is added in a deployment when one of its pods fails to be created
// or deleted.
DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
)
// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentCondition struct {
// Type of deployment condition.
Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
// The last time this condition was updated.
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
// Last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
// The reason for the condition's last transition.
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// A human readable message indicating details about the transition.
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeploymentList is a list of Deployments.
type DeploymentList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of Deployments.
Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
// +optional
Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
// Rolling update config params. Present only if type = "RollingUpdate".
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as Deployment `strategy.rollingUpdate`.
// See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
}
type DaemonSetUpdateStrategyType string
const (
	// Replace the old daemons by new ones using rolling update, i.e. replace them on each node one after the other.
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
	// Replace the old daemons only when they are killed
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
)
// Spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
// This cannot be 0.
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have their pods stopped for an update at any given
// time. The update starts by stopping at most 30% of those DaemonSet pods
// and then brings up new DaemonSet pods in their place. Once the new pods
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
// that at least 70% of original number of DaemonSet pods are available at
// all times during the update.
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
}
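// Illustrative sketch (hypothetical value, not part of the upstream API): a DaemonSet
// update strategy that allows at most 30% of the desired DaemonSet Pods to be
// unavailable while nodes are updated in place.
var (
	exampleDaemonSetMaxUnavailable = intstr.FromString("30%")

	exampleDaemonSetUpdateStrategy = DaemonSetUpdateStrategy{
		Type:          RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &RollingUpdateDaemonSet{MaxUnavailable: &exampleDaemonSetMaxUnavailable},
	}
)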
// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct {
// A label query over pods that are managed by the daemon set.
// Must match in order to be controlled.
// If empty, defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
// The minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// The number of old history entries to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
}
// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct {
// The number of nodes that are running at least 1
// daemon pod and are supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
// The number of nodes that are running the daemon pod, but are
// not supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
// The total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
// The number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
// The most recent generation observed by the daemon set controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
// The total number of nodes that are running updated daemon pod
// +optional
UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
// The number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least spec.minReadySeconds)
// +optional
NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
// The number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least spec.minReadySeconds)
// +optional
NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
// Count of hash collisions for the DaemonSet. The DaemonSet controller
// uses this field as a collision avoidance mechanism when it needs to
// create the name for the newest ControllerRevision.
// +optional
CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSet represents the configuration of a daemon set.
type DaemonSet struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// The current status of this daemon set. This data may be
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
// DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead.
// DaemonSetTemplateGenerationKey is the key of the labels that is added
// to daemon set pods to distinguish between old and new pod templates
// during DaemonSet template update.
DaemonSetTemplateGenerationKey string = "pod-template-generation"
// DefaultDaemonSetUniqueLabelKey is the default label key that is added
// to existing DaemonSet pods to distinguish between old and new
// DaemonSet pods during DaemonSet template updates.
DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSetList is a collection of daemon sets.
type DaemonSetList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// A list of daemon sets.
Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSet represents the configuration of a ReplicaSet.
type ReplicaSet struct {
metav1.TypeMeta `json:",inline"`
// If the Labels of a ReplicaSet are empty, they are defaulted to
// be the same as the Pod(s) that the ReplicaSet manages.
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the specification of the desired behavior of the ReplicaSet.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status is the most recently observed status of the ReplicaSet.
// This data may be out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSetList is a collection of ReplicaSets.
type ReplicaSetList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ReplicaSets.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpec struct {
// Replicas is the number of desired replicas.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
// Selector is a label query over pods that should match the replica count.
// If the selector is empty, it is defaulted to the labels present on the pod template.
// Label keys and values that must match in order to be controlled by this replica set.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
// +optional
Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
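// Illustrative sketch (hypothetical names and image, not part of the upstream API):
// a ReplicaSetSpec that keeps two Pods whose labels match the selector; the template
// carries the same labels, so the defaulting described above is not needed.
var (
	exampleReplicaSetReplicas = int32(2)

	exampleReplicaSetSpec = ReplicaSetSpec{
		Replicas: &exampleReplicaSetReplicas,
		Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "cache"}},
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "cache"}},
			Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "cache", Image: "redis:3.2"}}},
		},
	}
)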
// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
	// Replicas is the most recently observed number of replicas.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
// The number of pods that have labels matching the labels of the pod template of the replicaset.
// +optional
FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
// The number of ready replicas for this replica set.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
// The number of available replicas (ready for at least minReadySeconds) for this replica set.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
// Represents the latest available observations of a replica set's current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
type ReplicaSetConditionType string
// These are valid conditions of a replica set.
const (
// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
// due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
// due to kubelet being down or finalizers are failing.
ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
)
// ReplicaSetCondition describes the state of a replica set at a certain point.
type ReplicaSetCondition struct {
// Type of replica set condition.
Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
// The reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// A human readable message indicating details about the transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ControllerRevision implements an immutable snapshot of state data. Clients
// are responsible for serializing and deserializing the objects that contain
// their internal state.
// Once a ControllerRevision has been successfully created, it can not be updated.
// The API Server will fail validation of all requests that attempt to mutate
// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
// it may be subject to name and representation changes in future releases, and clients should not
// depend on its stability. It is primarily for internal use by controllers.
type ControllerRevision struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data is the serialized representation of the state.
Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
// Revision indicates the revision of the state represented by Data.
Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
}
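// Illustrative sketch (hypothetical name and payload, not part of the upstream API):
// a ControllerRevision recording revision 3 of a controller's state as opaque JSON
// in Data. runtime is the package already imported by this file for RawExtension.
var exampleControllerRevision = ControllerRevision{
	ObjectMeta: metav1.ObjectMeta{Name: "web-6b8f5c9d4", Namespace: "default"},
	Data:       runtime.RawExtension{Raw: []byte(`{"spec":{"template":{}}}`)},
	Revision:   3,
}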
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
type ControllerRevisionList struct {
metav1.TypeMeta `json:",inline"`
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of ControllerRevisions
Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
}
