Merge remote-tracking branch 'upstream/master'

Vincenzo D'Amore 2015-09-14 15:36:35 +02:00
commit a2ea6ade59
296 changed files with 7389 additions and 2082 deletions


@ -13171,7 +13171,11 @@
"properties": {
"reason": {
"type": "string",
"description": "(brief) reason the container is not yet running, such as pulling its image."
"description": "(brief) reason the container is not yet running."
},
"message": {
"type": "string",
"description": "Message regarding why the container is not yet running."
}
}
},
@ -13662,6 +13666,10 @@
"sessionAffinity": {
"type": "string",
"description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies"
},
"loadBalancerIP": {
"type": "string",
"description": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature."
}
}
},
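The same capability surfaces on the CLI in this change as the `--load-balancer-ip` flag of `kubectl expose`. A sketch of its use (the controller name and IP below are placeholders, and the cloud provider must support specifying the IP):

```console
$ kubectl expose rc my-app --port=80 --type=LoadBalancer --load-balancer-ip=203.0.113.10
```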


@ -835,6 +835,8 @@ function kube::release::package_full_tarball() {
cp "${KUBE_ROOT}/README.md" "${release_stage}/"
cp "${KUBE_ROOT}/LICENSE" "${release_stage}/"
cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"
mkdir -p "${release_stage}/contrib/completions/bash"
cp "${KUBE_ROOT}/contrib/completions/bash/kubectl" "${release_stage}/contrib/completions/bash"
kube::release::clean_cruft


@ -43,17 +43,12 @@ For a regular service, this resolves to the port number and the CNAME:
`my-svc.my-namespace.svc.cluster.local`.
For a headless service, this resolves to multiple answers, one for each pod
that is backing the service, and contains the port number and a CNAME of the pod
with the format `auto-generated-name.my-svc.my-namespace.svc.cluster.local`
SRV records always contain the 'svc' segment in them and are not supported for
old-style CNAMEs where the 'svc' segment was omitted.
of the form `auto-generated-name.my-svc.my-namespace.svc.cluster.local`.
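For illustration, such an SRV record can be queried directly; the port name `my-port-name` and protocol `tcp` below are placeholders:

```console
$ dig _my-port-name._tcp.my-svc.my-namespace.svc.cluster.local SRV
```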
### Backwards compatibility
Previous versions of kube-dns made names of the form
`my-svc.my-namespace.cluster.local` (the 'svc' level was added later). For
compatibility, kube-dns supports both names for the time being. Users should
avoid creating a namespace named 'svc', to avoid conflicts. The old name
format is deprecated and will be removed in a future release.
`my-svc.my-namespace.cluster.local` (the 'svc' level was added later). This
is no longer supported.
## How do I find the DNS server?
The DNS server itself runs as a Kubernetes Service. This gives it a stable IP
@ -178,6 +173,11 @@ paths to the node's own DNS settings. If the node is able to resolve DNS names
specific to the larger environment, pods should be able to, also. See "Known
issues" below for a caveat.
If you don't want this, or if you want a different DNS config for pods, you can
use the kubelet's `--resolv-conf` flag. Setting it to "" means that pods will
not inherit DNS. Setting it to a valid file path means that kubelet will use
this file instead of `/etc/resolv.conf` for DNS inheritance.
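As a sketch, assuming kubelet is invoked directly (the custom file path is illustrative):

```sh
# Use a custom resolver file instead of /etc/resolv.conf for DNS inheritance
kubelet --resolv-conf=/etc/kubernetes/kubelet-resolv.conf

# Or disable DNS inheritance for pods entirely
kubelet --resolv-conf=""
```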
## Known issues
Kubernetes installs do not configure the nodes' resolv.conf files to use the
cluster DNS by default, because that process is inherently distro-specific.
@ -190,7 +190,7 @@ consume 1 `nameserver` record and 3 `search` records. This means that if a
local installation already uses 3 `nameserver`s or uses more than 3 `search`es,
some of those settings will be lost. As a partial workaround, the node can run
`dnsmasq` which will provide more `nameserver` entries, but not more `search`
entries.
entries. You can also use kubelet's `--resolv-conf` flag.
## Making changes
Please observe the release process for making changes to the `kube2sky`


@ -35,8 +35,8 @@ import (
"github.com/golang/glog"
skymsg "github.com/skynetservices/skydns/msg"
kapi "k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
kclient "k8s.io/kubernetes/pkg/client/unversioned"
kcache "k8s.io/kubernetes/pkg/client/unversioned/cache"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kframework "k8s.io/kubernetes/pkg/controller/framework"
kSelector "k8s.io/kubernetes/pkg/fields"


@ -29,7 +29,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/client/cache"
)
type fakeEtcdClient struct {


@ -118,45 +118,51 @@ else
# This is the best option, but it is sadly broken on most distros
# Bug: https://github.com/docker/docker/issues/4036
# 95% goes to the docker thin-pool
lvcreate -l 95%VG --thinpool docker-thinpool vg-ephemeral
# 80% goes to the docker thin-pool; we want to leave some space for host-volumes
lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral
DOCKER_OPTS="${DOCKER_OPTS} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool"
# Note that we don't move docker; docker goes direct to the thinpool
else
# Remaining space (20%) is for kubernetes data
# TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data?
lvcreate -l 100%FREE -n kubernetes vg-ephemeral
mkfs -t ext4 /dev/vg-ephemeral/kubernetes
mkdir -p /mnt/ephemeral/kubernetes
echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab
mount /mnt/ephemeral/kubernetes
move_kubelet="/mnt/ephemeral/kubernetes"
else
# aufs
# Create a docker lv, use docker on it
# 95% goes to the docker thin-pool
# We used to split docker & kubernetes, but we no longer do that, because
# host volumes go into the kubernetes area, and it is otherwise very easy
# to fill up small volumes.
release=`lsb_release -c -s`
if [[ "${release}" != "wheezy" ]] ; then
lvcreate -l 95%VG --thinpool docker-thinpool vg-ephemeral
lvcreate -l 100%FREE --thinpool pool-ephemeral vg-ephemeral
THINPOOL_SIZE=$(lvs vg-ephemeral/docker-thinpool -o LV_SIZE --noheadings --units M --nosuffix)
lvcreate -V${THINPOOL_SIZE}M -T vg-ephemeral/docker-thinpool -n docker
THINPOOL_SIZE=$(lvs vg-ephemeral/pool-ephemeral -o LV_SIZE --noheadings --units M --nosuffix)
lvcreate -V${THINPOOL_SIZE}M -T vg-ephemeral/pool-ephemeral -n ephemeral
else
# Thin provisioning not supported by Wheezy
echo "Detected wheezy; won't use LVM thin provisioning"
lvcreate -l 95%VG -n docker vg-ephemeral
lvcreate -l 100%VG -n ephemeral vg-ephemeral
fi
mkfs -t ext4 /dev/vg-ephemeral/docker
mkdir -p /mnt/ephemeral/docker
echo "/dev/vg-ephemeral/docker /mnt/ephemeral/docker ext4 noatime 0 0" >> /etc/fstab
mount /mnt/ephemeral/docker
mkfs -t ext4 /dev/vg-ephemeral/ephemeral
mkdir -p /mnt/ephemeral
echo "/dev/vg-ephemeral/ephemeral /mnt/ephemeral ext4 noatime 0 0" >> /etc/fstab
mount /mnt/ephemeral
mkdir -p /mnt/ephemeral/kubernetes
move_docker="/mnt/ephemeral"
fi
# Remaining 5% is for kubernetes data
# TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data?
lvcreate -l 100%FREE -n kubernetes vg-ephemeral
mkfs -t ext4 /dev/vg-ephemeral/kubernetes
mkdir -p /mnt/ephemeral/kubernetes
echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab
mount /mnt/ephemeral/kubernetes
move_kubelet="/mnt/ephemeral/kubernetes"
else
move_kubelet="/mnt/ephemeral/kubernetes"
fi
else
echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
fi
fi


@ -44,6 +44,8 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
ENABLE_EXPERIMENTAL_API="${KUBE_ENABLE_EXPERIMENTAL_API:-false}"
# Increase the sleep interval value if concerned about API rate limits. The default is 3 seconds.
POLL_SLEEP_INTERVAL=3
@ -87,7 +89,6 @@ CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Create autoscaler for cluster's nodes.
# NOT WORKING YET!
ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
@ -95,6 +96,13 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
fi
# Optional: Enable feature for autoscaling number of pods
# Experimental feature, not ready for production use.
ENABLE_HORIZONTAL_POD_AUTOSCALER="${KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER:-false}"
if [[ "${ENABLE_HORIZONTAL_POD_AUTOSCALER}" == "true" ]]; then
ENABLE_EXPERIMENTAL_API=true
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
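As a sketch of how this knob is consumed (the variable name comes from this file; the invocation itself is illustrative), exporting it before running kube-up enables the autoscaler and, implicitly, the experimental API:

```sh
export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=true
cluster/kube-up.sh
```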


@ -45,6 +45,9 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
ENABLE_EXPERIMENTAL_API="${KUBE_ENABLE_EXPERIMENTAL_API:-false}"
# Increase the sleep interval value if concerned about API rate limits. The default is 3 seconds.
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
@ -59,7 +62,10 @@ TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL"
APISERVER_TEST_ARGS="--runtime-config=experimental/v1 ${TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
# pod-eviction-timeout is currently 2 * node-monitor-grace-period to allow for some network
# problems, but it doesn't ensure that the Kubelet can be restarted without evicting Pods. We don't
# think that's necessary for tests.
CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL} --pod-eviction-timeout=1m20s"
SCHEDULER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
@ -92,7 +98,6 @@ CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Create autoscaler for cluster's nodes.
# NOT WORKING YET!
ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
@ -100,6 +105,13 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
fi
# Optional: Enable feature for autoscaling number of pods
# Experimental feature, not ready for production use.
ENABLE_HORIZONTAL_POD_AUTOSCALER="${KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER:-false}"
if [[ "${ENABLE_HORIZONTAL_POD_AUTOSCALER}" == "true" ]]; then
ENABLE_EXPERIMENTAL_API=true
fi
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.


@ -310,6 +310,11 @@ EOF
cluster_registry_disk_type: gce
cluster_registry_disk_size: $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE})
cluster_registry_disk_name: ${CLUSTER_REGISTRY_DISK}
EOF
fi
if [ -n "${ENABLE_HORIZONTAL_POD_AUTOSCALER:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_horizontal_pod_autoscaler: '$(echo "$ENABLE_HORIZONTAL_POD_AUTOSCALER" | sed -e "s/'/''/g")'
EOF
fi
}
@ -568,6 +573,11 @@ EOF
# CIDR range.
cat <<EOF >>/etc/salt/minion.d/grains.conf
cbr-cidr: ${MASTER_IP_RANGE}
EOF
fi
if [[ ! -z "${RUNTIME_CONFIG:-}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
EOF
fi
}


@ -54,6 +54,8 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
ENABLE_HORIZONTAL_POD_AUTOSCALER: $(yaml-quote ${ENABLE_HORIZONTAL_POD_AUTOSCALER})
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME})
RKT_VERSION: $(yaml-quote ${RKT_VERSION})


@ -51,6 +51,8 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
ENABLE_HORIZONTAL_POD_AUTOSCALER: $(yaml-quote ${ENABLE_HORIZONTAL_POD_AUTOSCALER})
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})


@ -53,6 +53,18 @@ function join_csv {
# Verify prereqs
function verify-prereqs {
if [[ "${ENABLE_EXPERIMENTAL_API}" == "true" ]]; then
if [[ -z "${RUNTIME_CONFIG}" ]]; then
RUNTIME_CONFIG="experimental/v1=true"
else
# TODO: add checking if RUNTIME_CONFIG contains "experimental/v1=false" and appending "experimental/v1=true" if not.
if echo "${RUNTIME_CONFIG}" | grep -q -v "experimental/v1=true"; then
echo "Experimental API should be turned on, but is not turned on in RUNTIME_CONFIG!"
exit 1
fi
fi
fi
local cmd
for cmd in gcloud gsutil; do
if ! which "${cmd}" >/dev/null; then
@ -465,6 +477,7 @@ function write-master-env {
if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
KUBELET_APISERVER="${MASTER_NAME}"
fi
build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
}


@ -25,7 +25,7 @@ NETWORK="${NETWORK:-default}"
NETWORK_RANGE="${NETWORK_RANGE:-10.240.0.0/16}"
FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}"
GCLOUD="${GCLOUD:-gcloud}"
CMD_GROUP="${CMD_GROUP:-beta}"
CMD_GROUP="${CMD_GROUP:-}"
GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}"
MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-1}"


@ -99,7 +99,7 @@ function verify-prereqs() {
sudo_prefix="sudo"
fi
${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update "${CMD_GROUP}"|| true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update ${CMD_GROUP:-} || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update kubectl|| true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true
}
@ -150,7 +150,7 @@ function kube-up() {
)
# Bring up the cluster.
"${GCLOUD}" "${CMD_GROUP}" container clusters create "${CLUSTER_NAME}" "${create_args[@]}"
"${GCLOUD}" ${CMD_GROUP:-} container clusters create "${CLUSTER_NAME}" "${create_args[@]}"
}
# Execute prior to running tests to initialize required structure. This is
@ -200,7 +200,7 @@ function test-setup() {
function detect-master() {
echo "... in gke:detect-master()" >&2
detect-project >&2
KUBE_MASTER_IP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
KUBE_MASTER_IP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep endpoint | cut -f 2 -d ' ')
}
@ -242,7 +242,7 @@ function detect-minion-names {
# NODE_INSTANCE_GROUP
function detect-node-instance-group {
echo "... in gke:detect-node-instance-group()" >&2
NODE_INSTANCE_GROUP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
NODE_INSTANCE_GROUP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep instanceGroupManagers | cut -d '/' -f 11)
}
@ -318,6 +318,6 @@ function test-teardown() {
function kube-down() {
echo "... in gke:kube-down()" >&2
detect-project >&2
"${GCLOUD}" "${CMD_GROUP}" container clusters delete --project="${PROJECT}" \
"${GCLOUD}" ${CMD_GROUP:-} container clusters delete --project="${PROJECT}" \
--zone="${ZONE}" "${CLUSTER_NAME}" --quiet
}


@ -84,10 +84,10 @@ net.ipv4.ip_forward:
#
# To change:
#
# 1. Find new deb name with:
# curl https://get.docker.com/ubuntu/dists/docker/main/binary-amd64/Packages
# 1. Find new deb name at:
# http://apt.dockerproject.org/repo/pool/main/d/docker-engine
# 2. Download based on that:
# curl -O https://get.docker.com/ubuntu/pool/main/<...>
# curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/<deb>
# 3. Upload to GCS:
# gsutil cp <deb> gs://kubernetes-release/docker/<deb>
# 4. Make it world readable:
@ -99,16 +99,22 @@ net.ipv4.ip_forward:
{% set storage_base='https://storage.googleapis.com/kubernetes-release/docker/' %}
# Only upgrade Docker to 1.8.2 for the containerVM image.
# TODO(dchen1107): For release 1.1, we want to update the ContainerVM image to
# include Docker 1.8.2 and comment out the upgrade below.
{% if grains.get('cloud', '') == 'gce'
and grains.get('os_family', '') == 'Debian'
and grains.get('oscodename', '') == 'wheezy' -%}
{% set docker_pkg_name='docker-engine' %}
{% set override_deb='docker-engine_1.8.2-0~wheezy_amd64.deb' %}
{% set override_deb_sha1='dcff80bffcbde458508da58d2a9fe7bef8eed404' %}
{% set override_docker_ver='1.8.2-0~wheezy' %}
{% else %}
{% set docker_pkg_name='lxc-docker-1.7.1' %}
{% set override_docker_ver='1.7.1' %}
{% set override_deb='lxc-docker-1.7.1_1.7.1_amd64.deb' %}
{% set override_deb_sha1='81abef31dd2c616883a61f85bfb294d743b1c889' %}
{% set override_docker_ver='1.7.1' %}
# Comment out the logic below for the master branch, so that we can upgrade GCE
# clusters to docker 1.7.1 by default.
#
# TODO(dchen1107): For release 1.1, we want to fall back to the
# ContainerVM-installed docker by setting override_deb, override_deb_sha1 and
# override_docker_ver back to '' for the gce cloud provider.
{% endif %}
{% if override_docker_ver != '' %}
purge-old-docker-package:
@ -135,10 +141,10 @@ purge-old-docker-package:
- mode: 644
- makedirs: true
lxc-docker-{{ override_docker_ver }}:
docker-upgrade:
pkg.installed:
- sources:
- lxc-docker-{{ override_docker_ver }}: /var/cache/docker-install/{{ override_deb }}
- {{ docker_pkg_name }}: /var/cache/docker-install/{{ override_deb }}
- require:
- file: /var/cache/docker-install/{{ override_deb }}
{% endif %} # end override_docker_ver != ''
@ -168,7 +174,7 @@ fix-service-docker:
- file: {{ environment_file }}
{% if override_docker_ver != '' %}
- require:
- pkg: lxc-docker-{{ override_docker_ver }}
- pkg: {{ docker_pkg_name }}-{{ override_docker_ver }}
{% endif %}
{% endif %}
@ -187,13 +193,13 @@ docker:
- watch:
- file: {{ environment_file }}
{% if override_docker_ver != '' %}
- pkg: lxc-docker-{{ override_docker_ver }}
- pkg: docker-upgrade
{% endif %}
{% if pillar.get('is_systemd') %}
- file: {{ pillar.get('systemd_system_path') }}/docker.service
{% endif %}
{% if override_docker_ver != '' %}
- require:
- pkg: lxc-docker-{{ override_docker_ver }}
- pkg: docker-upgrade
{% endif %}
{% endif %} # end grains.os_family != 'RedHat'


@ -1,6 +1,7 @@
{% set cluster_name = "" -%}
{% set cluster_cidr = "" -%}
{% set allocate_node_cidrs = "" -%}
{% set enable_horizontal_pod_autoscaler = "" -%}
{% if pillar['instance_prefix'] is defined -%}
{% set cluster_name = "--cluster-name=" + pillar['instance_prefix'] -%}
@ -11,6 +12,9 @@
{% if pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}
{% if pillar['enable_horizontal_pod_autoscaler'] is defined -%}
{% set enable_horizontal_pod_autoscaler = "--enable-horizontal-pod-autoscaler=" + pillar['enable_horizontal_pod_autoscaler'] -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}
@ -34,7 +38,7 @@
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + enable_horizontal_pod_autoscaler + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
{% if pillar['controller_manager_test_args'] is defined -%}


@ -32,7 +32,6 @@ mkdir -p binaries/minion
# flannel
echo "Download flannel release ..."
FLANNEL_VERSION=${FLANNEL_VERSION:-"0.4.0"}
echo "Flannel version is $FLANNEL_VERSION"
if [ ! -f flannel.tar.gz ] ; then
curl -L https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz -o flannel.tar.gz
tar xzf flannel.tar.gz
@ -54,10 +53,10 @@ cp $ETCD/etcd $ETCD/etcdctl binaries/master
# k8s
echo "Download kubernetes release ..."
K8S_VERSION=${K8S_VERSION:-"1.0.3"}
KUBE_VERSION=${KUBE_VERSION:-"1.0.3"}
if [ ! -f kubernetes.tar.gz ] ; then
curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz
curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${KUBE_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz
tar xzf kubernetes.tar.gz
fi
pushd kubernetes/server


@ -19,8 +19,6 @@ set -e
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"
# use an array to record name and ip
declare -A mm
MASTER=""
MASTER_IP=""
MINION_IPS=""
@ -443,24 +441,42 @@ function prepare-push() {
echo "Upgrading nodes to local binaries is not yet supported.Please specify the version"
exit 1
fi
# Run build.sh to get the latest release
source "${KUBE_ROOT}/cluster/ubuntu/build.sh"
# Run build.sh to get the required release
pushd ubuntu
source "build.sh"
popd
}
# Update a kubernetes master with latest release
# Update a kubernetes master with required release
function push-master {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
setClusterInfo
ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
echo "Cleaning on master ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true
if [[ "${roles[${ii}]}" == "a" ]]; then
echo "Cleaning master ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop all processes: " service etcd stop;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd;
sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
provision-master
elif [[ "${roles[${ii}]}" == "ai" ]]; then
echo "Cleaning master ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop all processes: " service etcd stop;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd;
sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
provision-masterandminion
elif [[ "${roles[${ii}]}" == "i" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}. please check"
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
@ -468,41 +484,76 @@ function push-master {
verify-cluster
}
# Update a kubernetes node with latest release
# Update a kubernetes node with required release
function push-node() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
node=${1}
node_ip=${1}
setClusterInfo
ii=0
existing=false
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "ai" && $i == *$node ]]; then
echo "Cleaning on node ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true
if [[ "${roles[${ii}]}" == "i" && ${i#*@} == $node_ip ]]; then
echo "Cleaning node ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop all processes: " service flanneld stop;
sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
provision-minion $i
existing=true
elif [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]] && [[ ${i#*@} == $node_ip ]]; then
echo "${i} is master node, please try ./kube-push -m instead"
existing=true
elif [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, or nodes ${i} don't exist. please check"
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
if [[ "${existing}" == false ]]; then
echo "node ${node_ip} does not exist"
else
verify-cluster
fi
}
# Update a kubernetes cluster with latest source
function kube-push {
# Update a kubernetes cluster with required source
function kube-push {
prepare-push
#stop all the kube's process & etcd
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
#stop all the kube's process & etcd
ii=0
for i in ${nodes}; do
echo "Cleaning on node ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop all process: " service etcd stop' || true
ssh -t $i 'rm -f /opt/bin/kube* /etc/init/kube* /etc/init.d/kube* /etc/default/kube*; rm -rf ~/kube' || true
{
echo "Cleaning on node ${i#*@}"
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd' || true
elif [[ "${roles[${ii}]}" == "i" ]]; then
ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password for stopping flanneld: " service flanneld stop' || true
else
echo "unsupported role for ${i}"
fi
ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
}
((ii=ii+1))
done
#Update all nodes with the lasted release
#Update all nodes with the required release
if [[ ! -f "ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no latest release of kubernetes,please check first"
echo "There is no required release of kubernetes, please check first"
exit 1
fi
#provision all nodes, including master & nodes
setClusterInfo
ii=0


@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/pkg/api"
_ "k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/expapi"
_ "k8s.io/kubernetes/pkg/expapi/v1"
_ "k8s.io/kubernetes/pkg/apis/experimental"
_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
@ -44,6 +44,24 @@ var (
groupVersion = flag.StringP("version", "v", "api/v1", "groupPath/version for conversion.")
)
// We're moving to pkg/apis/group/version. This handles new and legacy packages.
func pkgPath(group, version string) string {
if group == "" {
group = "api"
}
gv := group
if version != "" {
gv = path.Join(group, version)
}
switch {
case group == "api":
// TODO(lavalamp): remove this special case when we move api to apis/api
return path.Join(pkgBase, gv)
default:
return path.Join(pkgBase, "apis", gv)
}
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
flag.Parse()
@ -70,14 +88,14 @@ func main() {
glog.Fatalf("error writing package line: %v", err)
}
versionPath := path.Join(pkgBase, group, version)
versionPath := pkgPath(group, version)
generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), versionPath)
apiShort := generator.AddImport(path.Join(pkgBase, "api"))
generator.AddImport(path.Join(pkgBase, "api/resource"))
// TODO(wojtek-t): Change the overwrites to a flag.
generator.OverwritePackage(version, "")
for _, knownType := range api.Scheme.KnownTypes(version) {
if !strings.HasPrefix(knownType.PkgPath(), versionPath) {
if knownType.PkgPath() != versionPath {
continue
}
if err := generator.GenerateConversionsForType(version, knownType); err != nil {


@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/pkg/api"
_ "k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/expapi"
_ "k8s.io/kubernetes/pkg/expapi/v1"
_ "k8s.io/kubernetes/pkg/apis/experimental"
_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
@ -45,6 +45,32 @@ var (
overwrites = flag.StringP("overwrites", "o", "", "Comma-separated overwrites for package names")
)
// types inside the api package don't need to say "api.Scheme"; all others do.
func destScheme(group, version string) string {
if group == "api" && version == "" {
return "Scheme"
}
return "api.Scheme"
}
// We're moving to pkg/apis/group/version. This handles new and legacy packages.
func pkgPath(group, version string) string {
if group == "" {
group = "api"
}
gv := group
if version != "" {
gv = path.Join(group, version)
}
switch {
case group == "api":
// TODO(lavalamp): remove this special case when we move api to apis/api
return path.Join(pkgBase, gv)
default:
return path.Join(pkgBase, "apis", gv)
}
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
flag.Parse()
@ -65,10 +91,7 @@ func main() {
group, version := path.Split(*groupVersion)
group = strings.TrimRight(group, "/")
registerTo := "api.Scheme"
if *groupVersion == "api/" {
registerTo = "Scheme"
}
registerTo := destScheme(group, version)
pkgname := group
if len(version) != 0 {
pkgname = version
@ -79,7 +102,7 @@ func main() {
glog.Fatalf("error writing package line: %v", err)
}
versionPath := path.Join(pkgBase, group, version)
versionPath := pkgPath(group, version)
generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, sets.NewString("k8s.io/kubernetes"))
generator.AddImport(path.Join(pkgBase, "api"))
@ -93,7 +116,7 @@ func main() {
}
}
for _, knownType := range api.Scheme.KnownTypes(version) {
if !strings.HasPrefix(knownType.PkgPath(), versionPath) {
if knownType.PkgPath() != versionPath {
continue
}
if err := generator.AddType(knownType); err != nil {


@ -39,13 +39,13 @@ import (
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/testapi"
explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
"k8s.io/kubernetes/pkg/apiserver"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/record"
"k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/node"
replicationControllerPkg "k8s.io/kubernetes/pkg/controller/replication"
explatest "k8s.io/kubernetes/pkg/expapi/latest"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"


@ -35,11 +35,11 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/meta"
explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
"k8s.io/kubernetes/pkg/apiserver"
"k8s.io/kubernetes/pkg/capabilities"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/cloudprovider"
explatest "k8s.io/kubernetes/pkg/expapi/latest"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/storage"


@ -35,12 +35,13 @@ import (
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/autoscaler"
"k8s.io/kubernetes/pkg/controller/autoscaler/metrics"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/namespace"
"k8s.io/kubernetes/pkg/controller/node"
"k8s.io/kubernetes/pkg/controller/persistentvolume"
"k8s.io/kubernetes/pkg/controller/podautoscaler"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
replicationControllerPkg "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/controller/resourcequota"
"k8s.io/kubernetes/pkg/controller/route"
@ -63,6 +64,7 @@ type CMServer struct {
CloudConfigFile string
ConcurrentEndpointSyncs int
ConcurrentRCSyncs int
ConcurrentDSCSyncs int
ServiceSyncPeriod time.Duration
NodeSyncPeriod time.Duration
ResourceQuotaSyncPeriod time.Duration
@ -98,6 +100,7 @@ func NewCMServer() *CMServer {
Address: net.ParseIP("127.0.0.1"),
ConcurrentEndpointSyncs: 5,
ConcurrentRCSyncs: 5,
ConcurrentDSCSyncs: 2,
ServiceSyncPeriod: 5 * time.Minute,
NodeSyncPeriod: 10 * time.Second,
ResourceQuotaSyncPeriod: 10 * time.Second,
@ -213,6 +216,9 @@ func (s *CMServer) Run(_ []string) error {
controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas)
go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
go daemon.NewDaemonSetsController(kubeClient).
Run(s.ConcurrentDSCSyncs, util.NeverStop)
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
@ -248,7 +254,7 @@ func (s *CMServer) Run(_ []string) error {
namespaceController.Run()
if s.EnableHorizontalPodAutoscaler {
horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient))
horizontalPodAutoscalerController := podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient))
horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod)
}


@ -27,10 +27,10 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/client/unversioned/record"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/config"


@ -35,11 +35,11 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/chaosclient"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/client/unversioned/record"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/healthz"
"k8s.io/kubernetes/pkg/kubelet"
@ -119,6 +119,7 @@ type KubeletServer struct {
ResolverConfig string
ResourceContainer string
RktPath string
RktStage1Image string
RootDirectory string
RunOnce bool
StandaloneMode bool
@ -189,6 +190,7 @@ func NewKubeletServer() *KubeletServer {
RegistryBurst: 10,
ResourceContainer: "/kubelet",
RktPath: "",
RktStage1Image: "",
RootDirectory: defaultRootDir,
SyncFrequency: 10 * time.Second,
SystemContainer: "",
@ -254,6 +256,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'")
fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used")
fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
fs.IntVar(&s.MaxPods, "max-pods", 40, "Number of Pods that can run on this Kubelet.")
@ -364,6 +367,7 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) {
ResolverConfig: s.ResolverConfig,
ResourceContainer: s.ResourceContainer,
RktPath: s.RktPath,
RktStage1Image: s.RktStage1Image,
RootDirectory: s.RootDirectory,
Runonce: s.RunOnce,
StandaloneMode: (len(s.APIServerList) == 0),
@ -789,6 +793,7 @@ type KubeletConfig struct {
ResolverConfig string
ResourceContainer string
RktPath string
RktStage1Image string
RootDirectory string
Runonce bool
StandaloneMode bool
@ -851,6 +856,7 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.CgroupRoot,
kc.ContainerRuntime,
kc.RktPath,
kc.RktStage1Image,
kc.Mounter,
kc.DockerDaemonContainer,
kc.SystemContainer,


@ -359,6 +359,7 @@ _kubectl_create()
flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--schema-cache-dir=")
flags+=("--validate")
must_have_one_flag=()
@ -388,6 +389,7 @@ _kubectl_replace()
flags+=("--grace-period=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--schema-cache-dir=")
flags+=("--timeout=")
flags+=("--validate")
@ -534,6 +536,7 @@ _kubectl_rolling-update()
flags+=("--output-version=")
flags+=("--poll-interval=")
flags+=("--rollback")
flags+=("--schema-cache-dir=")
flags+=("--show-all")
flags+=("-a")
flags+=("--sort-by=")
@ -687,6 +690,7 @@ _kubectl_run()
flags+=("--image=")
flags+=("--labels=")
two_word_flags+=("-l")
flags+=("--limits=")
flags+=("--no-headers")
flags+=("--output=")
two_word_flags+=("-o")
@ -695,6 +699,7 @@ _kubectl_run()
flags+=("--port=")
flags+=("--replicas=")
two_word_flags+=("-r")
flags+=("--requests=")
flags+=("--restart=")
flags+=("--show-all")
flags+=("-a")
@ -762,6 +767,7 @@ _kubectl_expose()
flags+=("--generator=")
flags+=("--labels=")
two_word_flags+=("-l")
flags+=("--load-balancer-ip=")
flags+=("--name=")
flags+=("--no-headers")
flags+=("--output=")
@ -781,7 +787,6 @@ _kubectl_expose()
flags+=("--type=")
must_have_one_flag=()
must_have_one_flag+=("--port=")
must_have_one_noun=()
}


@ -47,6 +47,7 @@ import (
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/controller/daemon"
)
// CMServer is the main context object for the controller manager.
@ -113,6 +114,9 @@ func (s *CMServer) Run(_ []string) error {
controllerManager := replicationcontroller.NewReplicationManager(kubeClient, replicationcontroller.BurstReplicas)
go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
go daemon.NewDaemonSetsController(kubeClient).
Run(s.ConcurrentDSCSyncs, util.NeverStop)
//TODO(jdef) should eventually support more cloud providers here
if s.CloudProvider != mesos.ProviderName {
glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)


@ -37,8 +37,8 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/kubelet"
kconfig "k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/dockertools"


@ -325,6 +325,7 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
kc.CgroupRoot,
kc.ContainerRuntime,
kc.RktPath,
kc.RktStage1Image,
kc.Mounter,
kc.DockerDaemonContainer,
kc.SystemContainer,


@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/proc"
"k8s.io/kubernetes/contrib/mesos/pkg/queue"
"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/util/sets"
)


@ -19,7 +19,7 @@ package queue
import (
"time"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/client/cache"
)
type EventType int


@ -35,9 +35,9 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/client/unversioned/record"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util"
plugin "k8s.io/kubernetes/plugin/pkg/scheduler"


@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/runtime"
kutil "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/watch"


@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/queue"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/client/cache"
)
// wrapper for the k8s pod type so that we can define additional methods on a "pod"


@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/endpoints"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
kservice "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"


@ -222,7 +222,7 @@ you are doing [manual node administration](#manual-node-administration), then yo
capacity when adding a node.
The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It
checks that the sum of the limits of containers on the node is no greater than than the node capacity. It
checks that the sum of the limits of containers on the node is no greater than the node capacity. It
includes all containers started by kubelet, but not containers started directly by docker, nor
processes not in containers.

Binary image file changed, not shown (before: 70 KiB, after: 94 KiB).


@ -60,7 +60,7 @@ Instead of a single Timestamp, each event object [contains](http://releases.k8s.
Each binary that generates events:
* Maintains a historical record of previously generated events:
* Implemented with ["Least Recently Used Cache"](https://github.com/golang/groupcache/blob/master/lru/lru.go) in [`pkg/client/unversioned/record/events_cache.go`](../../pkg/client/unversioned/record/events_cache.go).
* Implemented with ["Least Recently Used Cache"](https://github.com/golang/groupcache/blob/master/lru/lru.go) in [`pkg/client/record/events_cache.go`](../../pkg/client/record/events_cache.go).
* The key in the cache is generated from the event object minus timestamps/count/transient fields, specifically the following events fields are used to construct a unique key for an event:
* `event.Source.Component`
* `event.Source.Host`


@ -38,7 +38,7 @@ with a number of existing API types and with the [API
conventions](api-conventions.md). If creating a new API
type/resource, we also recommend that you first send a PR containing
just a proposal for the new API types, and that you initially target
the experimental API (pkg/expapi).
the experimental API (pkg/apis/experimental).
The Kubernetes API has two major components - the internal structures and
the versioned APIs. The versioned APIs are intended to be stable, while the
@ -399,10 +399,10 @@ The conversion code resides with each versioned API. There are two files:
functions
- `pkg/api/<version>/conversion_generated.go` containing auto-generated
conversion functions
- `pkg/expapi/<version>/conversion.go` containing manually written conversion
functions
- `pkg/expapi/<version>/conversion_generated.go` containing auto-generated
- `pkg/apis/experimental/<version>/conversion.go` containing manually written
conversion functions
- `pkg/apis/experimental/<version>/conversion_generated.go` containing
auto-generated conversion functions
Since auto-generated conversion functions are using manually written ones,
those manually written should be named with a defined convention, i.e. a function
@ -437,7 +437,7 @@ of your versioned api objects.
The deep copy code resides with each versioned API:
- `pkg/api/<version>/deep_copy_generated.go` containing auto-generated copy functions
- `pkg/expapi/<version>/deep_copy_generated.go` containing auto-generated copy functions
- `pkg/apis/experimental/<version>/deep_copy_generated.go` containing auto-generated copy functions
To regenerate them:
- run
@ -446,6 +446,23 @@ To regenerate them:
hack/update-generated-deep-copies.sh
```
## Making a new API Group
This section is under construction, as we make the tooling completely generic.
At the moment, you'll have to make a new directory under pkg/apis/; copy the
directory structure from pkg/apis/experimental. Add the new group/version to all
of the hack/{verify,update}-generated-{deep-copy,conversions,swagger}.sh files
in the appropriate places--it should just require adding your new group/version
to a bash array. You will also need to make sure your new types are imported by
the generation commands (cmd/gendeepcopy/ & cmd/genconversion). These
instructions may not be complete and will be updated as we gain experience.
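A rough sketch of those steps for a hypothetical group `mygroup` (the names are placeholders and the exact file list may differ):

```sh
# Illustrative only; "mygroup" and "v1" are placeholders.
mkdir -p pkg/apis/mygroup/v1
cp -r pkg/apis/experimental/* pkg/apis/mygroup/   # copy the structure, then edit the types
# Add the new group/version to the bash arrays in the
# hack/{verify,update}-generated-{deep-copy,conversions,swagger}.sh scripts,
# and make sure cmd/gendeepcopy and cmd/genconversion import the new types. Then, e.g.:
hack/update-generated-deep-copies.sh
```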
Adding API groups outside of the pkg/apis/ directory is not currently supported,
but is clearly desirable. The deep copy & conversion generators need to work by
parsing go files instead of by reflection; then they will be easy to point at
arbitrary directories: see issue [#13775](http://issue.k8s.io/13775).
## Update the fuzzer
Part of our testing regimen for APIs is to "fuzz" (fill with random values) API


@ -108,7 +108,7 @@ Once the playbook has finished, it will print out the IP of the Kubernetes master
SSH to it using the key that was created and using the _core_ user and you can list the machines in your cluster:
$ ssh -i ~/.ssh/id_rsa_k8s core@<maste IP>
$ ssh -i ~/.ssh/id_rsa_k8s core@<master IP>
$ fleetctl list-machines
MACHINE IP METADATA
a017c422... <node #1 IP> role=node


@ -42,7 +42,7 @@ Running Kubernetes locally via Docker
- [Step Three: Run the service proxy](#step-three-run-the-service-proxy)
- [Test it out](#test-it-out)
- [Run an application](#run-an-application)
- [Expose it as a service:](#expose-it-as-a-service)
- [Expose it as a service](#expose-it-as-a-service)
- [A note on turning down your cluster](#a-note-on-turning-down-your-cluster)
### Overview
@ -128,7 +128,7 @@ On OS/X you will need to set up port forwarding via ssh:
boot2docker ssh -L8080:localhost:8080
```
List the nodes in your cluster by running::
List the nodes in your cluster by running:
```sh
kubectl get nodes
@ -149,7 +149,7 @@ If you are running different Kubernetes clusters, you may need to specify `-s ht
kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
```
now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled.
Now run `docker ps`, and you should see nginx running. You may need to wait a few minutes for the image to get pulled.
### Expose it as a service
@ -164,7 +164,7 @@ NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR
nginx 10.0.93.211 <none> 80/TCP run=nginx 1h
```
If `CLUSTER_IP` is blank run the following command to obtain it. Know issue #10836
If `CLUSTER_IP` is blank, run the following command to obtain it. Known issue [#10836](https://github.com/kubernetes/kubernetes/issues/10836)
```sh
kubectl get svc nginx


@ -123,7 +123,7 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_API_ARGS=""
```
* Edit /etc/etcd/etcd.conf,let the etcd to listen all the ip instead of 127.0.0.1, if not, you will get the error like "connection refused"
* Edit /etc/etcd/etcd.conf so that etcd listens on all IPs instead of 127.0.0.1; if not, you will get errors like "connection refused". Note that Fedora 22 uses etcd 2.0, which now uses ports 2379 and 2380 (as opposed to etcd 0.4.6, which used 4001 and 7001).
```sh
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001"


@ -132,6 +132,24 @@ However the gcloud bundled kubectl version may be older than the one downloaded
get.k8s.io install script. We recommend you use the downloaded binary to avoid
potential issues with client/server version skew.
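A quick way to check which binary is in use and whether the client and server versions match (a sketch; output omitted):

```console
$ which kubectl
$ kubectl version
```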
#### Enabling bash completion of the Kubernetes command line tools
You may find it useful to enable `kubectl` bash completion:
```
$ source ./contrib/completions/bash/kubectl
```
**Note**: This will last for the duration of your bash session. If you want to make this permanent, you need to add this line to your bash profile.
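For example, to make it permanent for the current user (the path assumes you run this from the unpacked `kubernetes` directory):

```sh
echo "source $PWD/contrib/completions/bash/kubectl" >> ~/.bashrc
```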
Alternatively, on most Linux distributions you can also move the completions file to your bash_completion.d directory like this:
```
$ cp ./contrib/completions/bash/kubectl /etc/bash_completion.d/
```
but then you have to update it when you update kubectl.
### Getting started with your cluster
#### Inspect your cluster


@ -38,36 +38,31 @@ We still have [a bunch of work](http://issue.k8s.io/8262) to do to make the expe
### **Prerequisite**
- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on your machine and should be enabled. The minimum version required at this moment (2015/05/28) is [215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html).
- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on the machine and should be enabled. The minimum version required at this moment (2015/09/01) is 219.
*(Note that systemd is not required by rkt itself, we are using it here to monitor and manage the pods launched by kubelet.)*
- Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt).
The minimum version required for now is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6).
- Make sure the `rkt metadata service` is running because it is necessary for running pod in private network mode.
More details about the networking of rkt can be found in the [documentation](https://github.com/coreos/rkt/blob/master/Documentation/networking.md).
To start the `rkt metadata service`, you can simply run:
```console
$ sudo rkt metadata-service
```
If you want the service to be running as a systemd service, then:
```console
$ sudo systemd-run rkt metadata-service
```
Alternatively, you can use the [rkt-metadata.service](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.service) and [rkt-metadata.socket](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.socket) to start the service.
The minimum version required for now is [v0.8.0](https://github.com/coreos/rkt/releases/tag/v0.8.0).
- Note that for rkt versions later than v0.7.0, the `metadata service` is not required for running pods in private networks. So now rkt pods will not register with the metadata service by default.
### Local cluster
To use rkt as the container runtime, you just need to set the environment variable `CONTAINER_RUNTIME`:
To use rkt as the container runtime, we need to supply `--container-runtime=rkt` and `--rkt-path=$PATH_TO_RKT_BINARY` to kubelet. We can also provide the `--rkt-stage1-image` flag
to select which [stage1 image](https://github.com/coreos/rkt/blob/master/Documentation/running-lkvm-stage1.md) we want to use.
If you are using the [hack/local-up-cluster.sh](../../../hack/local-up-cluster.sh) script to launch the local cluster, you can edit the environment variables `CONTAINER_RUNTIME`, `RKT_PATH` and `RKT_STAGE1_IMAGE` to
set these flags:
```console
$ export CONTAINER_RUNTIME=rkt
$ export RKT_PATH=$PATH_TO_RKT_BINARY
$ export RKT_STAGE1_IMAGE=$PATH_TO_STAGE1_IMAGE
```
Then we can launch the local cluster using the script:
```console
$ hack/local-up-cluster.sh
```
@ -85,7 +80,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:
```console
$ export KUBE_RKT_VERSION=0.5.6
$ export KUBE_RKT_VERSION=0.8.0
```
Then you can launch the cluster by:
@ -109,7 +104,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:
```console
$ export KUBE_RKT_VERSION=0.5.6
$ export KUBE_RKT_VERSION=0.8.0
```
You can optionally choose the CoreOS channel by setting `COREOS_CHANNEL`:
@ -134,6 +129,46 @@ See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try ou
For more complete applications, please look in the [examples directory](../../../examples/).
### Debugging
Here are several tips for when you run into any issues.
##### Check logs
By default, the log verbose level is 2. In order to see more logs related to rkt, we can set the verbose level to 4.
For a local cluster, we can set the environment variable `LOG_LEVEL=4`.
If the cluster is using salt, we can edit the [logging.sls](../../../cluster/saltbase/pillar/logging.sls) in the saltbase.
##### Check rkt pod status
To check the pods' status, we can use rkt commands such as `rkt list`, `rkt status`, `rkt image list`, etc.
More information about the rkt command line can be found [here](https://github.com/coreos/rkt/blob/master/Documentation/commands.md)
##### Check journal logs
As we use systemd to launch rkt pods (by creating service files which will run `rkt run-prepared`), we can check the pods' logs
using `journalctl`:
- Check the running state of the systemd service:
```console
$ sudo journalctl -u $SERVICE_FILE
```
where `$SERVICE_FILE` is the name of the service file created for the pod; you can find it in the kubelet logs.
##### Check the log of the container in the pod:
```console
$ sudo journalctl -M rkt-$UUID -u $CONTAINER_NAME
```
where `$UUID` is the rkt pod's UUID, which you can find via `rkt list --full`, and `$CONTAINER_NAME` is the container's name.
##### Check Kubernetes events and logs
Besides the tricks above, Kubernetes also provides handy tools for debugging pods. More information can be found [here](../../../docs/user-guide/application-troubleshooting.md)
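For instance (a sketch; `mypod` is a placeholder pod name):

```console
$ kubectl describe pod mypod
$ kubectl get events
$ kubectl logs mypod
```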
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/rkt/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->


@ -246,7 +246,8 @@ kubernetes/cluster/ubuntu/build.sh
sudo cp -f binaries/minion/* /usr/bin
# Get the iptables-based kube-proxy recommended for this demo
sudo wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy -P /usr/bin/
wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy
sudo cp kube-proxy /usr/bin/
sudo chmod +x /usr/bin/kube-proxy
```


@ -28,6 +28,10 @@ JSON and YAML formats are accepted.
\fB\-o\fP, \fB\-\-output\fP=""
Output mode. Use "\-o name" for shorter output (resource/name).
.PP
\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
.PP
\fB\-\-validate\fP=true
If true, use a schema to validate the input before sending it


@ -50,6 +50,10 @@ re\-use the labels from the resource it exposes.
\fB\-l\fP, \fB\-\-labels\fP=""
Labels to apply to the service created by this call.
.PP
\fB\-\-load\-balancer\-ip\fP=""
IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud\-provider specific).
.PP
\fB\-\-name\fP=""
The name for the newly created object.


@ -46,6 +46,10 @@ Please refer to the models in
\fB\-o\fP, \fB\-\-output\fP=""
Output mode. Use "\-o name" for shorter output (resource/name).
.PP
\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
.PP
\fB\-\-timeout\fP=0
Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object


@ -60,6 +60,10 @@ existing replication controller and overwrite at least one (common) label in its
\fB\-\-rollback\fP=false
If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout
.PP
\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
.PP
\fB\-a\fP, \fB\-\-show\-all\fP=false
When printing, show all resources (default hide terminated pods.)

View File

@ -50,6 +50,10 @@ Creates a replication controller to manage the created container(s).
\fB\-l\fP, \fB\-\-labels\fP=""
Labels to apply to the pod(s).
.PP
\fB\-\-limits\fP=""
The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'
.PP
\fB\-\-no\-headers\fP=false
When using the default output, don't print headers.
@ -76,6 +80,10 @@ Creates a replication controller to manage the created container(s).
\fB\-r\fP, \fB\-\-replicas\fP=1
Number of replicas to create for this container. Default is 1.
.PP
\fB\-\-requests\fP=""
The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'
.PP
\fB\-\-restart\fP="Always"
The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and \-\-replicas must be 1. Default 'Always'

View File

@ -166,7 +166,7 @@ the same time, we can introduce an additional etcd event type:
Thus, we need to create the EtcdResync event, extend watch.Interface and
its implementations to support it and handle those events appropriately
in places like
[Reflector](../../pkg/client/unversioned/cache/reflector.go)
[Reflector](../../pkg/client/cache/reflector.go)
However, this might turn out to be an unnecessary optimization if the apiserver
will always keep up (which is possible in the new design). We will work

View File

@ -88,7 +88,7 @@ use the full image name (e.g. gcr.io/my_project/image:tag).
All pods in a cluster will have read access to images in this registry.
The kubelet kubelet will authenticate to GCR using the instance's
The kubelet will authenticate to GCR using the instance's
Google service account. The service account on the instance
will have the `https://www.googleapis.com/auth/devstorage.read_only` scope,
so it can pull from the project's GCR, but not push.

View File

@ -61,6 +61,7 @@ $ cat pod.json | kubectl create -f -
```
-f, --filename=[]: Filename, directory, or URL to file to use to create the resource
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
--validate[=true]: If true, use a schema to validate the input before sending it
```
@ -96,7 +97,7 @@ $ cat pod.json | kubectl create -f -
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152429973 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.289761103 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_create.md?pixel)]()

View File

@ -45,7 +45,7 @@ selector for a new Service on the specified port. If no labels are specified, th
re-use the labels from the resource it exposes.
```
kubectl expose (-f FILENAME | TYPE NAME) --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [----external-ip=external-ip-of-service] [--type=type]
kubectl expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [----external-ip=external-ip-of-service] [--type=type]
```
### Examples
@ -73,6 +73,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service
--generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.
-l, --labels="": Labels to apply to the service created by this call.
--load-balancer-ip="": IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).
--name="": The name for the newly created object.
--no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
@ -121,7 +122,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.159044239 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-11 03:36:48.458259032 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]()

View File

@ -74,6 +74,7 @@ kubectl replace --force -f ./pod.json
--force[=false]: Delete and re-create the specified resource
--grace-period=-1: Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
--timeout=0: Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object
--validate[=true]: If true, use a schema to validate the input before sending it
```
@ -110,7 +111,7 @@ kubectl replace --force -f ./pod.json
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153166598 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.290279625 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_replace.md?pixel)]()

View File

@ -78,6 +78,7 @@ $ kubectl rolling-update frontend --image=image:v2
--output-version="": Output the formatted object with the given version (default api-version).
--poll-interval=3s: Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
--rollback[=false]: If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
@ -118,7 +119,7 @@ $ kubectl rolling-update frontend --image=image:v2
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154895732 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.293748592 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]()

View File

@ -87,12 +87,14 @@ $ kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
--hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container.
--image="": The image for the container to run.
-l, --labels="": Labels to apply to the pod(s).
--limits="": The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'
--no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
--output-version="": Output the formatted object with the given version (default api-version).
--overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.
--port=-1: The port that this container exposes.
-r, --replicas=1: Number of replicas to create for this container. Default is 1.
--requests="": The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'
--restart="Always": The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and --replicas must be 1. Default 'Always'
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.

View File

@ -79,7 +79,7 @@ Note that replication controllers may themselves have labels and would generally
Pods may be removed from a replication controller's target set by changing their labels. This technique may be used to remove pods from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed).
Similarly, deleting a replication controller does not affect the pods it created. Its `replicas` field must first be set to 0 in order to delete the pods controlled. (Note that the client tool, kubectl, provides a single operation, [stop](kubectl/kubectl_stop.md) to delete both the replication controller and the pods it controls. However, there is no such operation in the API at the moment)
Similarly, deleting a replication controller using the API does not affect the pods it created. Its `replicas` field must first be set to `0` in order to delete the pods controlled. (Note that the client tool, `kubectl`, provides a single operation, [delete](kubectl/kubectl_delete.md), to delete both the replication controller and the pods it controls. If you want to leave the pods running when deleting a replication controller, specify `--cascade=false`. However, there is no such operation in the API at the moment.)
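For example, to delete a replication controller while leaving its pods running (the controller name `my-nginx` is illustrative):
```console
$ kubectl delete rc my-nginx --cascade=false
```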
## Responsibilities of the replication controller

View File

@ -144,7 +144,7 @@ secrets/build-robot-secret
Now you can confirm that the newly built secret is populated with an API token for the "build-robot" service account.
```console
kubectl describe secrets/build-robot-secret
$ kubectl describe secrets/build-robot-secret
Name: build-robot-secret
Namespace: default
Labels: <none>

View File

@ -433,6 +433,7 @@ information about the provisioned balancer will be published in the `Service`'s
}
],
"clusterIP": "10.0.171.239",
"loadBalancerIP": "78.11.24.19",
"type": "LoadBalancer"
},
"status": {
@ -448,7 +449,11 @@ information about the provisioned balancer will be published in the `Service`'s
```
Traffic from the external load balancer will be directed at the backend `Pods`,
though exactly how that works depends on the cloud provider.
though exactly how that works depends on the cloud provider. Some cloud providers allow
the `loadBalancerIP` to be specified. In those cases, the load-balancer will be created
with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified,
an ephemeral IP will be assigned to the loadBalancer. If the `loadBalancerIP` is specified, but the
cloud provider does not support the feature, the field will be ignored.
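As a sketch, the same field can be set from the command line via `kubectl expose` and its `--load-balancer-ip` flag (the controller name `my-nginx`, port, and IP below are illustrative):
```console
$ kubectl expose rc my-nginx --port=80 --type=LoadBalancer --load-balancer-ip=78.11.24.19
```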
## Shortcomings

View File

@ -194,6 +194,7 @@ func TestExampleObjectSchemas(t *testing.T) {
"../examples/glusterfs": {
"glusterfs-pod": &api.Pod{},
"glusterfs-endpoints": &api.Endpoints{},
"glusterfs-service": &api.Service{},
},
"../docs/user-guide/liveness": {
"exec-liveness": &api.Pod{},

View File

@ -75,6 +75,15 @@ NAME ENDPOINTS
glusterfs-cluster 10.240.106.152:1,10.240.79.157:1
```
We also need to create a service for these endpoints, so that the endpoints will be persisted. We will add this service without a selector to tell Kubernetes that we want to add its endpoints manually. You can see [glusterfs-service.json](glusterfs-service.json) for details.
Use this command to create the service:
```sh
$ kubectl create -f examples/glusterfs/glusterfs-service.json
```
### Create a POD
The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustrates a sample configuration.

View File

@ -0,0 +1,12 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs-cluster"
},
"spec": {
"ports": [
{"port": 1}
]
}
}

View File

@ -46,7 +46,7 @@ This example shows how to build a simple, multi-tier web application using Kuber
- [Step Three: Fire up the replicated slave pods](#step-three-fire-up-the-replicated-slave-pods)
- [Step Four: Create the redis slave service](#step-four-create-the-redis-slave-service)
- [Step Five: Create the frontend replicated pods](#step-five-create-the-frontend-replicated-pods)
- [Step Six: Set up the guestbook frontend service.](#step-six-set-up-the-guestbook-frontend-service)
- [Step Six: Set up the guestbook frontend service](#step-six-set-up-the-guestbook-frontend-service)
- [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific)
- [Create the Frontend Service](#create-the-frontend-service)
- [Accessing the guestbook site externally](#accessing-the-guestbook-site-externally)

View File

@ -26,26 +26,27 @@ kube::golang::setup_env
genconversion=$(kube::util::find-binary "genconversion")
function generate_version() {
local version=$1
local TMPFILE="/tmp/conversion_generated.$(date +%s).go"
local group_version=$1
local TMPFILE="/tmp/conversion_generated.$(date +%s).go"
echo "Generating for ${version}"
echo "Generating for ${group_version}"
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE"
cat >> "$TMPFILE" <<EOF
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE"
cat >> "$TMPFILE" <<EOF
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-conversions.sh
EOF
"${genconversion}" -v "${version}" -f - >> "$TMPFILE"
"${genconversion}" -v "${group_version}" -f - >> "$TMPFILE"
mv "$TMPFILE" "pkg/${version}/conversion_generated.go"
mv "$TMPFILE" "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/conversion_generated.go"
}
DEFAULT_VERSIONS="api/v1 expapi/v1"
VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS}
# TODO(lavalamp): get this list by listing the pkg/apis/ directory?
DEFAULT_GROUP_VERSIONS="api/v1 experimental/v1"
VERSIONS=${VERSIONS:-$DEFAULT_GROUP_VERSIONS}
for ver in $VERSIONS; do
# Ensure that the version being processed is registered by setting
# KUBE_API_VERSIONS.
KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}"
# Ensure that the version being processed is registered by setting
# KUBE_API_VERSIONS.
KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}"
done

View File

@ -25,42 +25,35 @@ kube::golang::setup_env
gendeepcopy=$(kube::util::find-binary "gendeepcopy")
function result_file_name() {
local version=$1
echo "pkg/${version}/deep_copy_generated.go"
}
function generate_version() {
local version=$1
local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go"
local group_version=$1
local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go"
echo "Generating for ${version}"
echo "Generating for ${group_version}"
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
cat >> $TMPFILE <<EOF
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
cat >> $TMPFILE <<EOF
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-deep-copies.sh.
EOF
"${gendeepcopy}" -v "${version}" -f - -o "${version}=" >> "$TMPFILE"
"${gendeepcopy}" -v "${group_version}" -f - -o "${group_version}=" >> "$TMPFILE"
mv "$TMPFILE" `result_file_name ${version}`
local dest="pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/deep_copy_generated.go"
rm -f "${dest}"
mv "${TMPFILE}" "${dest}"
}
function generate_deep_copies() {
local versions="$@"
# To avoid compile errors, remove the currently existing files.
for ver in ${versions}; do
rm -f `result_file_name ${ver}`
done
for ver in ${versions}; do
# Ensure that the version being processed is registered by setting
# KUBE_API_VERSIONS.
apiVersions="${ver##*/}"
KUBE_API_VERSIONS="${apiVersions}" generate_version "${ver}"
done
local group_versions="$@"
for ver in ${group_versions}; do
# Ensure that the version being processed is registered by setting
# KUBE_API_VERSIONS.
apiVersions="${ver##*/}"
KUBE_API_VERSIONS="${apiVersions}" generate_version "${ver}"
done
}
DEFAULT_VERSIONS="api/ api/v1 expapi/ expapi/v1"
DEFAULT_VERSIONS="api/ api/v1 experimental/ experimental/v1"
VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS}
generate_deep_copies "$VERSIONS"

View File

@ -40,7 +40,7 @@ find_files() {
\) -prune \
\) \
\( -wholename '*pkg/api/v*/types.go' \
-o -wholename '*pkg/expapi/v*/types.go' \
-o -wholename '*pkg/apis/*/v*/types.go' \
\)
}
@ -61,7 +61,7 @@ for file in $versioned_api_files; do
fi
done
internal_types_files="${KUBE_ROOT}/pkg/api/types.go ${KUBE_ROOT}/pkg/expapi/types.go"
internal_types_files="${KUBE_ROOT}/pkg/api/types.go ${KUBE_ROOT}/pkg/apis/experimental/types.go"
for internal_types_file in $internal_types_files; do
if grep json: "${internal_types_file}" | grep -v // | grep description: ; then
echo "Internal API types should not contain descriptions"

View File

@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
APIROOTS=${APIROOTS:-pkg/api pkg/expapi}
APIROOTS=${APIROOTS:-pkg/api pkg/apis/experimental}
_tmp="${KUBE_ROOT}/_tmp"
cleanup() {

View File

@ -25,7 +25,7 @@ kube::golang::setup_env
gendeepcopy=$(kube::util::find-binary "gendeepcopy")
APIROOTS=${APIROOTS:-pkg/api pkg/expapi}
APIROOTS=${APIROOTS:-pkg/api pkg/apis/experimental}
_tmp="${KUBE_ROOT}/_tmp"
cleanup() {

View File

@ -70,6 +70,10 @@ if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
: ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-central1-f"}
: ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel
elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then
KUBERNETES_PROVIDER="gke"
: ${E2E_ZONE:="us-central1-f"}
fi
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
@ -84,8 +88,8 @@ if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
fi
fi
# Specialized tests which should be skipped by default for projects.
GCE_DEFAULT_SKIP_TESTS=(
# Specialized to skip when running reboot tests.
REBOOT_SKIP_TESTS=(
"Autoscaling\sSuite"
"Skipped"
"Reboot"
@ -93,6 +97,20 @@ GCE_DEFAULT_SKIP_TESTS=(
"Example"
)
# Specialized tests which should be skipped by default for projects.
GCE_DEFAULT_SKIP_TESTS=(
"${REBOOT_SKIP_TESTS[@]}"
"Reboot")
# Tests which cannot be run on GKE, e.g. because they require
# master ssh access.
GKE_REQUIRED_SKIP_TESTS=(
"Nodes"
"Etcd\sFailure"
"MasterCerts"
"Shell"
)
# The following tests are known to be flaky, and are thus run only in their own
# -flaky- build variants.
GCE_FLAKY_TESTS=(
@ -131,6 +149,7 @@ GCE_PARALLEL_SKIP_TESTS=(
GCE_PARALLEL_FLAKY_TESTS=(
"DaemonRestart"
"Elasticsearch"
"Namespaces.*should\sdelete\sfast"
"PD"
"ServiceAccounts"
"Services.*change\sthe\stype"
@ -345,6 +364,81 @@ case ${JOB_NAME} in
: ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
: ${PROJECT:="k8s-jkns-e2e-gce-release"}
;;
kubernetes-e2e-gke-prod)
: ${DOGFOOD_GCLOUD:="true"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-prod"}
: ${E2E_NETWORK:="e2e-gke-prod"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${JENKINS_USE_SERVER_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-prod"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-staging)
: ${DOGFOOD_GCLOUD:="true"}
: ${GKE_API_ENDPOINT:="https://staging-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-staging"}
: ${E2E_NETWORK:="e2e-gke-staging"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${JENKINS_USE_SERVER_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-staging"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-test)
: ${DOGFOOD_GCLOUD:="true"}
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/rc"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-test"}
: ${E2E_NETWORK:="e2e-gke-test"}
: ${JENKINS_USE_RELEASE_TARS:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-ci)
: ${DOGFOOD_GCLOUD:="true"}
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/staging"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci"}
: ${E2E_NETWORK:="e2e-gke-ci"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-ci-reboot)
: ${DOGFOOD_GCLOUD:="true"}
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/staging"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci-reboot"}
: ${E2E_NETWORK:="e2e-gke-ci"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${REBOOT_SKIP_TESTS[@]:+${REBOOT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
)"}
;;
esac
# AWS variables
@ -362,6 +456,13 @@ export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-}
export CLUSTER_NAME=${E2E_CLUSTER_NAME}
export ZONE=${E2E_ZONE}
export KUBE_GKE_NETWORK=${E2E_NETWORK}
export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-}
export DOGFOOD_GCLOUD=${DOGFOOD_GCLOUD:-}
export CMD_GROUP=${CMD_GROUP:-}
if [[ ! -z "${GKE_API_ENDPOINT:-}" ]]; then
export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT}
fi
# Shared cluster variables
export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
@ -371,6 +472,7 @@ export MINION_SIZE=${MINION_SIZE:-}
export NUM_MINIONS=${NUM_MINIONS:-}
export PROJECT=${PROJECT:-}
export KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER}
export PATH=${PATH}:/usr/local/go/bin
export KUBE_SKIP_CONFIRMATIONS=y
@ -407,10 +509,13 @@ if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; the
# gcloud bug can cause racing component updates to stomp on each
# other.
export KUBE_SKIP_UPDATE=y
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update preview -q" || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update alpha -q" || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update beta -q" || true
{
sudo flock -x -n 9
gcloud components update -q || true
gcloud components update preview -q || true
gcloud components update alpha -q || true
gcloud components update beta -q || true
} 9>/var/run/lock/gcloud-components.lock
if [[ ! -z ${JENKINS_EXPLICIT_VERSION:-} ]]; then
# Use an explicit pinned version like "ci/v0.10.0-101-g6c814c4" or
@ -496,6 +601,21 @@ ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS}
export E2E_REPORT_DIR=${ARTIFACTS}
### Pre Set Up ###
# Install gcloud from a custom path if provided. Used to test GKE with gcloud
# at HEAD, release candidate.
if [[ ! -z "${CLOUDSDK_BUCKET:-}" ]]; then
sudo gsutil -m cp -r "${CLOUDSDK_BUCKET}" ~
mv ~/$(basename "${CLOUDSDK_BUCKET}") ~/repo
mkdir ~/cloudsdk
tar zvxf ~/repo/google-cloud-sdk.tar.gz -C ~/cloudsdk
export CLOUDSDK_CORE_DISABLE_PROMPTS=1
export CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL=file://${HOME}/repo/components-2.json
~/cloudsdk/google-cloud-sdk/install.sh --disable-installation-options --bash-completion=false --path-update=false --usage-reporting=false
export PATH=${HOME}/cloudsdk/google-cloud-sdk/bin:${PATH}
export CLOUDSDK_CONFIG=/var/lib/jenkins/.config/gcloud
fi
### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --down

View File

@ -220,4 +220,35 @@ kube::util::analytics-link() {
echo "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/${path}?pixel)]()"
}
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: experimental/v1 -> apis/experimental/v1
# * legacy behavior: api/v1 -> api/v1
# * Special handling for only a group: experimental -> apis/experimental
# * Special handling for only "api" group: api -> api
# * Very special handling for "v1": v1 -> api/v1
kube::util::group-version-to-pkg-path() {
local group_version="$1"
# Special cases first.
# TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
# moving the results to pkg/apis/api.
case "${group_version}" in
v1)
echo "api/v1"
;;
api)
echo "api/v1"
;;
api/*)
echo "${group_version}"
;;
*)
echo "apis/${group_version}"
;;
esac
}
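# Example usage (illustrative sketch, based on the mapping described above):
#   kube::util::group-version-to-pkg-path "experimental/v1"   # -> apis/experimental/v1
#   kube::util::group-version-to-pkg-path "v1"                # -> api/v1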
# ex: ts=2 sw=2 et filetype=sh

View File

@ -86,6 +86,8 @@ API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-"/127.0.0.1(:[0-9]+)?$,/loc
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
function test_apiserver_off {
@ -251,6 +253,8 @@ function start_kubelet {
--v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="127.0.0.1" \
--address="127.0.0.1" \
--api-servers="${API_HOST}:${API_PORT}" \

View File

@ -711,19 +711,19 @@ __EOF__
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" ''
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0001:'
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0002:'
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0003:'
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" ''
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
############################
# Persistent Volume Claims #
@ -731,21 +731,21 @@ __EOF__
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" ''
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-1:'
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-2:'
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-3:'
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" ''
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''

View File

@ -24,14 +24,14 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
function generate_version() {
local groupVersion=$1
local TMPFILE="/tmp/types_swagger_doc_generated.$(date +%s).go"
local group_version=$1
local TMPFILE="/tmp/types_swagger_doc_generated.$(date +%s).go"
echo "Generating swagger type docs for ${groupVersion}"
echo "Generating swagger type docs for ${group_version}"
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
echo "package ${groupVersion##*/}" >> $TMPFILE
cat >> $TMPFILE <<EOF
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
echo "package ${group_version##*/}" >> $TMPFILE
cat >> $TMPFILE <<EOF
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
@ -46,21 +46,23 @@ function generate_version() {
// AUTO-GENERATED FUNCTIONS START HERE
EOF
GOPATH=$(godep path):$GOPATH go run cmd/genswaggertypedocs/swagger_type_docs.go -s "pkg/${groupVersion}/types.go" -f - >> $TMPFILE
GOPATH=$(godep path):$GOPATH go run cmd/genswaggertypedocs/swagger_type_docs.go -s \
"pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types.go" -f - \
>> $TMPFILE
echo "// AUTO-GENERATED FUNCTIONS END HERE" >> $TMPFILE
echo "// AUTO-GENERATED FUNCTIONS END HERE" >> $TMPFILE
gofmt -w -s $TMPFILE
mv $TMPFILE "pkg/${groupVersion}/types_swagger_doc_generated.go"
gofmt -w -s $TMPFILE
mv $TMPFILE "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go"
}
GROUP_VERSIONS="api/v1 expapi/v1"
GROUP_VERSIONS="api/v1 experimental/v1"
# To avoid compile errors, remove the currently existing files.
for groupVersion in $GROUP_VERSIONS; do
rm -f "pkg/${groupVersion}/types_swagger_doc_generated.go"
for group_version in $GROUP_VERSIONS; do
rm -f "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go"
done
for groupVersion in $GROUP_VERSIONS; do
generate_version "${groupVersion}"
for group_version in $GROUP_VERSIONS; do
generate_version "${group_version}"
done
"${KUBE_ROOT}/hack/update-swagger-spec.sh"

48
hack/update-gofmt.sh Executable file
View File

@ -0,0 +1,48 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GoFmt apparently is changing @ head...
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5') ]]; then
echo "Unknown go version '${GO_VERSION}', skipping gofmt."
exit 0
fi
cd "${KUBE_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './_output' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename '*/third_party/*' \
-o -wholename '*/Godeps/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s -w"
find_files | xargs $GOFMT

View File

@ -1,4 +1,3 @@
cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `--cadvisor-port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
@ -39,7 +38,7 @@ cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control obje
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + enable_horizontal_pod_autoscaler + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%}

View File

@ -1,274 +1,277 @@
accept-hosts
accept-paths
account-for-pod-resources
admission-control
admission-control-config-file
advertise-address
advertised-address
algorithm-provider
all-namespaces
allocate-node-cidrs
allow-privileged
api-burst
api-prefix
api-rate
api-servers
api-token
api-version
authorization-mode
authorization-policy-file
auth-path
basic-auth-file
bench-pods
bench-quiet
bench-tasks
bench-workers
bind-address
bind-pods-burst
bind-pods-qps
cadvisor-port
cert-dir
certificate-authority
cgroup-root
chaos-chance
cleanup-iptables
client-ca-file
client-certificate
client-key
cloud-config
cloud-provider
cluster-cidr
cluster-dns
cluster-domain
cluster-name
cluster-tag
concurrent-endpoint-syncs
configure-cbr0
contain-pod-resources
container-port
container-runtime
cors-allowed-origins
create-external-load-balancer
current-release-pr
current-replicas
default-container-cpu-limit
default-container-mem-limit
delay-shutdown
deleting-pods-burst
deleting-pods-qps
deployment-label-key
dest-file
disable-filter
docker-endpoint
docker-exec-handler
dockercfg-path
driver-port
dry-run
duration-sec
e2e-output-dir
enable-debugging-handlers
enable-horizontal-pod-autoscaler
enable-server
etcd-config
etcd-prefix
etcd-server
etcd-servers
event-burst
event-qps
event-ttl
executor-bindall
executor-logv
executor-path
executor-suicide-timeout
experimental-keystone-url
experimental-prefix
external-hostname
external-ip
failover-timeout
file-check-frequency
file-suffix
forward-services
framework-name
framework-weburi
func-dest
fuzz-iters
gce-project
gce-zone
gke-cluster
google-json-key
grace-period
ha-domain
healthz-bind-address
healthz-port
horizontal-pod-autoscaler-sync-period
hostname-override
host-network-sources
http-check-frequency
http-port
ignore-not-found
image-gc-high-threshold
image-gc-low-threshold
insecure-bind-address
insecure-port
insecure-skip-tls-verify
iptables-sync-period
ir-data-source
ir-dbname
ir-influxdb-host
ir-password
ir-user
jenkins-host
jenkins-jobs
km-path
kubectl-path
kubelet-cadvisor-port
kubelet-certificate-authority
kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-host-network-sources
kubelet-https
kubelet-network-plugin
kubelet-pod-infra-container-image
kubelet-port
kubelet-root-dir
kubelet-sync-frequency
kubelet-timeout
kube-master
label-columns
last-release-pr
legacy-userspace-proxy
log-flush-frequency
long-running-request-regexp
low-diskspace-threshold-mb
manifest-url
manifest-url-header
masquerade-all
master-service-namespace
max-concurrency
max-connection-bytes-per-sec
maximum-dead-containers
maximum-dead-containers-per-container
max-log-age
max-log-backups
max-log-size
max-outgoing-burst
max-outgoing-qps
max-pods
max-requests-inflight
mesos-authentication-principal
mesos-authentication-provider
mesos-authentication-secret-file
mesos-cgroup-prefix
mesos-executor-cpus
mesos-executor-mem
mesos-master
mesos-role
mesos-user
minimum-container-ttl-duration
minion-max-log-age
minion-max-log-backups
minion-max-log-size
minion-path-override
min-pr-number
min-request-timeout
namespace-sync-period
network-plugin
network-plugin-dir
node-instance-group
node-monitor-grace-period
node-monitor-period
node-startup-grace-period
node-status-update-frequency
node-sync-period
no-headers
num-nodes
oidc-ca-file
oidc-client-id
oidc-issuer-url
oidc-username-claim
oom-score-adj
output-version
out-version
path-override
pod-cidr
pod-eviction-timeout
pod-infra-container-image
pod-running
policy-config-file
poll-interval
portal-net
private-mountns
prom-push-gateway
proxy-bindall
proxy-logv
proxy-port-range
public-address-override
pvclaimbinder-sync-period
read-only-port
really-crash-for-testing
reconcile-cooldown
reconcile-interval
register-node
register-retry-count
registry-burst
registry-qps
reject-methods
reject-paths
repo-root
report-dir
required-contexts
resolv-conf
resource-container
resource-quota-sync-period
resource-version
rkt-path
root-ca-file
root-dir
run-proxy
runtime-config
scheduler-config
secure-port
service-account-key-file
service-account-lookup
service-account-private-key-file
service-address
service-cluster-ip-range
service-node-port-range
service-node-ports
service-sync-period
session-affinity
show-all
shutdown-fd
shutdown-fifo
skip-munges
sort-by
source-file
ssh-keyfile
ssh-user
static-pods-config
stats-port
storage-version
streaming-connection-idle-timeout
suicide-timeout
sync-frequency
system-container
target-port
tcp-services
tls-cert-file
tls-private-key-file
token-auth-file
ttl-secs
type-src
unix-socket
update-period
upgrade-target
use-kubernetes-cluster-service
user-whitelist
watch-cache
watch-only
whitelist-override-label
www-prefix
retry_time
file_content_in_loop
cpu-cfs-quota
accept-hosts
accept-paths
account-for-pod-resources
admission-control
admission-control-config-file
advertise-address
advertised-address
algorithm-provider
all-namespaces
allocate-node-cidrs
allow-privileged
api-burst
api-prefix
api-rate
api-servers
api-token
api-version
authorization-mode
authorization-policy-file
auth-path
basic-auth-file
bench-pods
bench-quiet
bench-tasks
bench-workers
bind-address
bind-pods-burst
bind-pods-qps
cadvisor-port
cert-dir
certificate-authority
cgroup-root
chaos-chance
cleanup-iptables
client-ca-file
client-certificate
client-key
cloud-config
cloud-provider
cluster-cidr
cluster-dns
cluster-domain
cluster-name
cluster-tag
concurrent-endpoint-syncs
configure-cbr0
contain-pod-resources
container-port
container-runtime
cors-allowed-origins
create-external-load-balancer
current-release-pr
current-replicas
default-container-cpu-limit
default-container-mem-limit
delay-shutdown
deleting-pods-burst
deleting-pods-qps
deployment-label-key
dest-file
disable-filter
docker-endpoint
docker-exec-handler
dockercfg-path
driver-port
dry-run
duration-sec
e2e-output-dir
enable-debugging-handlers
enable-horizontal-pod-autoscaler
enable-server
etcd-config
etcd-prefix
etcd-server
etcd-servers
event-burst
event-qps
event-ttl
executor-bindall
executor-logv
executor-path
executor-suicide-timeout
experimental-keystone-url
experimental-prefix
external-hostname
external-ip
failover-timeout
file-check-frequency
file-suffix
forward-services
framework-name
framework-weburi
func-dest
fuzz-iters
gce-project
gce-zone
gke-cluster
google-json-key
grace-period
ha-domain
healthz-bind-address
healthz-port
horizontal-pod-autoscaler-sync-period
hostname-override
host-network-sources
http-check-frequency
http-port
ignore-not-found
image-gc-high-threshold
image-gc-low-threshold
insecure-bind-address
insecure-port
insecure-skip-tls-verify
iptables-sync-period
ir-data-source
ir-dbname
ir-influxdb-host
ir-password
ir-user
jenkins-host
jenkins-jobs
km-path
kubectl-path
kubelet-cadvisor-port
kubelet-certificate-authority
kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-host-network-sources
kubelet-https
kubelet-network-plugin
kubelet-pod-infra-container-image
kubelet-port
kubelet-root-dir
kubelet-sync-frequency
kubelet-timeout
kube-master
label-columns
last-release-pr
legacy-userspace-proxy
load-balancer-ip
log-flush-frequency
long-running-request-regexp
low-diskspace-threshold-mb
manifest-url
manifest-url-header
masquerade-all
master-service-namespace
max-concurrency
max-connection-bytes-per-sec
maximum-dead-containers
maximum-dead-containers-per-container
max-log-age
max-log-backups
max-log-size
max-outgoing-burst
max-outgoing-qps
max-pods
max-requests-inflight
mesos-authentication-principal
mesos-authentication-provider
mesos-authentication-secret-file
mesos-cgroup-prefix
mesos-executor-cpus
mesos-executor-mem
mesos-master
mesos-role
mesos-user
minimum-container-ttl-duration
minion-max-log-age
minion-max-log-backups
minion-max-log-size
minion-path-override
min-pr-number
min-request-timeout
namespace-sync-period
network-plugin
network-plugin-dir
node-instance-group
node-monitor-grace-period
node-monitor-period
node-startup-grace-period
node-status-update-frequency
node-sync-period
no-headers
num-nodes
oidc-ca-file
oidc-client-id
oidc-issuer-url
oidc-username-claim
oom-score-adj
output-version
out-version
path-override
pod-cidr
pod-eviction-timeout
pod-infra-container-image
pod-running
policy-config-file
poll-interval
portal-net
private-mountns
prom-push-gateway
proxy-bindall
proxy-logv
proxy-port-range
public-address-override
pvclaimbinder-sync-period
read-only-port
really-crash-for-testing
reconcile-cooldown
reconcile-interval
register-node
register-retry-count
registry-burst
registry-qps
reject-methods
reject-paths
repo-root
report-dir
required-contexts
resolv-conf
resource-container
resource-quota-sync-period
resource-version
rkt-path
rkt-stage1-image
root-ca-file
root-dir
run-proxy
runtime-config
scheduler-config
schema-cache-dir
secure-port
service-account-key-file
service-account-lookup
service-account-private-key-file
service-address
service-cluster-ip-range
service-node-port-range
service-node-ports
service-sync-period
session-affinity
show-all
shutdown-fd
shutdown-fifo
skip-munges
sort-by
source-file
ssh-keyfile
ssh-user
static-pods-config
stats-port
storage-version
streaming-connection-idle-timeout
suicide-timeout
sync-frequency
system-container
target-port
tcp-services
tls-cert-file
tls-private-key-file
token-auth-file
ttl-secs
type-src
unix-socket
update-period
upgrade-target
use-kubernetes-cluster-service
user-whitelist
watch-cache
watch-only
whitelist-override-label
www-prefix
retry_time
file_content_in_loop
cpu-cfs-quota

View File

@ -302,6 +302,7 @@ func deepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con
func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
out.Reason = in.Reason
out.Message = in.Message
return nil
}
@ -1958,6 +1959,7 @@ func deepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cl
}
func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error {
out.Type = in.Type
if in.Ports != nil {
out.Ports = make([]ServicePort, len(in.Ports))
for i := range in.Ports {
@ -1977,7 +1979,6 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl
out.Selector = nil
}
out.ClusterIP = in.ClusterIP
out.Type = in.Type
if in.ExternalIPs != nil {
out.ExternalIPs = make([]string, len(in.ExternalIPs))
for i := range in.ExternalIPs {
@ -1986,6 +1987,7 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl
} else {
out.ExternalIPs = nil
}
out.LoadBalancerIP = in.LoadBalancerIP
out.SessionAffinity = in.SessionAffinity
return nil
}

View File

@ -33,8 +33,8 @@ import (
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets"
_ "k8s.io/kubernetes/pkg/expapi"
_ "k8s.io/kubernetes/pkg/expapi/v1"
_ "k8s.io/kubernetes/pkg/apis/experimental"
_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
flag "github.com/spf13/pflag"
)

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/meta"
apiutil "k8s.io/kubernetes/pkg/api/util"
explatest "k8s.io/kubernetes/pkg/expapi/latest"
explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
"k8s.io/kubernetes/pkg/runtime"
)

View File

@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/registered"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/expapi"
"k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@ -121,15 +121,15 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
c.FuzzNoCustom(j) // fuzz self without calling this function again
//j.TemplateRef = nil // this is required for round trip
},
func(j *expapi.DeploymentStrategy, c fuzz.Continue) {
func(j *experimental.DeploymentStrategy, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of valid values.
strategyTypes := []expapi.DeploymentType{expapi.DeploymentRecreate, expapi.DeploymentRollingUpdate}
strategyTypes := []experimental.DeploymentType{experimental.DeploymentRecreate, experimental.DeploymentRollingUpdate}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != expapi.DeploymentRollingUpdate {
if j.Type != experimental.DeploymentRollingUpdate {
j.RollingUpdate = nil
} else {
rollingUpdate := expapi.RollingUpdateDeployment{}
rollingUpdate := experimental.RollingUpdateDeployment{}
if c.RandBool() {
rollingUpdate.MaxUnavailable = util.NewIntOrStringFromInt(int(c.RandUint64()))
rollingUpdate.MaxSurge = util.NewIntOrStringFromInt(int(c.RandUint64()))
@ -351,7 +351,7 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
c.FuzzNoCustom(n)
n.Spec.ExternalID = "external"
},
func(s *expapi.APIVersion, c fuzz.Continue) {
func(s *experimental.APIVersion, c fuzz.Continue) {
// We can't use c.RandString() here because it may generate empty
// string, which will cause tests failure.
s.APIGroup = "something"

View File

@ -835,8 +835,10 @@ const (
)
type ContainerStateWaiting struct {
// Reason could be pulling image,
// A brief CamelCase string indicating details about why the container is in waiting state.
Reason string `json:"reason,omitempty"`
// A human-readable message indicating details about why the container is in waiting state.
Message string `json:"message,omitempty"`
}
type ContainerStateRunning struct {
@ -1185,6 +1187,9 @@ type LoadBalancerIngress struct {
// ServiceSpec describes the attributes that a user creates on a service
type ServiceSpec struct {
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`
// Required: The list of ports that are exposed by this service.
Ports []ServicePort `json:"ports"`
@ -1200,13 +1205,17 @@ type ServiceSpec struct {
// None can be specified for headless services when proxying is not required
ClusterIP string `json:"clusterIP,omitempty"`
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`
// ExternalIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
ExternalIPs []string `json:"externalIPs,omitempty"`
// Only applies to Service Type: LoadBalancer
// LoadBalancer will get created with the IP specified in this field.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
// Required: Supports "ClientIP" and "None". Used to maintain session affinity.
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
}

View File

@ -340,6 +340,7 @@ func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta
defaulting.(func(*api.ContainerStateWaiting))(in)
}
out.Reason = in.Reason
out.Message = in.Message
return nil
}
@ -2172,6 +2173,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ServiceSpec))(in)
}
out.Type = ServiceType(in.Type)
if in.Ports != nil {
out.Ports = make([]ServicePort, len(in.Ports))
for i := range in.Ports {
@ -2191,7 +2193,6 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
out.Selector = nil
}
out.ClusterIP = in.ClusterIP
out.Type = ServiceType(in.Type)
if in.ExternalIPs != nil {
out.ExternalIPs = make([]string, len(in.ExternalIPs))
for i := range in.ExternalIPs {
@ -2200,6 +2201,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
} else {
out.ExternalIPs = nil
}
out.LoadBalancerIP = in.LoadBalancerIP
out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
return nil
}
@ -2742,6 +2744,7 @@ func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *Container
defaulting.(func(*ContainerStateWaiting))(in)
}
out.Reason = in.Reason
out.Message = in.Message
return nil
}
@ -4603,6 +4606,7 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service
out.ExternalIPs = nil
}
out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity)
out.LoadBalancerIP = in.LoadBalancerIP
return nil
}

View File

@ -317,6 +317,7 @@ func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *Cont
func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
out.Reason = in.Reason
out.Message = in.Message
return nil
}
@ -1992,6 +1993,7 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo
out.ExternalIPs = nil
}
out.SessionAffinity = in.SessionAffinity
out.LoadBalancerIP = in.LoadBalancerIP
return nil
}

View File

@ -1036,8 +1036,10 @@ const (
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
// (brief) reason the container is not yet running, such as pulling its image.
// (brief) reason the container is not yet running.
Reason string `json:"reason,omitempty"`
// Message regarding why the container is not yet running.
Message string `json:"message,omitempty"`
}
// ContainerStateRunning is a running state of a container.
@ -1509,6 +1511,13 @@ type ServiceSpec struct {
// Defaults to None.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
// Only applies to Service Type: LoadBalancer
// LoadBalancer will get created with the IP specified in this field.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
}
// ServicePort contains information on a service's port.

View File

@ -199,8 +199,9 @@ func (ContainerStateTerminated) SwaggerDoc() map[string]string {
}
var map_ContainerStateWaiting = map[string]string{
"": "ContainerStateWaiting is a waiting state of a container.",
"reason": "(brief) reason the container is not yet running, such as pulling its image.",
"": "ContainerStateWaiting is a waiting state of a container.",
"reason": "(brief) reason the container is not yet running.",
"message": "Message regarding why the container is not yet running.",
}
func (ContainerStateWaiting) SwaggerDoc() map[string]string {
@ -1272,6 +1273,7 @@ var map_ServiceSpec = map[string]string{
"type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services",
"externalIPs": "ExternalIPs are used by external load balancers, or can be set by users to handle external traffic that arrives at a node. Externally visible IPs (e.g. load balancers) that should be proxied to this service.",
"sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies",
"loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
}
func (ServiceSpec) SwaggerDoc() map[string]string {

View File

@ -1458,16 +1458,27 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList {
keys.Insert(string(k))
min[string(k)] = q
}
for k, q := range limit.Default {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...)
keys.Insert(string(k))
defaults[string(k)] = q
}
for k, q := range limit.DefaultRequest {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...)
keys.Insert(string(k))
defaultRequests[string(k)] = q
if limit.Type == api.LimitTypePod {
if len(limit.Default) > 0 {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.limits[%d].default", limit.Default, "Default is not supported when limit type is Pod"))
}
if len(limit.DefaultRequest) > 0 {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.limits[%d].defaultRequest", limit.DefaultRequest, "DefaultRequest is not supported when limit type is Pod"))
}
} else {
for k, q := range limit.Default {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...)
keys.Insert(string(k))
defaults[string(k)] = q
}
for k, q := range limit.DefaultRequest {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...)
keys.Insert(string(k))
defaultRequests[string(k)] = q
}
}
for k := range limit.MaxLimitRequestRatio {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].maxLimitRequestRatio[%s]", i, k))...)
}
@ -1479,38 +1490,26 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList {
defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
minQuantity := limit.Min[api.ResourceName(k)]
maxQuantity := limit.Max[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].min[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
}
if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
minQuantity := limit.Min[api.ResourceName(k)]
defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
}
if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
maxQuantity := limit.Max[api.ResourceName(k)]
defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
}
if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
defaultQuantity := limit.Default[api.ResourceName(k)]
defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
}
if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
minQuantity := limit.Min[api.ResourceName(k)]
defaultQuantity := limit.Default[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
}
if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
maxQuantity := limit.Max[api.ResourceName(k)]
defaultQuantity := limit.Default[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
}
}

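Taken together, the updated ValidateLimitRange enforces two rules: Default and DefaultRequest are rejected outright on Pod-type limits, and for the remaining limit types each resource must satisfy min <= defaultRequest <= default <= max. A hedged sketch exercising both rules against the internal api and validation packages from this tree (names and quantities are illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/validation"
)

func main() {
	// Expected to fail: Pod-type limits may no longer carry Default values.
	bad := api.LimitRange{
		ObjectMeta: api.ObjectMeta{Name: "limits", Namespace: "default"},
		Spec: api.LimitRangeSpec{Limits: []api.LimitRangeItem{{
			Type:    api.LimitTypePod,
			Default: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		}}},
	}
	fmt.Println(len(validation.ValidateLimitRange(&bad)) > 0) // expect: true

	// Expected to pass: Container-type limits ordered min <= defaultRequest <= default <= max.
	good := api.LimitRange{
		ObjectMeta: api.ObjectMeta{Name: "limits", Namespace: "default"},
		Spec: api.LimitRangeSpec{Limits: []api.LimitRangeItem{{
			Type:           api.LimitTypeContainer,
			Min:            api.ResourceList{api.ResourceCPU: resource.MustParse("10m")},
			DefaultRequest: api.ResourceList{api.ResourceCPU: resource.MustParse("50m")},
			Default:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			Max:            api.ResourceList{api.ResourceCPU: resource.MustParse("1")},
		}}},
	}
	fmt.Println(len(validation.ValidateLimitRange(&good)) == 0) // expect: true
}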
View File

@ -2911,6 +2911,12 @@ func TestValidateLimitRange(t *testing.T) {
Type: api.LimitTypePod,
Max: getResourceList("100m", "10000Mi"),
Min: getResourceList("5m", "100Mi"),
MaxLimitRequestRatio: getResourceList("10", ""),
},
{
Type: api.LimitTypeContainer,
Max: getResourceList("100m", "10000Mi"),
Min: getResourceList("5m", "100Mi"),
Default: getResourceList("50m", "500Mi"),
DefaultRequest: getResourceList("10m", "200Mi"),
MaxLimitRequestRatio: getResourceList("10", ""),
@ -2923,7 +2929,7 @@ func TestValidateLimitRange(t *testing.T) {
spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Type: api.LimitTypeContainer,
Max: getResourceList("100m", "10000T"),
Min: getResourceList("5m", "100Mi"),
Default: getResourceList("50m", "500Mi"),
@ -2978,6 +2984,32 @@ func TestValidateLimitRange(t *testing.T) {
}},
"",
},
"default-limit-type-pod": {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Max: getResourceList("100m", "10000m"),
Min: getResourceList("0m", "100m"),
Default: getResourceList("10m", "100m"),
},
},
}},
"Default is not supported when limit type is Pod",
},
"default-request-limit-type-pod": {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Max: getResourceList("100m", "10000m"),
Min: getResourceList("0m", "100m"),
DefaultRequest: getResourceList("10m", "100m"),
},
},
}},
"DefaultRequest is not supported when limit type is Pod",
},
"min value 100m is greater than max value 10m": {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
@ -2994,7 +3026,7 @@ func TestValidateLimitRange(t *testing.T) {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Type: api.LimitTypeContainer,
Max: getResourceList("1", ""),
Min: getResourceList("100m", ""),
Default: getResourceList("2000m", ""),
@ -3007,7 +3039,7 @@ func TestValidateLimitRange(t *testing.T) {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Type: api.LimitTypeContainer,
Max: getResourceList("1", ""),
Min: getResourceList("100m", ""),
DefaultRequest: getResourceList("2000m", ""),

View File

@ -16,7 +16,7 @@ limitations under the License.
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package expapi
package experimental
import (
time "time"
@ -757,29 +757,29 @@ func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c
return nil
}
func deepCopy_expapi_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
func deepCopy_experimental_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
out.Name = in.Name
out.APIGroup = in.APIGroup
return nil
}
func deepCopy_expapi_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
func deepCopy_experimental_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_expapi_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
if err := deepCopy_experimental_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_expapi_DaemonSetStatus(in.Status, &out.Status, c); err != nil {
if err := deepCopy_experimental_DaemonSetStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
func deepCopy_experimental_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -789,7 +789,7 @@ func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conv
if in.Items != nil {
out.Items = make([]DaemonSet, len(in.Items))
for i := range in.Items {
if err := deepCopy_expapi_DaemonSet(in.Items[i], &out.Items[i], c); err != nil {
if err := deepCopy_experimental_DaemonSet(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
@ -799,7 +799,7 @@ func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conv
return nil
}
func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
func deepCopy_experimental_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
@ -819,30 +819,30 @@ func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conv
return nil
}
func deepCopy_expapi_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
func deepCopy_experimental_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
return nil
}
func deepCopy_expapi_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
func deepCopy_experimental_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_expapi_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
if err := deepCopy_experimental_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_expapi_DeploymentStatus(in.Status, &out.Status, c); err != nil {
if err := deepCopy_experimental_DeploymentStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
func deepCopy_experimental_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -852,7 +852,7 @@ func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *c
if in.Items != nil {
out.Items = make([]Deployment, len(in.Items))
for i := range in.Items {
if err := deepCopy_expapi_Deployment(in.Items[i], &out.Items[i], c); err != nil {
if err := deepCopy_experimental_Deployment(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
@ -862,7 +862,7 @@ func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *c
return nil
}
func deepCopy_expapi_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
func deepCopy_experimental_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
out.Replicas = in.Replicas
if in.Selector != nil {
out.Selector = make(map[string]string)
@ -880,24 +880,24 @@ func deepCopy_expapi_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *c
} else {
out.Template = nil
}
if err := deepCopy_expapi_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
if err := deepCopy_experimental_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
return err
}
out.UniqueLabelKey = in.UniqueLabelKey
return nil
}
func deepCopy_expapi_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
func deepCopy_experimental_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
return nil
}
func deepCopy_expapi_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
func deepCopy_experimental_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
out.Type = in.Type
if in.RollingUpdate != nil {
out.RollingUpdate = new(RollingUpdateDeployment)
if err := deepCopy_expapi_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil {
if err := deepCopy_experimental_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil {
return err
}
} else {
@ -906,19 +906,19 @@ func deepCopy_expapi_DeploymentStrategy(in DeploymentStrategy, out *DeploymentSt
return nil
}
func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
func deepCopy_experimental_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_expapi_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
if err := deepCopy_experimental_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if in.Status != nil {
out.Status = new(HorizontalPodAutoscalerStatus)
if err := deepCopy_expapi_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil {
if err := deepCopy_experimental_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil {
return err
}
} else {
@ -927,7 +927,7 @@ func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *Ho
return nil
}
func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
func deepCopy_experimental_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -937,7 +937,7 @@ func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList,
if in.Items != nil {
out.Items = make([]HorizontalPodAutoscaler, len(in.Items))
for i := range in.Items {
if err := deepCopy_expapi_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil {
if err := deepCopy_experimental_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
@ -947,10 +947,10 @@ func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList,
return nil
}
func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
func deepCopy_experimental_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
if in.ScaleRef != nil {
out.ScaleRef = new(SubresourceReference)
if err := deepCopy_expapi_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil {
if err := deepCopy_experimental_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil {
return err
}
} else {
@ -958,18 +958,18 @@ func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec,
}
out.MinCount = in.MinCount
out.MaxCount = in.MaxCount
if err := deepCopy_expapi_ResourceConsumption(in.Target, &out.Target, c); err != nil {
if err := deepCopy_experimental_ResourceConsumption(in.Target, &out.Target, c); err != nil {
return err
}
return nil
}
func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
func deepCopy_experimental_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentConsumption != nil {
out.CurrentConsumption = new(ResourceConsumption)
if err := deepCopy_expapi_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil {
if err := deepCopy_experimental_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil {
return err
}
} else {
@ -986,14 +986,129 @@ func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerSta
return nil
}
func deepCopy_expapi_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
func deepCopy_experimental_Job(in Job, out *Job, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_experimental_JobSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_experimental_JobStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_experimental_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
if err := deepCopy_util_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil {
return err
}
if err := deepCopy_util_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil {
return err
}
out.Reason = in.Reason
out.Message = in.Message
return nil
}
func deepCopy_experimental_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
return err
}
if in.Items != nil {
out.Items = make([]Job, len(in.Items))
for i := range in.Items {
if err := deepCopy_experimental_Job(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
func deepCopy_experimental_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
if in.Parallelism != nil {
out.Parallelism = new(int)
*out.Parallelism = *in.Parallelism
} else {
out.Parallelism = nil
}
if in.Completions != nil {
out.Completions = new(int)
*out.Completions = *in.Completions
} else {
out.Completions = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
if in.Template != nil {
out.Template = new(api.PodTemplateSpec)
if err := deepCopy_api_PodTemplateSpec(*in.Template, out.Template, c); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func deepCopy_experimental_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
if in.Conditions != nil {
out.Conditions = make([]JobCondition, len(in.Conditions))
for i := range in.Conditions {
if err := deepCopy_experimental_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil {
return err
}
}
} else {
out.Conditions = nil
}
if in.StartTime != nil {
out.StartTime = new(util.Time)
if err := deepCopy_util_Time(*in.StartTime, out.StartTime, c); err != nil {
return err
}
} else {
out.StartTime = nil
}
if in.CompletionTime != nil {
out.CompletionTime = new(util.Time)
if err := deepCopy_util_Time(*in.CompletionTime, out.CompletionTime, c); err != nil {
return err
}
} else {
out.CompletionTime = nil
}
out.Active = in.Active
out.Successful = in.Successful
out.Unsuccessful = in.Unsuccessful
return nil
}
func deepCopy_experimental_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
return nil
}
func deepCopy_expapi_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error {
func deepCopy_experimental_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error {
out.Resource = in.Resource
if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil {
return err
@ -1001,7 +1116,7 @@ func deepCopy_expapi_ResourceConsumption(in ResourceConsumption, out *ResourceCo
return nil
}
func deepCopy_expapi_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
func deepCopy_experimental_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
if err := deepCopy_util_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil {
return err
}
@ -1012,28 +1127,28 @@ func deepCopy_expapi_RollingUpdateDeployment(in RollingUpdateDeployment, out *Ro
return nil
}
func deepCopy_expapi_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
func deepCopy_experimental_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_expapi_ScaleSpec(in.Spec, &out.Spec, c); err != nil {
if err := deepCopy_experimental_ScaleSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_expapi_ScaleStatus(in.Status, &out.Status, c); err != nil {
if err := deepCopy_experimental_ScaleStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_expapi_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
func deepCopy_experimental_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
out.Replicas = in.Replicas
return nil
}
func deepCopy_expapi_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
func deepCopy_experimental_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
out.Replicas = in.Replicas
if in.Selector != nil {
out.Selector = make(map[string]string)
@ -1046,7 +1161,7 @@ func deepCopy_expapi_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion
return nil
}
func deepCopy_expapi_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error {
func deepCopy_experimental_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error {
out.Kind = in.Kind
out.Namespace = in.Namespace
out.Name = in.Name
@ -1055,7 +1170,7 @@ func deepCopy_expapi_SubresourceReference(in SubresourceReference, out *Subresou
return nil
}
func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
func deepCopy_experimental_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -1066,7 +1181,7 @@ func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyRe
if in.Versions != nil {
out.Versions = make([]APIVersion, len(in.Versions))
for i := range in.Versions {
if err := deepCopy_expapi_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil {
if err := deepCopy_experimental_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil {
return err
}
}
@ -1076,7 +1191,7 @@ func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyRe
return nil
}
func deepCopy_expapi_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
func deepCopy_experimental_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -1094,7 +1209,7 @@ func deepCopy_expapi_ThirdPartyResourceData(in ThirdPartyResourceData, out *Thir
return nil
}
func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
func deepCopy_experimental_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -1104,7 +1219,7 @@ func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, o
if in.Items != nil {
out.Items = make([]ThirdPartyResourceData, len(in.Items))
for i := range in.Items {
if err := deepCopy_expapi_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil {
if err := deepCopy_experimental_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
@ -1114,7 +1229,7 @@ func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, o
return nil
}
func deepCopy_expapi_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
func deepCopy_experimental_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
@ -1124,7 +1239,7 @@ func deepCopy_expapi_ThirdPartyResourceList(in ThirdPartyResourceList, out *Thir
if in.Items != nil {
out.Items = make([]ThirdPartyResource, len(in.Items))
for i := range in.Items {
if err := deepCopy_expapi_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil {
if err := deepCopy_experimental_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
@ -1192,31 +1307,36 @@ func init() {
deepCopy_api_VolumeMount,
deepCopy_api_VolumeSource,
deepCopy_resource_Quantity,
deepCopy_expapi_APIVersion,
deepCopy_expapi_DaemonSet,
deepCopy_expapi_DaemonSetList,
deepCopy_expapi_DaemonSetSpec,
deepCopy_expapi_DaemonSetStatus,
deepCopy_expapi_Deployment,
deepCopy_expapi_DeploymentList,
deepCopy_expapi_DeploymentSpec,
deepCopy_expapi_DeploymentStatus,
deepCopy_expapi_DeploymentStrategy,
deepCopy_expapi_HorizontalPodAutoscaler,
deepCopy_expapi_HorizontalPodAutoscalerList,
deepCopy_expapi_HorizontalPodAutoscalerSpec,
deepCopy_expapi_HorizontalPodAutoscalerStatus,
deepCopy_expapi_ReplicationControllerDummy,
deepCopy_expapi_ResourceConsumption,
deepCopy_expapi_RollingUpdateDeployment,
deepCopy_expapi_Scale,
deepCopy_expapi_ScaleSpec,
deepCopy_expapi_ScaleStatus,
deepCopy_expapi_SubresourceReference,
deepCopy_expapi_ThirdPartyResource,
deepCopy_expapi_ThirdPartyResourceData,
deepCopy_expapi_ThirdPartyResourceDataList,
deepCopy_expapi_ThirdPartyResourceList,
deepCopy_experimental_APIVersion,
deepCopy_experimental_DaemonSet,
deepCopy_experimental_DaemonSetList,
deepCopy_experimental_DaemonSetSpec,
deepCopy_experimental_DaemonSetStatus,
deepCopy_experimental_Deployment,
deepCopy_experimental_DeploymentList,
deepCopy_experimental_DeploymentSpec,
deepCopy_experimental_DeploymentStatus,
deepCopy_experimental_DeploymentStrategy,
deepCopy_experimental_HorizontalPodAutoscaler,
deepCopy_experimental_HorizontalPodAutoscalerList,
deepCopy_experimental_HorizontalPodAutoscalerSpec,
deepCopy_experimental_HorizontalPodAutoscalerStatus,
deepCopy_experimental_Job,
deepCopy_experimental_JobCondition,
deepCopy_experimental_JobList,
deepCopy_experimental_JobSpec,
deepCopy_experimental_JobStatus,
deepCopy_experimental_ReplicationControllerDummy,
deepCopy_experimental_ResourceConsumption,
deepCopy_experimental_RollingUpdateDeployment,
deepCopy_experimental_Scale,
deepCopy_experimental_ScaleSpec,
deepCopy_experimental_ScaleStatus,
deepCopy_experimental_SubresourceReference,
deepCopy_experimental_ThirdPartyResource,
deepCopy_experimental_ThirdPartyResourceData,
deepCopy_experimental_ThirdPartyResourceDataList,
deepCopy_experimental_ThirdPartyResourceList,
deepCopy_util_IntOrString,
deepCopy_util_Time,
)

View File

@ -23,8 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/registered"
_ "k8s.io/kubernetes/pkg/expapi"
"k8s.io/kubernetes/pkg/expapi/v1"
_ "k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/apis/experimental/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
)
@ -39,7 +39,7 @@ var (
RESTMapper meta.RESTMapper
)
const importPrefix = "k8s.io/kubernetes/pkg/expapi"
const importPrefix = "k8s.io/kubernetes/pkg/apis/experimental"
func init() {
Version = registered.RegisteredVersions[0]

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package expapi
package experimental
import (
"k8s.io/kubernetes/pkg/api"
@ -32,6 +32,8 @@ func addKnownTypes() {
&DeploymentList{},
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
&Job{},
&JobList{},
&ReplicationControllerDummy{},
&Scale{},
&ThirdPartyResource{},
@ -47,6 +49,8 @@ func (*Deployment) IsAnAPIObject() {}
func (*DeploymentList) IsAnAPIObject() {}
func (*HorizontalPodAutoscaler) IsAnAPIObject() {}
func (*HorizontalPodAutoscalerList) IsAnAPIObject() {}
func (*Job) IsAnAPIObject() {}
func (*JobList) IsAnAPIObject() {}
func (*ReplicationControllerDummy) IsAnAPIObject() {}
func (*Scale) IsAnAPIObject() {}
func (*ThirdPartyResource) IsAnAPIObject() {}

View File

@ -19,7 +19,7 @@ package testapi
import (
"strings"
"k8s.io/kubernetes/pkg/expapi/latest"
"k8s.io/kubernetes/pkg/apis/experimental/latest"
)
// Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name.

View File

@ -15,7 +15,7 @@ limitations under the License.
*/
/*
This file (together with pkg/expapi/v1/types.go) contain the experimental
This file (together with pkg/apis/experimental/v1/types.go) contains the experimental
types in kubernetes. These API objects are experimental, meaning that the
APIs may be broken at any time by the kubernetes team.
@ -26,7 +26,7 @@ beyond registration differences. In other words, experimental API group
support is experimental.
*/
package expapi
package experimental
import (
"k8s.io/kubernetes/pkg/api"
@ -362,3 +362,102 @@ type ThirdPartyResourceDataList struct {
// Items is a list of third party objects
Items []ThirdPartyResourceData `json:"items"`
}
// Job represents the configuration of a single job.
type Job struct {
api.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
api.ObjectMeta `json:"metadata,omitempty"`
// Spec is a structure defining the expected behavior of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Spec JobSpec `json:"spec,omitempty"`
// Status is a structure describing current status of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Status JobStatus `json:"status,omitempty"`
}
// JobList is a collection of jobs.
type JobList struct {
api.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
api.ListMeta `json:"metadata,omitempty"`
// Items is the list of Job.
Items []Job `json:"items"`
}
// JobSpec describes how the job execution will look.
type JobSpec struct {
// Parallelism specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
Parallelism *int `json:"parallelism,omitempty"`
// Completions specifies the desired number of successfully finished pods the
// job should be run with. Defaults to 1.
Completions *int `json:"completions,omitempty"`
// Selector is a label query over pods that should match the pod count.
Selector map[string]string `json:"selector"`
// Template is the object that describes the pod that will be created when
// executing a job.
Template *api.PodTemplateSpec `json:"template"`
}
// JobStatus represents the current state of a Job.
type JobStatus struct {
// Conditions represent the latest available observations of an object's current state.
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
// StartTime represents time when the job was acknowledged by the Job Manager.
// It is not guaranteed to be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
StartTime *util.Time `json:"startTime,omitempty"`
// CompletionTime represents time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
CompletionTime *util.Time `json:"completionTime,omitempty"`
// Active is the number of actively running pods.
Active int `json:"active,omitempty"`
// Successful is the number of pods which reached Phase Succeeded.
Successful int `json:"successful,omitempty"`
// Unsuccessful is the number of pod failures; this applies only to jobs
// created with RestartPolicyNever, otherwise this value will always be 0.
Unsuccessful int `json:"unsuccessful,omitempty"`
}
type JobConditionType string
// These are valid conditions of a job.
const (
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
)
// JobCondition describes current state of a job.
type JobCondition struct {
// Type of job condition, currently only Complete.
Type JobConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition was checked.
LastProbeTime util.Time `json:"lastProbeTime,omitempty"`
// Last time the condition transitioned from one status to another.
LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"`
// (brief) reason for the condition's last transition.
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
Message string `json:"message,omitempty"`
}

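To make the new Job API concrete, here is a hedged sketch of a Job built from the internal experimental types added above. Parallelism caps the number of concurrently running pods, Completions is the number of successes required, and a template RestartPolicy of Never is what makes the Unsuccessful counter meaningful (names and image are illustrative, not part of this diff):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/experimental"
)

func intPtr(i int) *int { return &i }

func main() {
	// Sketch only: run at most 2 pods at a time until 5 of them succeed.
	job := experimental.Job{
		ObjectMeta: api.ObjectMeta{Name: "pi", Namespace: "default"},
		Spec: experimental.JobSpec{
			Parallelism: intPtr(2),
			Completions: intPtr(5),
			Selector:    map[string]string{"job": "pi"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: map[string]string{"job": "pi"}},
				Spec: api.PodSpec{
					RestartPolicy: api.RestartPolicyNever, // failed pods count as Unsuccessful
					Containers:    []api.Container{{Name: "pi", Image: "perl"}},
				},
			},
		},
	}
	fmt.Println(job.Name, *job.Spec.Parallelism, *job.Spec.Completions)
}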
View File

@ -21,8 +21,8 @@ import (
"k8s.io/kubernetes/pkg/api"
v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/expapi"
"k8s.io/kubernetes/pkg/util"
)
@ -31,12 +31,12 @@ func addConversionFuncs() {
err := api.Scheme.AddConversionFuncs(
convert_api_PodSpec_To_v1_PodSpec,
convert_v1_PodSpec_To_api_PodSpec,
convert_expapi_DeploymentSpec_To_v1_DeploymentSpec,
convert_v1_DeploymentSpec_To_expapi_DeploymentSpec,
convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy,
convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy,
convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment,
convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment,
convert_experimental_DeploymentSpec_To_v1_DeploymentSpec,
convert_v1_DeploymentSpec_To_experimental_DeploymentSpec,
convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy,
convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy,
convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment,
convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment,
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
@ -178,9 +178,9 @@ func convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve
return nil
}
func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error {
func convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(in *experimental.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*expapi.DeploymentSpec))(in)
defaulting.(func(*experimental.DeploymentSpec))(in)
}
out.Replicas = new(int)
*out.Replicas = in.Replicas
@ -200,7 +200,7 @@ func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpe
} else {
out.Template = nil
}
if err := convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
if err := convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.UniqueLabelKey = new(string)
@ -208,7 +208,7 @@ func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpe
return nil
}
func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out *expapi.DeploymentSpec, s conversion.Scope) error {
func convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec, out *experimental.DeploymentSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*DeploymentSpec))(in)
}
@ -231,7 +231,7 @@ func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out
} else {
out.Template = nil
}
if err := convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
if err := convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
if in.UniqueLabelKey != nil {
@ -240,14 +240,14 @@ func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out
return nil
}
func convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(in *expapi.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
func convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(in *experimental.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*expapi.DeploymentStrategy))(in)
defaulting.(func(*experimental.DeploymentStrategy))(in)
}
out.Type = DeploymentType(in.Type)
if in.RollingUpdate != nil {
out.RollingUpdate = new(RollingUpdateDeployment)
if err := convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
if err := convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err
}
} else {
@ -256,14 +256,14 @@ func convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(in *expapi.Deplo
return nil
}
func convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(in *DeploymentStrategy, out *expapi.DeploymentStrategy, s conversion.Scope) error {
func convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(in *DeploymentStrategy, out *experimental.DeploymentStrategy, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*DeploymentStrategy))(in)
}
out.Type = expapi.DeploymentType(in.Type)
out.Type = experimental.DeploymentType(in.Type)
if in.RollingUpdate != nil {
out.RollingUpdate = new(expapi.RollingUpdateDeployment)
if err := convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
out.RollingUpdate = new(experimental.RollingUpdateDeployment)
if err := convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err
}
} else {
@ -272,9 +272,9 @@ func convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(in *DeploymentSt
return nil
}
func convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *expapi.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
func convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *experimental.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*expapi.RollingUpdateDeployment))(in)
defaulting.(func(*experimental.RollingUpdateDeployment))(in)
}
if out.MaxUnavailable == nil {
out.MaxUnavailable = &util.IntOrString{}
@ -292,7 +292,7 @@ func convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *ex
return nil
}
func convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment(in *RollingUpdateDeployment, out *expapi.RollingUpdateDeployment, s conversion.Scope) error {
func convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in *RollingUpdateDeployment, out *experimental.RollingUpdateDeployment, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RollingUpdateDeployment))(in)
}

View File

@ -998,6 +998,121 @@ func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus,
return nil
}
func deepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error {
if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
if err := deepCopy_util_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil {
return err
}
if err := deepCopy_util_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil {
return err
}
out.Reason = in.Reason
out.Message = in.Message
return nil
}
func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_v1_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
return err
}
if in.Items != nil {
out.Items = make([]Job, len(in.Items))
for i := range in.Items {
if err := deepCopy_v1_Job(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
func deepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
if in.Parallelism != nil {
out.Parallelism = new(int)
*out.Parallelism = *in.Parallelism
} else {
out.Parallelism = nil
}
if in.Completions != nil {
out.Completions = new(int)
*out.Completions = *in.Completions
} else {
out.Completions = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
if in.Template != nil {
out.Template = new(v1.PodTemplateSpec)
if err := deepCopy_v1_PodTemplateSpec(*in.Template, out.Template, c); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
if in.Conditions != nil {
out.Conditions = make([]JobCondition, len(in.Conditions))
for i := range in.Conditions {
if err := deepCopy_v1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil {
return err
}
}
} else {
out.Conditions = nil
}
if in.StartTime != nil {
out.StartTime = new(util.Time)
if err := deepCopy_util_Time(*in.StartTime, out.StartTime, c); err != nil {
return err
}
} else {
out.StartTime = nil
}
if in.CompletionTime != nil {
out.CompletionTime = new(util.Time)
if err := deepCopy_util_Time(*in.CompletionTime, out.CompletionTime, c); err != nil {
return err
}
} else {
out.CompletionTime = nil
}
out.Active = in.Active
out.Successful = in.Successful
out.Unsuccessful = in.Unsuccessful
return nil
}
func deepCopy_v1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
@ -1228,6 +1343,11 @@ func init() {
deepCopy_v1_HorizontalPodAutoscalerList,
deepCopy_v1_HorizontalPodAutoscalerSpec,
deepCopy_v1_HorizontalPodAutoscalerStatus,
deepCopy_v1_Job,
deepCopy_v1_JobCondition,
deepCopy_v1_JobList,
deepCopy_v1_JobSpec,
deepCopy_v1_JobStatus,
deepCopy_v1_ReplicationControllerDummy,
deepCopy_v1_ResourceConsumption,
deepCopy_v1_RollingUpdateDeployment,

View File

@ -36,6 +36,8 @@ func addKnownTypes() {
&DeploymentList{},
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
&Job{},
&JobList{},
&ReplicationControllerDummy{},
&Scale{},
&ThirdPartyResource{},
@ -51,6 +53,8 @@ func (*Deployment) IsAnAPIObject() {}
func (*DeploymentList) IsAnAPIObject() {}
func (*HorizontalPodAutoscaler) IsAnAPIObject() {}
func (*HorizontalPodAutoscalerList) IsAnAPIObject() {}
func (*Job) IsAnAPIObject() {}
func (*JobList) IsAnAPIObject() {}
func (*ReplicationControllerDummy) IsAnAPIObject() {}
func (*Scale) IsAnAPIObject() {}
func (*ThirdPartyResource) IsAnAPIObject() {}

View File

@ -363,3 +363,102 @@ type ThirdPartyResourceDataList struct {
// Items is the list of ThirdPartyResourceData.
Items []ThirdPartyResourceData `json:"items"`
}
// Job represents the configuration of a single job.
type Job struct {
v1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
v1.ObjectMeta `json:"metadata,omitempty"`
// Spec is a structure defining the expected behavior of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Spec JobSpec `json:"spec,omitempty"`
// Status is a structure describing current status of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Status JobStatus `json:"status,omitempty"`
}
// JobList is a collection of jobs.
type JobList struct {
v1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
v1.ListMeta `json:"metadata,omitempty"`
// Items is the list of Job.
Items []Job `json:"items"`
}
// JobSpec describes how the job execution will look.
type JobSpec struct {
// Parallelism specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
Parallelism *int `json:"parallelism,omitempty"`
// Completions specifies the desired number of successfully finished pods the
// job should be run with. Defaults to 1.
Completions *int `json:"completions,omitempty"`
// Selector is a label query over pods that should match the pod count.
Selector map[string]string `json:"selector"`
// Template is the object that describes the pod that will be created when
// executing a job.
Template *v1.PodTemplateSpec `json:"template"`
}
// JobStatus represents the current state of a Job.
type JobStatus struct {
// Conditions represent the latest available observations of an object's current state.
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
// StartTime represents time when the job was acknowledged by the Job Manager.
// It is not guaranteed to be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
StartTime *util.Time `json:"startTime,omitempty"`
// CompletionTime represents time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
CompletionTime *util.Time `json:"completionTime,omitempty"`
// Active is the number of actively running pods.
Active int `json:"active,omitempty"`
// Successful is the number of pods which reached Phase Succeeded.
Successful int `json:"successful,omitempty"`
// Unsuccessful is the number of pod failures; this applies only to jobs
// created with RestartPolicyNever, otherwise this value will always be 0.
Unsuccessful int `json:"unsuccessful,omitempty"`
}
type JobConditionType string
// These are valid conditions of a job.
const (
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
)
// JobCondition describes current state of a job.
type JobCondition struct {
// Type of job condition, currently only Complete.
Type JobConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status"`
// Last time the condition was checked.
LastProbeTime util.Time `json:"lastProbeTime,omitempty"`
// Last time the condition transitioned from one status to another.
LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"`
// (brief) reason for the condition's last transition.
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
Message string `json:"message,omitempty"`
}

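The versioned v1 types mirror the internal ones; a common way to consume JobStatus is to scan Conditions for a Complete condition with status True. A hedged sketch (the expv1 and apiv1 aliases are assumed import names, not anything defined in this diff):

package main

import (
	"fmt"

	apiv1 "k8s.io/kubernetes/pkg/api/v1"
	expv1 "k8s.io/kubernetes/pkg/apis/experimental/v1"
)

// jobComplete reports whether the job carries a Complete condition set to True.
func jobComplete(job *expv1.Job) bool {
	for _, c := range job.Status.Conditions {
		if c.Type == expv1.JobComplete && c.Status == apiv1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	job := expv1.Job{
		Status: expv1.JobStatus{
			Successful: 5,
			Conditions: []expv1.JobCondition{{Type: expv1.JobComplete, Status: apiv1.ConditionTrue}},
		},
	}
	fmt.Println(jobComplete(&job)) // expect: true
}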
Some files were not shown because too many files have changed in this diff.