diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 2a74f5eda0c..cfadfdf252e 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -13171,7 +13171,11 @@ "properties": { "reason": { "type": "string", - "description": "(brief) reason the container is not yet running, such as pulling its image." + "description": "(brief) reason the container is not yet running." + }, + "message": { + "type": "string", + "description": "Message regarding why the container is not yet running." } } }, @@ -13662,6 +13666,10 @@ "sessionAffinity": { "type": "string", "description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies" + }, + "loadBalancerIP": { + "type": "string", + "description": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature." } } }, diff --git a/build/common.sh b/build/common.sh index bbb5e0173c4..f351e41ed4e 100755 --- a/build/common.sh +++ b/build/common.sh @@ -835,6 +835,8 @@ function kube::release::package_full_tarball() { cp "${KUBE_ROOT}/README.md" "${release_stage}/" cp "${KUBE_ROOT}/LICENSE" "${release_stage}/" cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/" + mkdir -p "${release_stage}/contrib/completions/bash" + cp "${KUBE_ROOT}/contrib/completions/bash/kubectl" "${release_stage}/contrib/completions/bash" kube::release::clean_cruft diff --git a/cluster/addons/dns/README.md b/cluster/addons/dns/README.md index c6e5824cfe3..118c29dcc4f 100644 --- a/cluster/addons/dns/README.md +++ b/cluster/addons/dns/README.md @@ -43,17 +43,12 @@ For a regular service, this resolves to the port number and the CNAME: `my-svc.my-namespace.svc.cluster.local`. For a headless service, this resolves to multiple answers, one for each pod that is backing the service, and contains the port number and a CNAME of the pod -with the format `auto-generated-name.my-svc.my-namespace.svc.cluster.local` -SRV records always contain the 'svc' segment in them and are not supported for -old-style CNAMEs where the 'svc' segment was omitted. - +of the form `auto-generated-name.my-svc.my-namespace.svc.cluster.local`. ### Backwards compatibility Previous versions of kube-dns made names of the form -`my-svc.my-namespace.cluster.local` (the 'svc' level was added later). For -compatibility, kube-dns supports both names for the time being. Users should -avoid creating a namespace named 'svc', to avoid conflicts. The old name -format is deprecated and will be removed in a future release. +`my-svc.my-namespace.cluster.local` (the 'svc' level was added later). This +is no longer supported. ## How do I find the DNS server? The DNS server itself runs as a Kubernetes Service. This gives it a stable IP @@ -178,6 +173,11 @@ paths to the node's own DNS settings. If the node is able to resolve DNS names specific to the larger environment, pods should be able to, also. See "Known issues" below for a caveat. +If you don't want this, or if you want a different DNS config for pods, you can +use the kubelet's `--resolv-conf` flag. Setting it to "" means that pods will +not inherit DNS. 
Setting it to a valid file path means that kubelet will use +this file instead of `/etc/resolv.conf` for DNS inheritance. + ## Known issues Kubernetes installs do not configure the nodes' resolv.conf files to use the cluster DNS by default, because that process is inherently distro-specific. @@ -190,7 +190,7 @@ consume 1 `nameserver` record and 3 `search` records. This means that if a local installation already uses 3 `nameserver`s or uses more than 3 `search`es, some of those settings will be lost. As a partial workaround, the node can run `dnsmasq` which will provide more `nameserver` entries, but not more `search` -entries. +entries. You can also use kubelet's `--resolv-conf` flag. ## Making changes Please observe the release process for making changes to the `kube2sky` diff --git a/cluster/addons/dns/kube2sky/kube2sky.go b/cluster/addons/dns/kube2sky/kube2sky.go index 73e8df962a5..686c4da6316 100644 --- a/cluster/addons/dns/kube2sky/kube2sky.go +++ b/cluster/addons/dns/kube2sky/kube2sky.go @@ -35,8 +35,8 @@ import ( "github.com/golang/glog" skymsg "github.com/skynetservices/skydns/msg" kapi "k8s.io/kubernetes/pkg/api" + kcache "k8s.io/kubernetes/pkg/client/cache" kclient "k8s.io/kubernetes/pkg/client/unversioned" - kcache "k8s.io/kubernetes/pkg/client/unversioned/cache" kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" kframework "k8s.io/kubernetes/pkg/controller/framework" kSelector "k8s.io/kubernetes/pkg/fields" diff --git a/cluster/addons/dns/kube2sky/kube2sky_test.go b/cluster/addons/dns/kube2sky/kube2sky_test.go index 9c454ede2ea..a6e154973f9 100644 --- a/cluster/addons/dns/kube2sky/kube2sky_test.go +++ b/cluster/addons/dns/kube2sky/kube2sky_test.go @@ -29,7 +29,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" ) type fakeEtcdClient struct { diff --git a/cluster/aws/templates/format-disks.sh b/cluster/aws/templates/format-disks.sh index adebf1568b8..102e092f674 100644 --- a/cluster/aws/templates/format-disks.sh +++ b/cluster/aws/templates/format-disks.sh @@ -118,45 +118,51 @@ else # This is the best option, but it is sadly broken on most distros # Bug: https://github.com/docker/docker/issues/4036 - # 95% goes to the docker thin-pool - lvcreate -l 95%VG --thinpool docker-thinpool vg-ephemeral + # 80% goes to the docker thin-pool; we want to leave some space for host-volumes + lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral DOCKER_OPTS="${DOCKER_OPTS} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool" # Note that we don't move docker; docker goes direct to the thinpool - else + + # Remaining space (20%) is for kubernetes data + # TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data? + lvcreate -l 100%FREE -n kubernetes vg-ephemeral + mkfs -t ext4 /dev/vg-ephemeral/kubernetes + mkdir -p /mnt/ephemeral/kubernetes + echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab + mount /mnt/ephemeral/kubernetes + + move_kubelet="/mnt/ephemeral/kubernetes" + else # aufs - # Create a docker lv, use docker on it - # 95% goes to the docker thin-pool + # We used to split docker & kubernetes, but we no longer do that, because + # host volumes go into the kubernetes area, and it is otherwise very easy + # to fill up small volumes. 
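# As a quick sanity check (a sketch only, assuming the standard LVM2 and
# coreutils tools are present on the node), the layout created here can be
# inspected with:
#   vgs vg-ephemeral        # the volume group built over the ephemeral disks
#   lvs vg-ephemeral        # the thin pool / logical volumes carved out of it
#   df -h /mnt/ephemeral    # the ext4 filesystem mounted via the fstab entry below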
+ release=`lsb_release -c -s` if [[ "${release}" != "wheezy" ]] ; then - lvcreate -l 95%VG --thinpool docker-thinpool vg-ephemeral + lvcreate -l 100%FREE --thinpool pool-ephemeral vg-ephemeral - THINPOOL_SIZE=$(lvs vg-ephemeral/docker-thinpool -o LV_SIZE --noheadings --units M --nosuffix) - lvcreate -V${THINPOOL_SIZE}M -T vg-ephemeral/docker-thinpool -n docker + THINPOOL_SIZE=$(lvs vg-ephemeral/pool-ephemeral -o LV_SIZE --noheadings --units M --nosuffix) + lvcreate -V${THINPOOL_SIZE}M -T vg-ephemeral/pool-ephemeral -n ephemeral else # Thin provisioning not supported by Wheezy echo "Detected wheezy; won't use LVM thin provisioning" - lvcreate -l 95%VG -n docker vg-ephemeral + lvcreate -l 100%VG -n ephemeral vg-ephemeral fi - mkfs -t ext4 /dev/vg-ephemeral/docker - mkdir -p /mnt/ephemeral/docker - echo "/dev/vg-ephemeral/docker /mnt/ephemeral/docker ext4 noatime 0 0" >> /etc/fstab - mount /mnt/ephemeral/docker + mkfs -t ext4 /dev/vg-ephemeral/ephemeral + mkdir -p /mnt/ephemeral + echo "/dev/vg-ephemeral/ephemeral /mnt/ephemeral ext4 noatime 0 0" >> /etc/fstab + mount /mnt/ephemeral + + mkdir -p /mnt/ephemeral/kubernetes + move_docker="/mnt/ephemeral" - fi - - # Remaining 5% is for kubernetes data - # TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data? - lvcreate -l 100%FREE -n kubernetes vg-ephemeral - mkfs -t ext4 /dev/vg-ephemeral/kubernetes - mkdir -p /mnt/ephemeral/kubernetes - echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab - mount /mnt/ephemeral/kubernetes - - move_kubelet="/mnt/ephemeral/kubernetes" - else + move_kubelet="/mnt/ephemeral/kubernetes" + fi + else echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}" fi fi diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index c0a18753c08..6aae9232ebb 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -44,6 +44,8 @@ MINION_TAG="${INSTANCE_PREFIX}-minion" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" +RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" +ENABLE_EXPERIMENTAL_API="${KUBE_ENABLE_EXPERIMENTAL_API:-false}" # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. POLL_SLEEP_INTERVAL=3 @@ -87,7 +89,6 @@ CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}" ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" # Optional: Create autoscaler for cluster's nodes. -# NOT WORKING YET! ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}" if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" @@ -95,6 +96,13 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" fi +# Optional: Enable feature for autoscaling number of pods +# Experimental feature, not ready for production use. 
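# For example, an illustrative way to bring up a cluster with this feature
# (a sketch; the exact invocation depends on your provider and setup):
#   KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=true cluster/kube-up.sh
# Enabling it also forces ENABLE_EXPERIMENTAL_API=true below, which turns on
# the experimental/v1 group via the apiserver's --runtime-config.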
+ENABLE_HORIZONTAL_POD_AUTOSCALER="${KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER:-false}" +if [[ "${ENABLE_HORIZONTAL_POD_AUTOSCALER}" == "true" ]]; then + ENABLE_EXPERIMENTAL_API=true +fi + # Admission Controllers to invoke prior to persisting objects in cluster ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index a45477a3e7d..1ca394370d2 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -45,6 +45,9 @@ MINION_TAG="${INSTANCE_PREFIX}-minion" CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}" +RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" +ENABLE_EXPERIMENTAL_API="${KUBE_ENABLE_EXPERIMENTAL_API:-false}" + # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. POLL_SLEEP_INTERVAL=3 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET @@ -59,7 +62,10 @@ TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}" KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL" APISERVER_TEST_ARGS="--runtime-config=experimental/v1 ${TEST_CLUSTER_LOG_LEVEL}" -CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" +# pod-eviction-timeout is currently 2 * node-monitor-grace-period to allow for some network +# problems, but doesn't ensure that the Kubelet can be restarted without evicting Pods. We don't +# think it's necessary for tests. +CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL} --pod-eviction-timeout=1m20s" SCHEDULER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" KUBEPROXY_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" @@ -92,7 +98,6 @@ CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}" ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" # Optional: Create autoscaler for cluster's nodes. -# NOT WORKING YET! ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}" if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" @@ -100,6 +105,13 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" fi +# Optional: Enable feature for autoscaling number of pods +# Experimental feature, not ready for production use. +ENABLE_HORIZONTAL_POD_AUTOSCALER="${KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER:-false}" +if [[ "${ENABLE_HORIZONTAL_POD_AUTOSCALER}" == "true" ]]; then + ENABLE_EXPERIMENTAL_API=true +fi + ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: if set to true kube-up will automatically check for existing resources and clean them up. diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index c199a6643c5..eaf65b89dbd 100644 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -310,6 +310,11 @@ EOF cluster_registry_disk_type: gce cluster_registry_disk_size: $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) cluster_registry_disk_name: ${CLUSTER_REGISTRY_DISK} +EOF + fi + if [ -n "${ENABLE_HORIZONTAL_POD_AUTOSCALER:-}" ]; then + cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls +enable_horizontal_pod_autoscaler: '$(echo "$ENABLE_HORIZONTAL_POD_AUTOSCALER" | sed -e "s/'/''/g")' EOF fi } @@ -568,6 +573,11 @@ EOF # CIDR range. cat <<EOF >>/etc/salt/minion.d/grains.conf cbr-cidr: ${MASTER_IP_RANGE} +EOF + fi + if [[ ! 
-z "${RUNTIME_CONFIG:-}" ]]; then + cat <>/etc/salt/minion.d/grains.conf + runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")' EOF fi } diff --git a/cluster/gce/coreos/helper.sh b/cluster/gce/coreos/helper.sh index 5ef057acf4a..846bb2591b0 100755 --- a/cluster/gce/coreos/helper.sh +++ b/cluster/gce/coreos/helper.sh @@ -54,6 +54,8 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-}) KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-}) ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-}) MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE}) +ENABLE_HORIZONTAL_POD_AUTOSCALER: $(yaml-quote ${ENABLE_HORIZONTAL_POD_AUTOSCALER}) +RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG}) KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME}) KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME}) RKT_VERSION: $(yaml-quote ${RKT_VERSION}) diff --git a/cluster/gce/debian/helper.sh b/cluster/gce/debian/helper.sh index 6b929c2d9ee..694f29ec85e 100755 --- a/cluster/gce/debian/helper.sh +++ b/cluster/gce/debian/helper.sh @@ -51,6 +51,8 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-}) KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-}) ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-}) MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE}) +ENABLE_HORIZONTAL_POD_AUTOSCALER: $(yaml-quote ${ENABLE_HORIZONTAL_POD_AUTOSCALER}) +RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG}) CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-}) KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-}) KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-}) diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index c884c9c2195..604c855f236 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -53,6 +53,18 @@ function join_csv { # Verify prereqs function verify-prereqs { + if [[ "${ENABLE_EXPERIMENTAL_API}" == "true" ]]; then + if [[ -z "${RUNTIME_CONFIG}" ]]; then + RUNTIME_CONFIG="experimental/v1=true" + else + # TODO: add checking if RUNTIME_CONFIG contains "experimental/v1=false" and appending "experimental/v1=true" if not. + if echo "${RUNTIME_CONFIG}" | grep -q -v "experimental/v1=true"; then + echo "Experimental API should be turned on, but is not turned on in RUNTIME_CONFIG!" + exit 1 + fi + fi + fi + local cmd for cmd in gcloud gsutil; do if ! 
which "${cmd}" >/dev/null; then @@ -465,6 +477,7 @@ function write-master-env { if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then KUBELET_APISERVER="${MASTER_NAME}" fi + build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml" } diff --git a/cluster/gke/config-common.sh b/cluster/gke/config-common.sh index 790c26b2ded..0b4dbea69dd 100644 --- a/cluster/gke/config-common.sh +++ b/cluster/gke/config-common.sh @@ -25,7 +25,7 @@ NETWORK="${NETWORK:-default}" NETWORK_RANGE="${NETWORK_RANGE:-10.240.0.0/16}" FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}" GCLOUD="${GCLOUD:-gcloud}" -CMD_GROUP="${CMD_GROUP:-beta}" +CMD_GROUP="${CMD_GROUP:-}" GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}" MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}" MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-1}" diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index b2812adae0f..37c582a3358 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -99,7 +99,7 @@ function verify-prereqs() { sudo_prefix="sudo" fi ${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true - ${sudo_prefix} gcloud ${gcloud_prompt:-} components update "${CMD_GROUP}"|| true + ${sudo_prefix} gcloud ${gcloud_prompt:-} components update ${CMD_GROUP:-} || true ${sudo_prefix} gcloud ${gcloud_prompt:-} components update kubectl|| true ${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true } @@ -150,7 +150,7 @@ function kube-up() { ) # Bring up the cluster. - "${GCLOUD}" "${CMD_GROUP}" container clusters create "${CLUSTER_NAME}" "${create_args[@]}" + "${GCLOUD}" ${CMD_GROUP:-} container clusters create "${CLUSTER_NAME}" "${create_args[@]}" } # Execute prior to running tests to initialize required structure. This is @@ -200,7 +200,7 @@ function test-setup() { function detect-master() { echo "... in gke:detect-master()" >&2 detect-project >&2 - KUBE_MASTER_IP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \ + KUBE_MASTER_IP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \ --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \ | grep endpoint | cut -f 2 -d ' ') } @@ -242,7 +242,7 @@ function detect-minion-names { # NODE_INSTANCE_GROUP function detect-node-instance-group { echo "... in gke:detect-node-instance-group()" >&2 - NODE_INSTANCE_GROUP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \ + NODE_INSTANCE_GROUP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \ --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \ | grep instanceGroupManagers | cut -d '/' -f 11) } @@ -318,6 +318,6 @@ function test-teardown() { function kube-down() { echo "... in gke:kube-down()" >&2 detect-project >&2 - "${GCLOUD}" "${CMD_GROUP}" container clusters delete --project="${PROJECT}" \ + "${GCLOUD}" ${CMD_GROUP:-} container clusters delete --project="${PROJECT}" \ --zone="${ZONE}" "${CLUSTER_NAME}" --quiet } diff --git a/cluster/saltbase/salt/docker/init.sls b/cluster/saltbase/salt/docker/init.sls index 0ef2e4c66f3..3aca7d7610b 100644 --- a/cluster/saltbase/salt/docker/init.sls +++ b/cluster/saltbase/salt/docker/init.sls @@ -84,10 +84,10 @@ net.ipv4.ip_forward: # # To change: # -# 1. Find new deb name with: -# curl https://get.docker.com/ubuntu/dists/docker/main/binary-amd64/Packages +# 1. Find new deb name at: +# http://apt.dockerproject.org/repo/pool/main/d/docker-engine # 2. Download based on that: -# curl -O https://get.docker.com/ubuntu/pool/main/<...> +# curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/ # 3. 
Upload to GCS: # gsutil cp gs://kubernetes-release/docker/ # 4. Make it world readable: @@ -99,16 +99,22 @@ net.ipv4.ip_forward: {% set storage_base='https://storage.googleapis.com/kubernetes-release/docker/' %} +# Only upgrade Docker to 1.8.2 for the containerVM image. +# TODO(dchen1107): For release 1.1, we want to update the ContainerVM image to +# include Docker 1.8.2 and comment out the upgrade below. +{% if grains.get('cloud', '') == 'gce' + and grains.get('os_family', '') == 'Debian' + and grains.get('oscodename', '') == 'wheezy' -%} +{% set docker_pkg_name='docker-engine' %} +{% set override_deb='docker-engine_1.8.2-0~wheezy_amd64.deb' %} +{% set override_deb_sha1='dcff80bffcbde458508da58d2a9fe7bef8eed404' %} +{% set override_docker_ver='1.8.2-0~wheezy' %} +{% else %} +{% set docker_pkg_name='lxc-docker-1.7.1' %} +{% set override_docker_ver='1.7.1' %} {% set override_deb='lxc-docker-1.7.1_1.7.1_amd64.deb' %} {% set override_deb_sha1='81abef31dd2c616883a61f85bfb294d743b1c889' %} -{% set override_docker_ver='1.7.1' %} - -# Comment out below logic for master branch, so that we can upgrade GCE cluster -# to docker 1.7.1 by default. -# -# TODO(dchen1107): For release 1.1, we want to fall back to -# ContainerVM installed docker by set override_deb, override_deb_sha1 and -# override_docker_ver back to '' for gce cloud provider. +{% endif %} {% if override_docker_ver != '' %} purge-old-docker-package: @@ -135,10 +141,10 @@ purge-old-docker-package: - mode: 644 - makedirs: true -lxc-docker-{{ override_docker_ver }}: +docker-upgrade: pkg.installed: - sources: - - lxc-docker-{{ override_docker_ver }}: /var/cache/docker-install/{{ override_deb }} + - {{ docker_pkg_name }}: /var/cache/docker-install/{{ override_deb }} - require: - file: /var/cache/docker-install/{{ override_deb }} {% endif %} # end override_docker_ver != '' @@ -168,7 +174,7 @@ fix-service-docker: - file: {{ environment_file }} {% if override_docker_ver != '' %} - require: - - pkg: lxc-docker-{{ override_docker_ver }} + - pkg: {{ docker_pkg_name }}-{{ override_docker_ver }} {% endif %} {% endif %} @@ -187,13 +193,13 @@ docker: - watch: - file: {{ environment_file }} {% if override_docker_ver != '' %} - - pkg: lxc-docker-{{ override_docker_ver }} + - pkg: docker-upgrade {% endif %} {% if pillar.get('is_systemd') %} - file: {{ pillar.get('systemd_system_path') }}/docker.service {% endif %} {% if override_docker_ver != '' %} - require: - - pkg: lxc-docker-{{ override_docker_ver }} + - pkg: docker-upgrade {% endif %} {% endif %} # end grains.os_family != 'RedHat' diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest index 7c0e214213a..a4f82888a01 100644 --- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest +++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest @@ -1,6 +1,7 @@ {% set cluster_name = "" -%} {% set cluster_cidr = "" -%} {% set allocate_node_cidrs = "" -%} +{% set enable_horizontal_pod_autoscaler = "" -%} {% if pillar['instance_prefix'] is defined -%} {% set cluster_name = "--cluster-name=" + pillar['instance_prefix'] -%} @@ -11,6 +12,9 @@ {% if pillar['allocate_node_cidrs'] is defined -%} {% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%} {% endif -%} +{% if pillar['enable_horizontal_pod_autoscaler'] is defined -%} + {% set enable_horizontal_pod_autoscaler = "--enable-horizontal-pod-autoscaler=" + 
pillar['enable_horizontal_pod_autoscaler'] -%} +{% endif -%} {% set cloud_provider = "" -%} {% set cloud_config = "" -%} @@ -34,7 +38,7 @@ {% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%} {% endif -%} -{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%} +{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + enable_horizontal_pod_autoscaler + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%} # test_args has to be kept at the end, so they'll overwrite any prior configuration {% if pillar['controller_manager_test_args'] is defined -%} diff --git a/cluster/ubuntu/build.sh b/cluster/ubuntu/build.sh index cb9e7efdcba..0908c138e0f 100755 --- a/cluster/ubuntu/build.sh +++ b/cluster/ubuntu/build.sh @@ -32,7 +32,6 @@ mkdir -p binaries/minion # flannel echo "Download flannel release ..." FLANNEL_VERSION=${FLANNEL_VERSION:-"0.4.0"} -echo "Flannel version is $FLANNEL_VERSION" if [ ! -f flannel.tar.gz ] ; then curl -L https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz -o flannel.tar.gz tar xzf flannel.tar.gz @@ -54,10 +53,10 @@ cp $ETCD/etcd $ETCD/etcdctl binaries/master # k8s echo "Download kubernetes release ..." -K8S_VERSION=${K8S_VERSION:-"1.0.3"} +KUBE_VERSION=${KUBE_VERSION:-"1.0.3"} if [ ! -f kubernetes.tar.gz ] ; then - curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz + curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${KUBE_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz tar xzf kubernetes.tar.gz fi pushd kubernetes/server diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index c62faeefb85..2daa16f85c7 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -19,8 +19,6 @@ set -e SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR" -# use an array to record name and ip -declare -A mm MASTER="" MASTER_IP="" MINION_IPS="" @@ -443,24 +441,42 @@ function prepare-push() { echo "Upgrading nodes to local binaries is not yet supported.Please specify the version" exit 1 fi - # Run build.sh to get the latest release - source "${KUBE_ROOT}/cluster/ubuntu/build.sh" + + # Run build.sh to get the required release + pushd ubuntu + source "build.sh" + popd } -# Update a kubernetes master with latest release +# Update a kubernetes master with required release function push-master { source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" setClusterInfo ii=0 for i in ${nodes}; do - if [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then - echo "Cleaning on master ${i#*@}" - ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true + if [[ "${roles[${ii}]}" == "a" ]]; then + echo "Cleaning master ${i#*@}" + ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop; + sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd; + sudo rm -f /opt/bin/kube* /opt/bin/flanneld; + sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld; + sudo rm -rf /etc/default/kube* /etc/default/flanneld; + sudo rm -rf ~/kube' || true provision-master + elif [[ 
"${roles[${ii}]}" == "ai" ]]; then + echo "Cleaning master ${i#*@}" + ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop; + sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd; + sudo rm -f /opt/bin/kube* /opt/bin/flanneld; + sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld; + sudo rm -rf /etc/default/kube* /etc/default/flanneld; + sudo rm -rf ~/kube' || true + provision-masterandminion elif [[ "${roles[${ii}]}" == "i" ]]; then + ((ii=ii+1)) continue else - echo "unsupported role for ${i}. please check" + echo "unsupported role for ${i}, please check" exit 1 fi ((ii=ii+1)) @@ -468,41 +484,76 @@ function push-master { verify-cluster } -# Update a kubernetes node with latest release +# Update a kubernetes node with required release function push-node() { source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" - node=${1} + node_ip=${1} setClusterInfo ii=0 + existing=false for i in ${nodes}; do - if [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "ai" && $i == *$node ]]; then - echo "Cleaning on node ${i#*@}" - ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true + if [[ "${roles[${ii}]}" == "i" && ${i#*@} == $node_ip ]]; then + echo "Cleaning node ${i#*@}" + ssh -t $i 'sudo -p "[sudo] stop the all process: " service flanneld stop; + sudo rm -f /opt/bin/kube* /opt/bin/flanneld; + sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld; + sudo rm -rf /etc/default/kube* /etc/default/flanneld; + sudo rm -rf ~/kube' || true provision-minion $i + existing=true + elif [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]] && [[ ${i#*@} == $node_ip ]]; then + echo "${i} is master node, please try ./kube-push -m instead" + existing=true + elif [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then + ((ii=ii+1)) + continue else - echo "unsupported role for ${i}, or nodes ${i} don't exist. 
please check" + echo "unsupported role for ${i}, please check" exit 1 fi ((ii=ii+1)) done - verify-cluster + if [[ "${existing}" == false ]]; then + echo "node ${node_ip} does not exist" + else + verify-cluster + fi + } -# Update a kubernetes cluster with latest source -function kube-push { +# Update a kubernetes cluster with required source +function kube-push { prepare-push - #stop all the kube's process & etcd source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" + + #stop all the kube's process & etcd + ii=0 for i in ${nodes}; do - echo "Cleaning on node ${i#*@}" - ssh -t $i 'sudo -p "[sudo] stop all process: " service etcd stop' || true - ssh -t $i 'rm -f /opt/bin/kube* /etc/init/kube* /etc/init.d/kube* /etc/default/kube*; rm -rf ~/kube' || true + { + echo "Cleaning on node ${i#*@}" + if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then + ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop; + sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd' || true + elif [[ "${roles[${ii}]}" == "i" ]]; then + ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password for stopping flanneld: " service flanneld stop' || true + else + echo "unsupported role for ${i}" + fi + + ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld; + sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld; + sudo rm -rf /etc/default/kube* /etc/default/flanneld; + sudo rm -rf ~/kube' || true + } + ((ii=ii+1)) done - #Update all nodes with the lasted release + + #Update all nodes with the required release if [[ ! -f "ubuntu/binaries/master/kube-apiserver" ]]; then - echo "There is no latest release of kubernetes,please check first" + echo "There is no required release of kubernetes, please check first" exit 1 fi + #provision all nodes,include master&nodes setClusterInfo ii=0 diff --git a/cmd/genconversion/conversion.go b/cmd/genconversion/conversion.go index b89ecd5b5ce..9fab0ad6bae 100644 --- a/cmd/genconversion/conversion.go +++ b/cmd/genconversion/conversion.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/v1" - _ "k8s.io/kubernetes/pkg/expapi" - _ "k8s.io/kubernetes/pkg/expapi/v1" + _ "k8s.io/kubernetes/pkg/apis/experimental" + _ "k8s.io/kubernetes/pkg/apis/experimental/v1" pkg_runtime "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" @@ -44,6 +44,24 @@ var ( groupVersion = flag.StringP("version", "v", "api/v1", "groupPath/version for conversion.") ) +// We're moving to pkg/apis/group/version. This handles new and legacy packages. +func pkgPath(group, version string) string { + if group == "" { + group = "api" + } + gv := group + if version != "" { + gv = path.Join(group, version) + } + switch { + case group == "api": + // TODO(lavalamp): remove this special case when we move api to apis/api + return path.Join(pkgBase, gv) + default: + return path.Join(pkgBase, "apis", gv) + } +} + func main() { runtime.GOMAXPROCS(runtime.NumCPU()) flag.Parse() @@ -70,14 +88,14 @@ func main() { glog.Fatalf("error writing package line: %v", err) } - versionPath := path.Join(pkgBase, group, version) + versionPath := pkgPath(group, version) generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), versionPath) apiShort := generator.AddImport(path.Join(pkgBase, "api")) generator.AddImport(path.Join(pkgBase, "api/resource")) // TODO(wojtek-t): Change the overwrites to a flag. 
generator.OverwritePackage(version, "") for _, knownType := range api.Scheme.KnownTypes(version) { - if !strings.HasPrefix(knownType.PkgPath(), versionPath) { + if knownType.PkgPath() != versionPath { continue } if err := generator.GenerateConversionsForType(version, knownType); err != nil { diff --git a/cmd/gendeepcopy/deep_copy.go b/cmd/gendeepcopy/deep_copy.go index 7cfb05d9332..3491322860f 100644 --- a/cmd/gendeepcopy/deep_copy.go +++ b/cmd/gendeepcopy/deep_copy.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/v1" - _ "k8s.io/kubernetes/pkg/expapi" - _ "k8s.io/kubernetes/pkg/expapi/v1" + _ "k8s.io/kubernetes/pkg/apis/experimental" + _ "k8s.io/kubernetes/pkg/apis/experimental/v1" pkg_runtime "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" @@ -45,6 +45,32 @@ var ( overwrites = flag.StringP("overwrites", "o", "", "Comma-separated overwrites for package names") ) +// types inside the api package don't need to say "api.Scheme"; all others do. +func destScheme(group, version string) string { + if group == "api" && version == "" { + return "Scheme" + } + return "api.Scheme" +} + +// We're moving to pkg/apis/group/version. This handles new and legacy packages. +func pkgPath(group, version string) string { + if group == "" { + group = "api" + } + gv := group + if version != "" { + gv = path.Join(group, version) + } + switch { + case group == "api": + // TODO(lavalamp): remove this special case when we move api to apis/api + return path.Join(pkgBase, gv) + default: + return path.Join(pkgBase, "apis", gv) + } +} + func main() { runtime.GOMAXPROCS(runtime.NumCPU()) flag.Parse() @@ -65,10 +91,7 @@ func main() { group, version := path.Split(*groupVersion) group = strings.TrimRight(group, "/") - registerTo := "api.Scheme" - if *groupVersion == "api/" { - registerTo = "Scheme" - } + registerTo := destScheme(group, version) pkgname := group if len(version) != 0 { pkgname = version @@ -79,7 +102,7 @@ func main() { glog.Fatalf("error writing package line: %v", err) } - versionPath := path.Join(pkgBase, group, version) + versionPath := pkgPath(group, version) generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, sets.NewString("k8s.io/kubernetes")) generator.AddImport(path.Join(pkgBase, "api")) @@ -93,7 +116,7 @@ func main() { } } for _, knownType := range api.Scheme.KnownTypes(version) { - if !strings.HasPrefix(knownType.PkgPath(), versionPath) { + if knownType.PkgPath() != versionPath { continue } if err := generator.AddType(knownType); err != nil { diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index 82d9357243b..2dd3f07ace5 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -39,13 +39,13 @@ import ( apierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/testapi" + explatest "k8s.io/kubernetes/pkg/apis/experimental/latest" "k8s.io/kubernetes/pkg/apiserver" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/node" replicationControllerPkg "k8s.io/kubernetes/pkg/controller/replication" - explatest "k8s.io/kubernetes/pkg/expapi/latest" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/kubelet/cadvisor" diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 5a18f73ace7..c421167bc02 
100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -35,11 +35,11 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/meta" + explatest "k8s.io/kubernetes/pkg/apis/experimental/latest" "k8s.io/kubernetes/pkg/apiserver" "k8s.io/kubernetes/pkg/capabilities" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/cloudprovider" - explatest "k8s.io/kubernetes/pkg/expapi/latest" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/storage" diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 7a7e7aecd63..869d6b03875 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -35,12 +35,13 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller/autoscaler" - "k8s.io/kubernetes/pkg/controller/autoscaler/metrics" + "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/namespace" "k8s.io/kubernetes/pkg/controller/node" "k8s.io/kubernetes/pkg/controller/persistentvolume" + "k8s.io/kubernetes/pkg/controller/podautoscaler" + "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" replicationControllerPkg "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/controller/resourcequota" "k8s.io/kubernetes/pkg/controller/route" @@ -63,6 +64,7 @@ type CMServer struct { CloudConfigFile string ConcurrentEndpointSyncs int ConcurrentRCSyncs int + ConcurrentDSCSyncs int ServiceSyncPeriod time.Duration NodeSyncPeriod time.Duration ResourceQuotaSyncPeriod time.Duration @@ -98,6 +100,7 @@ func NewCMServer() *CMServer { Address: net.ParseIP("127.0.0.1"), ConcurrentEndpointSyncs: 5, ConcurrentRCSyncs: 5, + ConcurrentDSCSyncs: 2, ServiceSyncPeriod: 5 * time.Minute, NodeSyncPeriod: 10 * time.Second, ResourceQuotaSyncPeriod: 10 * time.Second, @@ -213,6 +216,9 @@ func (s *CMServer) Run(_ []string) error { controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas) go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop) + go daemon.NewDaemonSetsController(kubeClient). 
+ Run(s.ConcurrentDSCSyncs, util.NeverStop) + cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) @@ -248,7 +254,7 @@ func (s *CMServer) Run(_ []string) error { namespaceController.Run() if s.EnableHorizontalPodAutoscaler { - horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient)) + horizontalPodAutoscalerController := podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient)) horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod) } diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index a7f98813fa3..9a6cfb8ac9c 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -27,10 +27,10 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/config" diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 225d756f26a..c90453b6c52 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -35,11 +35,11 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/client/chaosclient" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/healthz" "k8s.io/kubernetes/pkg/kubelet" @@ -119,6 +119,7 @@ type KubeletServer struct { ResolverConfig string ResourceContainer string RktPath string + RktStage1Image string RootDirectory string RunOnce bool StandaloneMode bool @@ -189,6 +190,7 @@ func NewKubeletServer() *KubeletServer { RegistryBurst: 10, ResourceContainer: "/kubelet", RktPath: "", + RktStage1Image: "", RootDirectory: defaultRootDir, SyncFrequency: 10 * time.Second, SystemContainer: "", @@ -254,6 +256,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.") fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.") fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'") + fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used") fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. 
(Default: \"\").") fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.") fs.IntVar(&s.MaxPods, "max-pods", 40, "Number of Pods that can run on this Kubelet.") @@ -364,6 +367,7 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) { ResolverConfig: s.ResolverConfig, ResourceContainer: s.ResourceContainer, RktPath: s.RktPath, + RktStage1Image: s.RktStage1Image, RootDirectory: s.RootDirectory, Runonce: s.RunOnce, StandaloneMode: (len(s.APIServerList) == 0), @@ -789,6 +793,7 @@ type KubeletConfig struct { ResolverConfig string ResourceContainer string RktPath string + RktStage1Image string RootDirectory string Runonce bool StandaloneMode bool @@ -851,6 +856,7 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod kc.CgroupRoot, kc.ContainerRuntime, kc.RktPath, + kc.RktStage1Image, kc.Mounter, kc.DockerDaemonContainer, kc.SystemContainer, diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index 0413c2a5260..f78bc485e80 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -359,6 +359,7 @@ _kubectl_create() flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--output=") two_word_flags+=("-o") + flags+=("--schema-cache-dir=") flags+=("--validate") must_have_one_flag=() @@ -388,6 +389,7 @@ _kubectl_replace() flags+=("--grace-period=") flags+=("--output=") two_word_flags+=("-o") + flags+=("--schema-cache-dir=") flags+=("--timeout=") flags+=("--validate") @@ -534,6 +536,7 @@ _kubectl_rolling-update() flags+=("--output-version=") flags+=("--poll-interval=") flags+=("--rollback") + flags+=("--schema-cache-dir=") flags+=("--show-all") flags+=("-a") flags+=("--sort-by=") @@ -687,6 +690,7 @@ _kubectl_run() flags+=("--image=") flags+=("--labels=") two_word_flags+=("-l") + flags+=("--limits=") flags+=("--no-headers") flags+=("--output=") two_word_flags+=("-o") @@ -695,6 +699,7 @@ _kubectl_run() flags+=("--port=") flags+=("--replicas=") two_word_flags+=("-r") + flags+=("--requests=") flags+=("--restart=") flags+=("--show-all") flags+=("-a") @@ -762,6 +767,7 @@ _kubectl_expose() flags+=("--generator=") flags+=("--labels=") two_word_flags+=("-l") + flags+=("--load-balancer-ip=") flags+=("--name=") flags+=("--no-headers") flags+=("--output=") @@ -781,7 +787,6 @@ _kubectl_expose() flags+=("--type=") must_have_one_flag=() - must_have_one_flag+=("--port=") must_have_one_noun=() } diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index ad44a1c0e2b..70d653ba088 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -47,6 +47,7 @@ import ( "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/pflag" + "k8s.io/kubernetes/pkg/controller/daemon" ) // CMServer is the main context object for the controller manager. @@ -113,6 +114,9 @@ func (s *CMServer) Run(_ []string) error { controllerManager := replicationcontroller.NewReplicationManager(kubeClient, replicationcontroller.BurstReplicas) go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop) + go daemon.NewDaemonSetsController(kubeClient). 
+ Run(s.ConcurrentDSCSyncs, util.NeverStop) + //TODO(jdef) should eventually support more cloud providers here if s.CloudProvider != mesos.ProviderName { glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider) diff --git a/contrib/mesos/pkg/executor/executor_test.go b/contrib/mesos/pkg/executor/executor_test.go index 2ea53cbad30..3a6dd5f0683 100644 --- a/contrib/mesos/pkg/executor/executor_test.go +++ b/contrib/mesos/pkg/executor/executor_test.go @@ -37,8 +37,8 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/kubelet" kconfig "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/dockertools" diff --git a/contrib/mesos/pkg/executor/service/service.go b/contrib/mesos/pkg/executor/service/service.go index 5508d78ee6e..ba0664efb09 100644 --- a/contrib/mesos/pkg/executor/service/service.go +++ b/contrib/mesos/pkg/executor/service/service.go @@ -325,6 +325,7 @@ func (ks *KubeletExecutorServer) createAndInitKubelet( kc.CgroupRoot, kc.ContainerRuntime, kc.RktPath, + kc.RktStage1Image, kc.Mounter, kc.DockerDaemonContainer, kc.SystemContainer, diff --git a/contrib/mesos/pkg/offers/offers.go b/contrib/mesos/pkg/offers/offers.go index 962fc81663c..b11e4f5dff1 100644 --- a/contrib/mesos/pkg/offers/offers.go +++ b/contrib/mesos/pkg/offers/offers.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/proc" "k8s.io/kubernetes/contrib/mesos/pkg/queue" "k8s.io/kubernetes/contrib/mesos/pkg/runtime" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/util/sets" ) diff --git a/contrib/mesos/pkg/queue/interface.go b/contrib/mesos/pkg/queue/interface.go index 96e51f6e8de..de10a47904a 100644 --- a/contrib/mesos/pkg/queue/interface.go +++ b/contrib/mesos/pkg/queue/interface.go @@ -19,7 +19,7 @@ package queue import ( "time" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" ) type EventType int diff --git a/contrib/mesos/pkg/scheduler/plugin.go b/contrib/mesos/pkg/scheduler/plugin.go index e7f39cff4ed..72cd2e15d11 100644 --- a/contrib/mesos/pkg/scheduler/plugin.go +++ b/contrib/mesos/pkg/scheduler/plugin.go @@ -35,9 +35,9 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util" plugin "k8s.io/kubernetes/plugin/pkg/scheduler" diff --git a/contrib/mesos/pkg/scheduler/plugin_test.go b/contrib/mesos/pkg/scheduler/plugin_test.go index fded56da701..9737c664ee5 100644 --- a/contrib/mesos/pkg/scheduler/plugin_test.go +++ b/contrib/mesos/pkg/scheduler/plugin_test.go @@ -26,8 +26,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/runtime" kutil "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/watch" diff --git a/contrib/mesos/pkg/scheduler/pod.go 
b/contrib/mesos/pkg/scheduler/pod.go index c6f3dd85e4e..bf70100bcf9 100644 --- a/contrib/mesos/pkg/scheduler/pod.go +++ b/contrib/mesos/pkg/scheduler/pod.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/queue" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" ) // wrapper for the k8s pod type so that we can define additional methods on a "pod" diff --git a/contrib/mesos/pkg/service/endpoints_controller.go b/contrib/mesos/pkg/service/endpoints_controller.go index 589aa0dfe3c..f6316c6bac7 100644 --- a/contrib/mesos/pkg/service/endpoints_controller.go +++ b/contrib/mesos/pkg/service/endpoints_controller.go @@ -26,8 +26,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/endpoints" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" kservice "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" diff --git a/docs/admin/node.md b/docs/admin/node.md index 2d5520311a3..e9f061bc514 100644 --- a/docs/admin/node.md +++ b/docs/admin/node.md @@ -222,7 +222,7 @@ you are doing [manual node administration](#manual-node-administration), then yo capacity when adding a node. The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It -checks that the sum of the limits of containers on the node is no greater than than the node capacity. It +checks that the sum of the limits of containers on the node is no greater than the node capacity. It includes all containers started by kubelet, but not containers started directly by docker, nor processes not in containers. diff --git a/docs/admin/ovs-networking.png b/docs/admin/ovs-networking.png index ca75ab305b8..70a7b9bb175 100644 Binary files a/docs/admin/ovs-networking.png and b/docs/admin/ovs-networking.png differ diff --git a/docs/design/event_compression.md b/docs/design/event_compression.md index e8f9775b8d3..3d900e0700c 100644 --- a/docs/design/event_compression.md +++ b/docs/design/event_compression.md @@ -60,7 +60,7 @@ Instead of a single Timestamp, each event object [contains](http://releases.k8s. Each binary that generates events: * Maintains a historical record of previously generated events: - * Implemented with ["Least Recently Used Cache"](https://github.com/golang/groupcache/blob/master/lru/lru.go) in [`pkg/client/unversioned/record/events_cache.go`](../../pkg/client/unversioned/record/events_cache.go). + * Implemented with ["Least Recently Used Cache"](https://github.com/golang/groupcache/blob/master/lru/lru.go) in [`pkg/client/record/events_cache.go`](../../pkg/client/record/events_cache.go). * The key in the cache is generated from the event object minus timestamps/count/transient fields, specifically the following events fields are used to construct a unique key for an event: * `event.Source.Component` * `event.Source.Host` diff --git a/docs/devel/api_changes.md b/docs/devel/api_changes.md index 45f0dd4c9e7..e0a65fe0092 100644 --- a/docs/devel/api_changes.md +++ b/docs/devel/api_changes.md @@ -38,7 +38,7 @@ with a number of existing API types and with the [API conventions](api-conventions.md). If creating a new API type/resource, we also recommend that you first send a PR containing just a proposal for the new API types, and that you initially target -the experimental API (pkg/expapi). 
+the experimental API (pkg/apis/experimental). The Kubernetes API has two major components - the internal structures and the versioned APIs. The versioned APIs are intended to be stable, while the @@ -399,10 +399,10 @@ The conversion code resides with each versioned API. There are two files: functions - `pkg/api/<version>/conversion_generated.go` containing auto-generated conversion functions - - `pkg/expapi/<version>/conversion.go` containing manually written conversion - functions - - `pkg/expapi/<version>/conversion_generated.go` containing auto-generated + - `pkg/apis/experimental/<version>/conversion.go` containing manually written conversion functions + - `pkg/apis/experimental/<version>/conversion_generated.go` containing + auto-generated conversion functions Since auto-generated conversion functions are using manually written ones, those manually written should be named with a defined convention, i.e. a function @@ -437,7 +437,7 @@ of your versioned api objects. The deep copy code resides with each versioned API: - `pkg/api/<version>/deep_copy_generated.go` containing auto-generated copy functions - - `pkg/expapi/<version>/deep_copy_generated.go` containing auto-generated copy functions + - `pkg/apis/experimental/<version>/deep_copy_generated.go` containing auto-generated copy functions To regenerate them: - run ``` hack/update-generated-deep-copies.sh ``` +## Making a new API Group + +This section is under construction, as we make the tooling completely generic. + +At the moment, you'll have to make a new directory under pkg/apis/; copy the +directory structure from pkg/apis/experimental. Add the new group/version to all +of the hack/{verify,update}-generated-{deep-copy,conversions,swagger}.sh files +in the appropriate places--it should just require adding your new group/version +to a bash array. You will also need to make sure your new types are imported by +the generation commands (cmd/gendeepcopy/ & cmd/genconversion). These +instructions may not be complete and will be updated as we gain experience. + +Adding API groups outside of the pkg/apis/ directory is not currently supported, +but is clearly desirable. The deep copy & conversion generators need to work by +parsing go files instead of by reflection; then they will be easy to point at +arbitrary directories: see issue [#13775](http://issue.k8s.io/13775). + ## Update the fuzzer Part of our testing regimen for APIs is to "fuzz" (fill with random values) API diff --git a/docs/getting-started-guides/cloudstack.md b/docs/getting-started-guides/cloudstack.md index a08db9866b0..07a9bf3910c 100644 --- a/docs/getting-started-guides/cloudstack.md +++ b/docs/getting-started-guides/cloudstack.md @@ -108,7 +108,7 @@ Once the playbook has finished, it will print out the IP of the Kubernetes master SSH to it using the key that was created and using the _core_ user and you can list the machines in your cluster: - $ ssh -i ~/.ssh/id_rsa_k8s core@ + $ ssh -i ~/.ssh/id_rsa_k8s core@ $ fleetctl list-machines MACHINE IP METADATA a017c422... 
role=node diff --git a/docs/getting-started-guides/docker.md b/docs/getting-started-guides/docker.md index 7028035f232..91852265386 100644 --- a/docs/getting-started-guides/docker.md +++ b/docs/getting-started-guides/docker.md @@ -42,7 +42,7 @@ Running Kubernetes locally via Docker - [Step Three: Run the service proxy](#step-three-run-the-service-proxy) - [Test it out](#test-it-out) - [Run an application](#run-an-application) -- [Expose it as a service:](#expose-it-as-a-service) +- [Expose it as a service](#expose-it-as-a-service) - [A note on turning down your cluster](#a-note-on-turning-down-your-cluster) ### Overview @@ -128,7 +128,7 @@ On OS/X you will need to set up port forwarding via ssh: boot2docker ssh -L8080:localhost:8080 ``` -List the nodes in your cluster by running:: +List the nodes in your cluster by running: ```sh kubectl get nodes @@ -149,7 +149,7 @@ If you are running different Kubernetes clusters, you may need to specify `-s ht kubectl -s http://localhost:8080 run nginx --image=nginx --port=80 ``` -now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled. +Now run `docker ps` and you should see nginx running. You may need to wait a few minutes for the image to get pulled. ### Expose it as a service @@ -164,7 +164,7 @@ NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR nginx 10.0.93.211 80/TCP run=nginx 1h ``` -If `CLUSTER_IP` is blank run the following command to obtain it. Know issue #10836 +If `CLUSTER_IP` is blank, run the following command to obtain it. Known issue [#10836](https://github.com/kubernetes/kubernetes/issues/10836) ```sh kubectl get svc nginx diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index bba7c2f3cfe..705e5e64b3b 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -123,7 +123,7 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" KUBE_API_ARGS="" ``` -* Edit /etc/etcd/etcd.conf,let the etcd to listen all the ip instead of 127.0.0.1, if not, you will get the error like "connection refused" +* Edit /etc/etcd/etcd.conf to let etcd listen on all IPs instead of 127.0.0.1; otherwise you will get errors like "connection refused". Note that Fedora 22 uses etcd 2.0. One of the changes in etcd 2.0 is that it now uses ports 2379 and 2380 (as opposed to etcd 0.4.6, which used 4001 and 7001). ```sh ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001" diff --git a/docs/getting-started-guides/gce.md b/docs/getting-started-guides/gce.md index 8353ac90299..22663d42e3b 100644 --- a/docs/getting-started-guides/gce.md +++ b/docs/getting-started-guides/gce.md @@ -132,6 +132,24 @@ However the gcloud bundled kubectl version may be older than the one downloaded get.k8s.io install script. We recommend you use the downloaded binary to avoid potential issues with client/server version skew. +#### Enabling bash completion of the Kubernetes command line tools + +You may find it useful to enable `kubectl` bash completion: + +``` +$ source ./contrib/completions/bash/kubectl +``` + +**Note**: This will last for the duration of your bash session. If you want to make this permanent, add this line to your bash profile. 
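For example (a sketch that assumes a checkout in `~/kubernetes`; adjust the path to wherever the file actually lives):

```
$ echo "source ~/kubernetes/contrib/completions/bash/kubectl" >> ~/.bashrc
```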
+
+Alternatively, on most Linux distributions you can also copy the completions file into your `bash_completion.d` directory, like this:
+
+```
+$ cp ./contrib/completions/bash/kubectl /etc/bash_completion.d/
+```
+
+but then you will have to update it whenever you update kubectl.
+
 ### Getting started with your cluster
 #### Inspect your cluster
diff --git a/docs/getting-started-guides/rkt/README.md b/docs/getting-started-guides/rkt/README.md index 05ebcee97f2..909e23335dd 100644 --- a/docs/getting-started-guides/rkt/README.md +++ b/docs/getting-started-guides/rkt/README.md @@ -38,36 +38,31 @@ We still have [a bunch of work](http://issue.k8s.io/8262) to do to make the expe
 ### **Prerequisite**
-- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on your machine and should be enabled. The minimum version required at this moment (2015/05/28) is [215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html).
+- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on the machine and should be enabled. The minimum version required at this moment (2015/09/01) is 219. *(Note that systemd is not required by rkt itself; we are using it here to monitor and manage the pods launched by kubelet.)*
 - Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt).
- The minimum version required for now is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6).
-
-- Make sure the `rkt metadata service` is running because it is necessary for running pod in private network mode.
- More details about the networking of rkt can be found in the [documentation](https://github.com/coreos/rkt/blob/master/Documentation/networking.md).
-
- To start the `rkt metadata service`, you can simply run:
-
- ```console
- $ sudo rkt metadata-service
- ```
-
- If you want the service to be running as a systemd service, then:
-
- ```console
- $ sudo systemd-run rkt metadata-service
- ```
-
- Alternatively, you can use the [rkt-metadata.service](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.service) and [rkt-metadata.socket](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.socket) to start the service.
+ The minimum version required for now is [v0.8.0](https://github.com/coreos/rkt/releases/tag/v0.8.0).
+- Note that for rkt versions later than v0.7.0, the `metadata service` is not required for running pods in private networks, so rkt pods will no longer register with the metadata service by default.
 ### Local cluster
-To use rkt as the container runtime, you just need to set the environment variable `CONTAINER_RUNTIME`:
+To use rkt as the container runtime, we need to supply `--container-runtime=rkt` and `--rkt-path=$PATH_TO_RKT_BINARY` to kubelet. Additionally, we can provide the `--rkt-stage1-image` flag
+as well to select which [stage1 image](https://github.com/coreos/rkt/blob/master/Documentation/running-lkvm-stage1.md) we want to use.
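+
+For example, a hypothetical direct kubelet invocation with these flags (the paths are illustrative; point them at wherever rkt and the stage1 image live on your machine):
+
+```console
+$ sudo kubelet --container-runtime=rkt \
+    --rkt-path=/usr/local/bin/rkt \
+    --rkt-stage1-image=/usr/local/share/rkt/stage1.aci \
+    --api-servers=127.0.0.1:8080
+```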
+
+If you are using the [hack/local-up-cluster.sh](../../../hack/local-up-cluster.sh) script to launch the local cluster, then you can edit the environment variables `CONTAINER_RUNTIME`, `RKT_PATH`, and `RKT_STAGE1_IMAGE` to
+set these flags:
 ```console
 $ export CONTAINER_RUNTIME=rkt
+$ export RKT_PATH=$PATH_TO_RKT_BINARY
+$ export RKT_STAGE1_IMAGE=$PATH_TO_STAGE1_IMAGE
+```
+
+Then we can launch the local cluster using the script:
+
+```console
+$ hack/local-up-cluster.sh
 ```
@@ -85,7 +80,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
 You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:
 ```console
-$ export KUBE_RKT_VERSION=0.5.6
+$ export KUBE_RKT_VERSION=0.8.0
 ```
 Then you can launch the cluster by:
@@ -109,7 +104,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
 You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:
 ```console
-$ export KUBE_RKT_VERSION=0.5.6
+$ export KUBE_RKT_VERSION=0.8.0
 ```
 You can optionally choose the CoreOS channel by setting `COREOS_CHANNEL`:
@@ -134,6 +129,46 @@ See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try ou
 For more complete applications, please look in the [examples directory](../../../examples/).
+### Debugging
+
+Here are several tips for when you run into any issues.
+
+##### Check logs
+
+By default, the log verbosity level is 2. In order to see more logs related to rkt, we can set the verbosity level to 4.
+For a local cluster, we can set the environment variable: `LOG_LEVEL=4`.
+If the cluster is using salt, we can edit the [logging.sls](../../../cluster/saltbase/pillar/logging.sls) in the saltbase.
+
+##### Check rkt pod status
+
+To check the pods' status, we can use rkt commands, such as `rkt list`, `rkt status`, `rkt image list`, etc.
+More information about the rkt command line can be found [here](https://github.com/coreos/rkt/blob/master/Documentation/commands.md).
+
+##### Check journal logs
+
+As we use systemd to launch rkt pods (by creating service files which run `rkt run-prepared`), we can check a pod's logs
+using `journalctl`:
+
+- Check the running state of the systemd service:
+
+```console
+$ sudo journalctl -u $SERVICE_FILE
+```
+
+where `$SERVICE_FILE` is the name of the service file created for the pod; you can find it in the kubelet logs.
+
+##### Check the log of the container in the pod
+
+```console
+$ sudo journalctl -M rkt-$UUID -u $CONTAINER_NAME
+```
+
+where `$UUID` is the rkt pod's UUID, which you can find via `rkt list --full`, and `$CONTAINER_NAME` is the container's name.
+
+##### Check Kubernetes events and logs
+
+Besides the tricks above, Kubernetes also provides handy tools for debugging pods, such as `kubectl describe` and `kubectl logs`; see the sketch below.
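+
+For example, a quick sketch of the usual flow (`my-pod` is a placeholder pod name):
+
+```console
+$ kubectl get events
+$ kubectl describe pod my-pod
+$ kubectl logs my-pod
+```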
More information can be found [here](../../../docs/user-guide/application-troubleshooting.md).
+
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/rkt/README.md?pixel)]()
diff --git a/docs/getting-started-guides/ubuntu-calico.md b/docs/getting-started-guides/ubuntu-calico.md index e83a097b69f..b2ea65e9e87 100644 --- a/docs/getting-started-guides/ubuntu-calico.md +++ b/docs/getting-started-guides/ubuntu-calico.md @@ -246,7 +246,8 @@ kubernetes/cluster/ubuntu/build.sh
 sudo cp -f binaries/minion/* /usr/bin
 # Get the iptables based kube-proxy reccomended for this demo
-sudo wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy -P /usr/bin/
+wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy
+sudo cp kube-proxy /usr/bin/
 sudo chmod +x /usr/bin/kube-proxy
 ```
diff --git a/docs/man/man1/kubectl-create.1 b/docs/man/man1/kubectl-create.1 index d41c1073703..2c28bd1fabd 100644 --- a/docs/man/man1/kubectl-create.1 +++ b/docs/man/man1/kubectl-create.1 @@ -28,6 +28,10 @@ JSON and YAML formats are accepted.
 \fB\-o\fP, \fB\-\-output\fP=""
 Output mode. Use "\-o name" for shorter output (resource/name).
+.PP
+\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
+ If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
+
 .PP
 \fB\-\-validate\fP=true
 If true, use a schema to validate the input before sending it
diff --git a/docs/man/man1/kubectl-expose.1 b/docs/man/man1/kubectl-expose.1 index 513310db4f2..18a2d871095 100644 --- a/docs/man/man1/kubectl-expose.1 +++ b/docs/man/man1/kubectl-expose.1 @@ -50,6 +50,10 @@ re\-use the labels from the resource it exposes.
 \fB\-l\fP, \fB\-\-labels\fP=""
 Labels to apply to the service created by this call.
+.PP
+\fB\-\-load\-balancer\-ip\fP=""
+ IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud\-provider specific).
+
 .PP
 \fB\-\-name\fP=""
 The name for the newly created object.
diff --git a/docs/man/man1/kubectl-replace.1 b/docs/man/man1/kubectl-replace.1 index b76175b1aa2..c3124c575ee 100644 --- a/docs/man/man1/kubectl-replace.1 +++ b/docs/man/man1/kubectl-replace.1 @@ -46,6 +46,10 @@ Please refer to the models in
 \fB\-o\fP, \fB\-\-output\fP=""
 Output mode. Use "\-o name" for shorter output (resource/name).
+.PP
+\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
+ If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
+
 .PP
 \fB\-\-timeout\fP=0
 Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object
diff --git a/docs/man/man1/kubectl-rolling-update.1 b/docs/man/man1/kubectl-rolling-update.1 index 03cf264dc5e..f0da1054ab2 100644 --- a/docs/man/man1/kubectl-rolling-update.1 +++ b/docs/man/man1/kubectl-rolling-update.1 @@ -60,6 +60,10 @@ existing replication controller and overwrite at least one (common) label in its
 \fB\-\-rollback\fP=false
 If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout
+.PP
+\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
+ If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
+
 .PP
 \fB\-a\fP, \fB\-\-show\-all\fP=false
 When printing, show all resources (default hide terminated pods.)
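The `--schema-cache-dir` flag added to these commands controls where kubectl caches downloaded API schemas for client-side validation. For example, a hypothetical invocation pointing the cache at a custom location (the path and file name are illustrative):

```console
$ kubectl create -f pod.yaml --schema-cache-dir=/var/cache/kubectl-schema
```

Judging from the flag's description, passing an empty value should disable the cache and force the schema to be re-fetched for each validation.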
diff --git a/docs/man/man1/kubectl-run.1 b/docs/man/man1/kubectl-run.1 index 1ba98e3898b..11869df0b6f 100644 --- a/docs/man/man1/kubectl-run.1 +++ b/docs/man/man1/kubectl-run.1 @@ -50,6 +50,10 @@ Creates a replication controller to manage the created container(s). \fB\-l\fP, \fB\-\-labels\fP="" Labels to apply to the pod(s). +.PP +\fB\-\-limits\fP="" + The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi' + .PP \fB\-\-no\-headers\fP=false When using the default output, don't print headers. @@ -76,6 +80,10 @@ Creates a replication controller to manage the created container(s). \fB\-r\fP, \fB\-\-replicas\fP=1 Number of replicas to create for this container. Default is 1. +.PP +\fB\-\-requests\fP="" + The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi' + .PP \fB\-\-restart\fP="Always" The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and \-\-replicas must be 1. Default 'Always' diff --git a/docs/proposals/apiserver-watch.md b/docs/proposals/apiserver-watch.md index b80690301ef..2917eec4f19 100644 --- a/docs/proposals/apiserver-watch.md +++ b/docs/proposals/apiserver-watch.md @@ -166,7 +166,7 @@ the same time, we can introduce an additional etcd event type: Thus, we need to create the EtcdResync event, extend watch.Interface and its implementations to support it and handle those events appropriately in places like - [Reflector](../../pkg/client/unversioned/cache/reflector.go) + [Reflector](../../pkg/client/cache/reflector.go) However, this might turn out to be unnecessary optimization if apiserver will always keep up (which is possible in the new design). We will work diff --git a/docs/user-guide/images.md b/docs/user-guide/images.md index d6d19921c13..b8b37dd8266 100644 --- a/docs/user-guide/images.md +++ b/docs/user-guide/images.md @@ -88,7 +88,7 @@ use the full image name (e.g. gcr.io/my_project/image:tag). All pods in a cluster will have read access to images in this registry. -The kubelet kubelet will authenticate to GCR using the instance's +The kubelet will authenticate to GCR using the instance's Google service account. The service account on the instance will have a `https://www.googleapis.com/auth/devstorage.read_only`, so it can pull from the project's GCR, but not push. diff --git a/docs/user-guide/kubectl/kubectl_create.md b/docs/user-guide/kubectl/kubectl_create.md index aeaf523944c..a5d3d9621db 100644 --- a/docs/user-guide/kubectl/kubectl_create.md +++ b/docs/user-guide/kubectl/kubectl_create.md @@ -61,6 +61,7 @@ $ cat pod.json | kubectl create -f - ``` -f, --filename=[]: Filename, directory, or URL to file to use to create the resource -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). 
+ --schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
 --validate[=true]: If true, use a schema to validate the input before sending it
 ```
@@ -96,7 +97,7 @@ $ cat pod.json | kubectl create -f -
 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
-###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152429973 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.289761103 +0000 UTC
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_create.md?pixel)]()
diff --git a/docs/user-guide/kubectl/kubectl_expose.md b/docs/user-guide/kubectl/kubectl_expose.md index 5d1e82a957c..9d57bd83446 100644 --- a/docs/user-guide/kubectl/kubectl_expose.md +++ b/docs/user-guide/kubectl/kubectl_expose.md @@ -45,7 +45,7 @@ selector for a new Service on the specified port. If no labels are specified, th
 re-use the labels from the resource it exposes.
 ```
-kubectl expose (-f FILENAME | TYPE NAME) --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [----external-ip=external-ip-of-service] [--type=type]
+kubectl expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]
 ```
 ### Examples
@@ -73,6 +73,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
 -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service
 --generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.
 -l, --labels="": Labels to apply to the service created by this call.
+ --load-balancer-ip="": IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).
 --name="": The name for the newly created object.
 --no-headers[=false]: When using the default output, don't print headers.
 -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
@@ -121,7 +122,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
-###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.159044239 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-09-11 03:36:48.458259032 +0000 UTC
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]()
diff --git a/docs/user-guide/kubectl/kubectl_replace.md b/docs/user-guide/kubectl/kubectl_replace.md index 96a15b17b9e..60485a4c0aa 100644 --- a/docs/user-guide/kubectl/kubectl_replace.md +++ b/docs/user-guide/kubectl/kubectl_replace.md @@ -74,6 +74,7 @@ kubectl replace --force -f ./pod.json
 --force[=false]: Delete and re-create the specified resource
 --grace-period=-1: Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.
 -o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
+ --schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema' --timeout=0: Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object --validate[=true]: If true, use a schema to validate the input before sending it ``` @@ -110,7 +111,7 @@ kubectl replace --force -f ./pod.json * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153166598 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.290279625 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_replace.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_rolling-update.md b/docs/user-guide/kubectl/kubectl_rolling-update.md index 751c939335f..07cbaebc177 100644 --- a/docs/user-guide/kubectl/kubectl_rolling-update.md +++ b/docs/user-guide/kubectl/kubectl_rolling-update.md @@ -78,6 +78,7 @@ $ kubectl rolling-update frontend --image=image:v2 --output-version="": Output the formatted object with the given version (default api-version). --poll-interval=3s: Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". --rollback[=false]: If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout + --schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema' -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. @@ -118,7 +119,7 @@ $ kubectl rolling-update frontend --image=image:v2 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154895732 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.293748592 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_run.md b/docs/user-guide/kubectl/kubectl_run.md index 7a8cf4da6a9..06ea96c79ad 100644 --- a/docs/user-guide/kubectl/kubectl_run.md +++ b/docs/user-guide/kubectl/kubectl_run.md @@ -87,12 +87,14 @@ $ kubectl run nginx --image=nginx --command -- ... --hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container. --image="": The image for the container to run. -l, --labels="": Labels to apply to the pod(s). + --limits="": The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi' --no-headers[=false]: When using the default output, don't print headers. -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... 
See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
 --output-version="": Output the formatted object with the given version (default api-version).
 --overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.
 --port=-1: The port that this container exposes.
 -r, --replicas=1: Number of replicas to create for this container. Default is 1.
+ --requests="": The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'
 --restart="Always": The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and --replicas must be 1. Default 'Always'
 -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
 --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
diff --git a/docs/user-guide/replication-controller.md b/docs/user-guide/replication-controller.md index d0d968104d8..8a454302ec2 100644 --- a/docs/user-guide/replication-controller.md +++ b/docs/user-guide/replication-controller.md @@ -79,7 +79,7 @@ Note that replication controllers may themselves have labels and would generally
 Pods may be removed from a replication controller's target set by changing their labels. This technique may be used to remove pods from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed).
-Similarly, deleting a replication controller does not affect the pods it created. Its `replicas` field must first be set to 0 in order to delete the pods controlled. (Note that the client tool, kubectl, provides a single operation, [stop](kubectl/kubectl_stop.md) to delete both the replication controller and the pods it controls. However, there is no such operation in the API at the moment)
+Similarly, deleting a replication controller using the API does not affect the pods it created. Its `replicas` field must first be set to `0` in order to delete the pods it controls. (Note that the client tool, `kubectl`, provides a single operation, [delete](kubectl/kubectl_delete.md), to delete both the replication controller and the pods it controls. If you want to leave the pods running when deleting a replication controller, specify `--cascade=false`. However, there is no such operation in the API at the moment.)
 ## Responsibilities of the replication controller
diff --git a/docs/user-guide/service-accounts.md b/docs/user-guide/service-accounts.md index dbd99a44bc3..057f557220a 100644 --- a/docs/user-guide/service-accounts.md +++ b/docs/user-guide/service-accounts.md @@ -144,7 +144,7 @@ secrets/build-robot-secret
 Now you can confirm that the newly built secret is populated with an API token for the "build-robot" service account.
```console
-kubectl describe secrets/build-robot-secret
+$ kubectl describe secrets/build-robot-secret
 Name: build-robot-secret
 Namespace: default
 Labels:
diff --git a/docs/user-guide/services.md b/docs/user-guide/services.md index 53a328e3c47..f271f177711 100644 --- a/docs/user-guide/services.md +++ b/docs/user-guide/services.md @@ -433,6 +433,7 @@ information about the provisioned balancer will be published in the `Service`'s
 }
 ],
 "clusterIP": "10.0.171.239",
+ "loadBalancerIP": "78.11.24.19",
 "type": "LoadBalancer"
 },
 "status": {
@@ -448,7 +449,11 @@ information about the provisioned balancer will be published in the `Service`'s
 ```
 Traffic from the external load balancer will be directed at the backend `Pods`,
-though exactly how that works depends on the cloud provider.
+though exactly how that works depends on the cloud provider. Some cloud providers allow
+the `loadBalancerIP` to be specified. In those cases, the load balancer will be created
+with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified,
+an ephemeral IP will be assigned to the load balancer. If the `loadBalancerIP` is specified, but the
+cloud provider does not support the feature, the field will be ignored.
 ## Shortcomings
diff --git a/examples/examples_test.go b/examples/examples_test.go index 02d1d2040d7..087c0780221 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -194,6 +194,7 @@ func TestExampleObjectSchemas(t *testing.T) {
 "../examples/glusterfs": {
 "glusterfs-pod": &api.Pod{},
 "glusterfs-endpoints": &api.Endpoints{},
+ "glusterfs-service": &api.Service{},
 },
 "../docs/user-guide/liveness": {
 "exec-liveness": &api.Pod{},
diff --git a/examples/glusterfs/README.md b/examples/glusterfs/README.md index b4ae9ddaf9a..cde80d2ca5c 100644 --- a/examples/glusterfs/README.md +++ b/examples/glusterfs/README.md @@ -75,6 +75,15 @@ NAME ENDPOINTS
 glusterfs-cluster 10.240.106.152:1,10.240.79.157:1
 ```
+We also need to create a service for these endpoints, so that the endpoints will be persisted. We will add this service without a selector, to tell Kubernetes that we want to add its endpoints manually. You can see [glusterfs-service.json](glusterfs-service.json) for details.
+
+Use this command to create the service:
+
+```sh
+$ kubectl create -f examples/glusterfs/glusterfs-service.json
+```
+
+
 ### Create a POD
 The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustrates a sample configuration.
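+
+For instance, once the endpoints and service are in place, the sample pod can be created and inspected like this (a sketch; the exact output will vary):
+
+```sh
+$ kubectl create -f examples/glusterfs/glusterfs-pod.json
+$ kubectl get pods
+```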
diff --git a/examples/glusterfs/glusterfs-service.json b/examples/glusterfs/glusterfs-service.json new file mode 100644 index 00000000000..79139febd8e --- /dev/null +++ b/examples/glusterfs/glusterfs-service.json @@ -0,0 +1,12 @@ +{ + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "glusterfs-cluster" + }, + "spec": { + "ports": [ + {"port": 1} + ] + } +} diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index c4df7774807..9554e023aba 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -46,7 +46,7 @@ This example shows how to build a simple, multi-tier web application using Kuber - [Step Three: Fire up the replicated slave pods](#step-three-fire-up-the-replicated-slave-pods) - [Step Four: Create the redis slave service](#step-four-create-the-redis-slave-service) - [Step Five: Create the frontend replicated pods](#step-five-create-the-frontend-replicated-pods) - - [Step Six: Set up the guestbook frontend service.](#step-six-set-up-the-guestbook-frontend-service) + - [Step Six: Set up the guestbook frontend service](#step-six-set-up-the-guestbook-frontend-service) - [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific) - [Create the Frontend Service](#create-the-frontend-service) - [Accessing the guestbook site externally](#accessing-the-guestbook-site-externally) diff --git a/hack/after-build/update-generated-conversions.sh b/hack/after-build/update-generated-conversions.sh index 368c0c88211..91fd774ffcd 100755 --- a/hack/after-build/update-generated-conversions.sh +++ b/hack/after-build/update-generated-conversions.sh @@ -26,26 +26,27 @@ kube::golang::setup_env genconversion=$(kube::util::find-binary "genconversion") function generate_version() { - local version=$1 - local TMPFILE="/tmp/conversion_generated.$(date +%s).go" + local group_version=$1 + local TMPFILE="/tmp/conversion_generated.$(date +%s).go" - echo "Generating for ${version}" + echo "Generating for ${group_version}" - sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE" - cat >> "$TMPFILE" < "$TMPFILE" + cat >> "$TMPFILE" <> "$TMPFILE" + "${genconversion}" -v "${group_version}" -f - >> "$TMPFILE" - mv "$TMPFILE" "pkg/${version}/conversion_generated.go" + mv "$TMPFILE" "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/conversion_generated.go" } -DEFAULT_VERSIONS="api/v1 expapi/v1" -VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS} +# TODO(lavalamp): get this list by listing the pkg/apis/ directory? +DEFAULT_GROUP_VERSIONS="api/v1 experimental/v1" +VERSIONS=${VERSIONS:-$DEFAULT_GROUP_VERSIONS} for ver in $VERSIONS; do - # Ensure that the version being processed is registered by setting - # KUBE_API_VERSIONS. - KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}" + # Ensure that the version being processed is registered by setting + # KUBE_API_VERSIONS. 
+ KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}" done diff --git a/hack/after-build/update-generated-deep-copies.sh b/hack/after-build/update-generated-deep-copies.sh index 9046c70ead1..48336730306 100755 --- a/hack/after-build/update-generated-deep-copies.sh +++ b/hack/after-build/update-generated-deep-copies.sh @@ -25,42 +25,35 @@ kube::golang::setup_env gendeepcopy=$(kube::util::find-binary "gendeepcopy") -function result_file_name() { - local version=$1 - echo "pkg/${version}/deep_copy_generated.go" -} - function generate_version() { - local version=$1 - local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go" + local group_version=$1 + local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go" - echo "Generating for ${version}" + echo "Generating for ${group_version}" - sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE - cat >> $TMPFILE < $TMPFILE + cat >> $TMPFILE </var/run/lock/gcloud-components.lock if [[ ! -z ${JENKINS_EXPLICIT_VERSION:-} ]]; then # Use an explicit pinned version like "ci/v0.10.0-101-g6c814c4" or @@ -496,6 +601,21 @@ ARTIFACTS=${WORKSPACE}/_artifacts mkdir -p ${ARTIFACTS} export E2E_REPORT_DIR=${ARTIFACTS} +### Pre Set Up ### +# Install gcloud from a custom path if provided. Used to test GKE with gcloud +# at HEAD, release candidate. +if [[ ! -z "${CLOUDSDK_BUCKET:-}" ]]; then + sudo gsutil -m cp -r "${CLOUDSDK_BUCKET}" ~ + mv ~/$(basename "${CLOUDSDK_BUCKET}") ~/repo + mkdir ~/cloudsdk + tar zvxf ~/repo/google-cloud-sdk.tar.gz -C ~/cloudsdk + export CLOUDSDK_CORE_DISABLE_PROMPTS=1 + export CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL=file://${HOME}/repo/components-2.json + ~/cloudsdk/google-cloud-sdk/install.sh --disable-installation-options --bash-completion=false --path-update=false --usage-reporting=false + export PATH=${HOME}/cloudsdk/google-cloud-sdk/bin:${PATH} + export CLOUDSDK_CONFIG=/var/lib/jenkins/.config/gcloud +fi + ### Set up ### if [[ "${E2E_UP,,}" == "true" ]]; then go run ./hack/e2e.go ${E2E_OPT} -v --down diff --git a/hack/lib/util.sh b/hack/lib/util.sh index 82ad2354a72..b53882764b9 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -220,4 +220,35 @@ kube::util::analytics-link() { echo "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/${path}?pixel)]()" } +# Takes a group/version and returns the path to its location on disk, sans +# "pkg". E.g.: +# * default behavior: experimental/v1 -> apis/experimental/v1 +# * legacy behavior: api/v1 -> api/v1 +# * Special handling for only a group: experimental -> apis/experimental +# * Special handling for only "api" group: api -> api +# * Very special handling for "v1": v1 -> api/v1 +kube::util::group-version-to-pkg-path() { + local group_version="$1" + # Special cases first. + # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api, + # moving the results to pkg/apis/api. 
+ case "${group_version}" in + v1) + echo "api/v1" + ;; + api) + echo "api/v1" + ;; + api/*) + echo "${group_version}" + ;; + api/*) + echo "${group_version}" + ;; + *) + echo "apis/${group_version}" + ;; + esac +} + # ex: ts=2 sw=2 et filetype=sh diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index dda072a8360..f67ab1041aa 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -86,6 +86,8 @@ API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-"/127.0.0.1(:[0-9]+)?$,/loc KUBELET_PORT=${KUBELET_PORT:-10250} LOG_LEVEL=${LOG_LEVEL:-3} CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"} +RKT_PATH=${RKT_PATH:-""} +RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""} CHAOS_CHANCE=${CHAOS_CHANCE:-0.0} function test_apiserver_off { @@ -251,6 +253,8 @@ function start_kubelet { --v=${LOG_LEVEL} \ --chaos-chance="${CHAOS_CHANCE}" \ --container-runtime="${CONTAINER_RUNTIME}" \ + --rkt-path="${RKT_PATH}" \ + --rkt-stage1-image="${RKT_STAGE1_IMAGE}" \ --hostname-override="127.0.0.1" \ --address="127.0.0.1" \ --api-servers="${API_HOST}:${API_PORT}" \ diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index 6209b39ebc2..2ffa525c381 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -711,19 +711,19 @@ __EOF__ ### Create and delete persistent volume examples # Pre-condition: no persistent volumes currently exist - kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" '' + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" '' # Command kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}" - kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0001:' + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:' kubectl delete pv pv0001 "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}" - kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0002:' + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:' kubectl delete pv pv0002 "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}" - kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0003:' + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:' kubectl delete pv pv0003 "${kube_flags[@]}" # Post-condition: no PVs - kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" '' + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" '' ############################ # Persistent Volume Claims # @@ -731,21 +731,21 @@ __EOF__ ### Create and delete persistent volume claim examples # Pre-condition: no persistent volume claims currently exist - kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" '' + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' # Command kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}" - kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-1:' + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:' kubectl delete pvc myclaim-1 "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}" - kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-2:' + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 
'myclaim-2:' kubectl delete pvc myclaim-2 "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}" - kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-3:' + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:' kubectl delete pvc myclaim-3 "${kube_flags[@]}" # Post-condition: no PVCs - kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" '' + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' diff --git a/hack/update-generated-swagger-docs.sh b/hack/update-generated-swagger-docs.sh index c35a14893ad..27af1397e9c 100755 --- a/hack/update-generated-swagger-docs.sh +++ b/hack/update-generated-swagger-docs.sh @@ -24,14 +24,14 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env function generate_version() { - local groupVersion=$1 - local TMPFILE="/tmp/types_swagger_doc_generated.$(date +%s).go" + local group_version=$1 + local TMPFILE="/tmp/types_swagger_doc_generated.$(date +%s).go" - echo "Generating swagger type docs for ${groupVersion}" + echo "Generating swagger type docs for ${group_version}" - sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE - echo "package ${groupVersion##*/}" >> $TMPFILE - cat >> $TMPFILE < $TMPFILE + echo "package ${group_version##*/}" >> $TMPFILE + cat >> $TMPFILE <> $TMPFILE + GOPATH=$(godep path):$GOPATH go run cmd/genswaggertypedocs/swagger_type_docs.go -s \ + "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types.go" -f - \ + >> $TMPFILE - echo "// AUTO-GENERATED FUNCTIONS END HERE" >> $TMPFILE + echo "// AUTO-GENERATED FUNCTIONS END HERE" >> $TMPFILE - gofmt -w -s $TMPFILE - mv $TMPFILE "pkg/${groupVersion}/types_swagger_doc_generated.go" + gofmt -w -s $TMPFILE + mv $TMPFILE "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go" } -GROUP_VERSIONS="api/v1 expapi/v1" +GROUP_VERSIONS="api/v1 experimental/v1" # To avoid compile errors, remove the currently existing files. -for groupVersion in $GROUP_VERSIONS; do - rm -f "pkg/${groupVersion}/types_swagger_doc_generated.go" +for group_version in $GROUP_VERSIONS; do + rm -f "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go" done -for groupVersion in $GROUP_VERSIONS; do - generate_version "${groupVersion}" +for group_version in $GROUP_VERSIONS; do + generate_version "${group_version}" done "${KUBE_ROOT}/hack/update-swagger-spec.sh" diff --git a/hack/update-gofmt.sh b/hack/update-gofmt.sh new file mode 100755 index 00000000000..6f7727de83a --- /dev/null +++ b/hack/update-gofmt.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# GoFmt apparently is changing @ head... + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. 
+ +GO_VERSION=($(go version)) + +if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5') ]]; then + echo "Unknown go version '${GO_VERSION}', skipping gofmt." + exit 0 +fi + +cd "${KUBE_ROOT}" + +find_files() { + find . -not \( \ + \( \ + -wholename './output' \ + -o -wholename './_output' \ + -o -wholename './release' \ + -o -wholename './target' \ + -o -wholename '*/third_party/*' \ + -o -wholename '*/Godeps/*' \ + \) -prune \ + \) -name '*.go' +} + +GOFMT="gofmt -s -w" +find_files | xargs $GOFMT diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index a8816eff7e5..c541cc21188 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -1,4 +1,3 @@ -cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `--cadvisor-port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster). cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"] cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name @@ -39,7 +38,7 @@ cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control obje cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%} -cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%} +cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + enable_horizontal_pod_autoscaler + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%} diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 9fe4e91dc48..8f23af2de0f 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -1,274 +1,277 @@ -accept-hosts -accept-paths -account-for-pod-resources 
-admission-control -admission-control-config-file -advertise-address -advertised-address -algorithm-provider -all-namespaces -allocate-node-cidrs -allow-privileged -api-burst -api-prefix -api-rate -api-servers -api-token -api-version -authorization-mode -authorization-policy-file -auth-path -basic-auth-file -bench-pods -bench-quiet -bench-tasks -bench-workers -bind-address -bind-pods-burst -bind-pods-qps -cadvisor-port -cert-dir -certificate-authority -cgroup-root -chaos-chance -cleanup-iptables -client-ca-file -client-certificate -client-key -cloud-config -cloud-provider -cluster-cidr -cluster-dns -cluster-domain -cluster-name -cluster-tag -concurrent-endpoint-syncs -configure-cbr0 -contain-pod-resources -container-port -container-runtime -cors-allowed-origins -create-external-load-balancer -current-release-pr -current-replicas -default-container-cpu-limit -default-container-mem-limit -delay-shutdown -deleting-pods-burst -deleting-pods-qps -deployment-label-key -dest-file -disable-filter -docker-endpoint -docker-exec-handler -dockercfg-path -driver-port -dry-run -duration-sec -e2e-output-dir -enable-debugging-handlers -enable-horizontal-pod-autoscaler -enable-server -etcd-config -etcd-prefix -etcd-server -etcd-servers -event-burst -event-qps -event-ttl -executor-bindall -executor-logv -executor-path -executor-suicide-timeout -experimental-keystone-url -experimental-prefix -external-hostname -external-ip -failover-timeout -file-check-frequency -file-suffix -forward-services -framework-name -framework-weburi -func-dest -fuzz-iters -gce-project -gce-zone -gke-cluster -google-json-key -grace-period -ha-domain -healthz-bind-address -healthz-port -horizontal-pod-autoscaler-sync-period -hostname-override -host-network-sources -http-check-frequency -http-port -ignore-not-found -image-gc-high-threshold -image-gc-low-threshold -insecure-bind-address -insecure-port -insecure-skip-tls-verify -iptables-sync-period -ir-data-source -ir-dbname -ir-influxdb-host -ir-password -ir-user -jenkins-host -jenkins-jobs -km-path -kubectl-path -kubelet-cadvisor-port -kubelet-certificate-authority -kubelet-client-certificate -kubelet-client-key -kubelet-docker-endpoint -kubelet-host-network-sources -kubelet-https -kubelet-network-plugin -kubelet-pod-infra-container-image -kubelet-port -kubelet-root-dir -kubelet-sync-frequency -kubelet-timeout -kube-master -label-columns -last-release-pr -legacy-userspace-proxy -log-flush-frequency -long-running-request-regexp -low-diskspace-threshold-mb -manifest-url -manifest-url-header -masquerade-all -master-service-namespace -max-concurrency -max-connection-bytes-per-sec -maximum-dead-containers -maximum-dead-containers-per-container -max-log-age -max-log-backups -max-log-size -max-outgoing-burst -max-outgoing-qps -max-pods -max-requests-inflight -mesos-authentication-principal -mesos-authentication-provider -mesos-authentication-secret-file -mesos-cgroup-prefix -mesos-executor-cpus -mesos-executor-mem -mesos-master -mesos-role -mesos-user -minimum-container-ttl-duration -minion-max-log-age -minion-max-log-backups -minion-max-log-size -minion-path-override -min-pr-number -min-request-timeout -namespace-sync-period -network-plugin -network-plugin-dir -node-instance-group -node-monitor-grace-period -node-monitor-period -node-startup-grace-period -node-status-update-frequency -node-sync-period -no-headers -num-nodes -oidc-ca-file -oidc-client-id -oidc-issuer-url -oidc-username-claim -oom-score-adj -output-version -out-version -path-override -pod-cidr -pod-eviction-timeout 
-pod-infra-container-image -pod-running -policy-config-file -poll-interval -portal-net -private-mountns -prom-push-gateway -proxy-bindall -proxy-logv -proxy-port-range -public-address-override -pvclaimbinder-sync-period -read-only-port -really-crash-for-testing -reconcile-cooldown -reconcile-interval -register-node -register-retry-count -registry-burst -registry-qps -reject-methods -reject-paths -repo-root -report-dir -required-contexts -resolv-conf -resource-container -resource-quota-sync-period -resource-version -rkt-path -root-ca-file -root-dir -run-proxy -runtime-config -scheduler-config -secure-port -service-account-key-file -service-account-lookup -service-account-private-key-file -service-address -service-cluster-ip-range -service-node-port-range -service-node-ports -service-sync-period -session-affinity -show-all -shutdown-fd -shutdown-fifo -skip-munges -sort-by -source-file -ssh-keyfile -ssh-user -static-pods-config -stats-port -storage-version -streaming-connection-idle-timeout -suicide-timeout -sync-frequency -system-container -target-port -tcp-services -tls-cert-file -tls-private-key-file -token-auth-file -ttl-secs -type-src -unix-socket -update-period -upgrade-target -use-kubernetes-cluster-service -user-whitelist -watch-cache -watch-only -whitelist-override-label -www-prefix -retry_time -file_content_in_loop -cpu-cfs-quota +accept-hosts +accept-paths +account-for-pod-resources +admission-control +admission-control-config-file +advertise-address +advertised-address +algorithm-provider +all-namespaces +allocate-node-cidrs +allow-privileged +api-burst +api-prefix +api-rate +api-servers +api-token +api-version +authorization-mode +authorization-policy-file +auth-path +basic-auth-file +bench-pods +bench-quiet +bench-tasks +bench-workers +bind-address +bind-pods-burst +bind-pods-qps +cadvisor-port +cert-dir +certificate-authority +cgroup-root +chaos-chance +cleanup-iptables +client-ca-file +client-certificate +client-key +cloud-config +cloud-provider +cluster-cidr +cluster-dns +cluster-domain +cluster-name +cluster-tag +concurrent-endpoint-syncs +configure-cbr0 +contain-pod-resources +container-port +container-runtime +cors-allowed-origins +create-external-load-balancer +current-release-pr +current-replicas +default-container-cpu-limit +default-container-mem-limit +delay-shutdown +deleting-pods-burst +deleting-pods-qps +deployment-label-key +dest-file +disable-filter +docker-endpoint +docker-exec-handler +dockercfg-path +driver-port +dry-run +duration-sec +e2e-output-dir +enable-debugging-handlers +enable-horizontal-pod-autoscaler +enable-server +etcd-config +etcd-prefix +etcd-server +etcd-servers +event-burst +event-qps +event-ttl +executor-bindall +executor-logv +executor-path +executor-suicide-timeout +experimental-keystone-url +experimental-prefix +external-hostname +external-ip +failover-timeout +file-check-frequency +file-suffix +forward-services +framework-name +framework-weburi +func-dest +fuzz-iters +gce-project +gce-zone +gke-cluster +google-json-key +grace-period +ha-domain +healthz-bind-address +healthz-port +horizontal-pod-autoscaler-sync-period +hostname-override +host-network-sources +http-check-frequency +http-port +ignore-not-found +image-gc-high-threshold +image-gc-low-threshold +insecure-bind-address +insecure-port +insecure-skip-tls-verify +iptables-sync-period +ir-data-source +ir-dbname +ir-influxdb-host +ir-password +ir-user +jenkins-host +jenkins-jobs +km-path +kubectl-path +kubelet-cadvisor-port +kubelet-certificate-authority +kubelet-client-certificate 
+kubelet-client-key +kubelet-docker-endpoint +kubelet-host-network-sources +kubelet-https +kubelet-network-plugin +kubelet-pod-infra-container-image +kubelet-port +kubelet-root-dir +kubelet-sync-frequency +kubelet-timeout +kube-master +label-columns +last-release-pr +legacy-userspace-proxy +load-balancer-ip +log-flush-frequency +long-running-request-regexp +low-diskspace-threshold-mb +manifest-url +manifest-url-header +masquerade-all +master-service-namespace +max-concurrency +max-connection-bytes-per-sec +maximum-dead-containers +maximum-dead-containers-per-container +max-log-age +max-log-backups +max-log-size +max-outgoing-burst +max-outgoing-qps +max-pods +max-requests-inflight +mesos-authentication-principal +mesos-authentication-provider +mesos-authentication-secret-file +mesos-cgroup-prefix +mesos-executor-cpus +mesos-executor-mem +mesos-master +mesos-role +mesos-user +minimum-container-ttl-duration +minion-max-log-age +minion-max-log-backups +minion-max-log-size +minion-path-override +min-pr-number +min-request-timeout +namespace-sync-period +network-plugin +network-plugin-dir +node-instance-group +node-monitor-grace-period +node-monitor-period +node-startup-grace-period +node-status-update-frequency +node-sync-period +no-headers +num-nodes +oidc-ca-file +oidc-client-id +oidc-issuer-url +oidc-username-claim +oom-score-adj +output-version +out-version +path-override +pod-cidr +pod-eviction-timeout +pod-infra-container-image +pod-running +policy-config-file +poll-interval +portal-net +private-mountns +prom-push-gateway +proxy-bindall +proxy-logv +proxy-port-range +public-address-override +pvclaimbinder-sync-period +read-only-port +really-crash-for-testing +reconcile-cooldown +reconcile-interval +register-node +register-retry-count +registry-burst +registry-qps +reject-methods +reject-paths +repo-root +report-dir +required-contexts +resolv-conf +resource-container +resource-quota-sync-period +resource-version +rkt-path +rkt-stage1-image +root-ca-file +root-dir +run-proxy +runtime-config +scheduler-config +schema-cache-dir +secure-port +service-account-key-file +service-account-lookup +service-account-private-key-file +service-address +service-cluster-ip-range +service-node-port-range +service-node-ports +service-sync-period +session-affinity +show-all +shutdown-fd +shutdown-fifo +skip-munges +sort-by +source-file +ssh-keyfile +ssh-user +static-pods-config +stats-port +storage-version +streaming-connection-idle-timeout +suicide-timeout +sync-frequency +system-container +target-port +tcp-services +tls-cert-file +tls-private-key-file +token-auth-file +ttl-secs +type-src +unix-socket +update-period +upgrade-target +use-kubernetes-cluster-service +user-whitelist +watch-cache +watch-only +whitelist-override-label +www-prefix +retry_time +file_content_in_loop +cpu-cfs-quota diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index b7d8f958a7a..53c4f28c3ae 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -302,6 +302,7 @@ func deepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { out.Reason = in.Reason + out.Message = in.Message return nil } @@ -1958,6 +1959,7 @@ func deepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cl } func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error { + out.Type = in.Type if in.Ports != nil { out.Ports = 
make([]ServicePort, len(in.Ports)) for i := range in.Ports { @@ -1977,7 +1979,6 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl out.Selector = nil } out.ClusterIP = in.ClusterIP - out.Type = in.Type if in.ExternalIPs != nil { out.ExternalIPs = make([]string, len(in.ExternalIPs)) for i := range in.ExternalIPs { @@ -1986,6 +1987,7 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl } else { out.ExternalIPs = nil } + out.LoadBalancerIP = in.LoadBalancerIP out.SessionAffinity = in.SessionAffinity return nil } diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index beccf9604b5..c500623bb5e 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -33,8 +33,8 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/sets" - _ "k8s.io/kubernetes/pkg/expapi" - _ "k8s.io/kubernetes/pkg/expapi/v1" + _ "k8s.io/kubernetes/pkg/apis/experimental" + _ "k8s.io/kubernetes/pkg/apis/experimental/v1" flag "github.com/spf13/pflag" ) diff --git a/pkg/api/testapi/testapi.go b/pkg/api/testapi/testapi.go index 3d58968ead5..8b0cbe59d20 100644 --- a/pkg/api/testapi/testapi.go +++ b/pkg/api/testapi/testapi.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/meta" apiutil "k8s.io/kubernetes/pkg/api/util" - explatest "k8s.io/kubernetes/pkg/expapi/latest" + explatest "k8s.io/kubernetes/pkg/apis/experimental/latest" "k8s.io/kubernetes/pkg/runtime" ) diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index de11d81dbbd..c01f628d7ce 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -121,15 +121,15 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { c.FuzzNoCustom(j) // fuzz self without calling this function again //j.TemplateRef = nil // this is required for round trip }, - func(j *expapi.DeploymentStrategy, c fuzz.Continue) { + func(j *experimental.DeploymentStrategy, c fuzz.Continue) { c.FuzzNoCustom(j) // fuzz self without calling this function again // Ensure that strategyType is one of valid values. - strategyTypes := []expapi.DeploymentType{expapi.DeploymentRecreate, expapi.DeploymentRollingUpdate} + strategyTypes := []experimental.DeploymentType{experimental.DeploymentRecreate, experimental.DeploymentRollingUpdate} j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))] - if j.Type != expapi.DeploymentRollingUpdate { + if j.Type != experimental.DeploymentRollingUpdate { j.RollingUpdate = nil } else { - rollingUpdate := expapi.RollingUpdateDeployment{} + rollingUpdate := experimental.RollingUpdateDeployment{} if c.RandBool() { rollingUpdate.MaxUnavailable = util.NewIntOrStringFromInt(int(c.RandUint64())) rollingUpdate.MaxSurge = util.NewIntOrStringFromInt(int(c.RandUint64())) @@ -351,7 +351,7 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { c.FuzzNoCustom(n) n.Spec.ExternalID = "external" }, - func(s *expapi.APIVersion, c fuzz.Continue) { + func(s *experimental.APIVersion, c fuzz.Continue) { // We can't use c.RandString() here because it may generate empty // string, which will cause tests failure. 
s.APIGroup = "something" diff --git a/pkg/api/types.go b/pkg/api/types.go index cd36da099c3..ab0df0336fc 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -835,8 +835,10 @@ const ( ) type ContainerStateWaiting struct { - // Reason could be pulling image, + // A brief CamelCase string indicating details about why the container is in waiting state. Reason string `json:"reason,omitempty"` + // A human-readable message indicating details about why the container is in waiting state. + Message string `json:"message,omitempty"` } type ContainerStateRunning struct { @@ -1185,6 +1187,9 @@ type LoadBalancerIngress struct { // ServiceSpec describes the attributes that a user creates on a service type ServiceSpec struct { + // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer + Type ServiceType `json:"type,omitempty"` + // Required: The list of ports that are exposed by this service. Ports []ServicePort `json:"ports"` @@ -1200,13 +1205,17 @@ type ServiceSpec struct { // None can be specified for headless services when proxying is not required ClusterIP string `json:"clusterIP,omitempty"` - // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer - Type ServiceType `json:"type,omitempty"` - // ExternalIPs are used by external load balancers, or can be set by // users to handle external traffic that arrives at a node. ExternalIPs []string `json:"externalIPs,omitempty"` + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + // Required: Supports "ClientIP" and "None". Used to maintain session affinity. 
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` } diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index 70a047e8695..22826436560 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -340,6 +340,7 @@ func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta defaulting.(func(*api.ContainerStateWaiting))(in) } out.Reason = in.Reason + out.Message = in.Message return nil } @@ -2172,6 +2173,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServiceSpec))(in) } + out.Type = ServiceType(in.Type) if in.Ports != nil { out.Ports = make([]ServicePort, len(in.Ports)) for i := range in.Ports { @@ -2191,7 +2193,6 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service out.Selector = nil } out.ClusterIP = in.ClusterIP - out.Type = ServiceType(in.Type) if in.ExternalIPs != nil { out.ExternalIPs = make([]string, len(in.ExternalIPs)) for i := range in.ExternalIPs { @@ -2200,6 +2201,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service } else { out.ExternalIPs = nil } + out.LoadBalancerIP = in.LoadBalancerIP out.SessionAffinity = ServiceAffinity(in.SessionAffinity) return nil } @@ -2742,6 +2744,7 @@ func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *Container defaulting.(func(*ContainerStateWaiting))(in) } out.Reason = in.Reason + out.Message = in.Message return nil } @@ -4603,6 +4606,7 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service out.ExternalIPs = nil } out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP return nil } diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index e5db68e2478..60cb5aff58d 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -317,6 +317,7 @@ func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *Cont func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { out.Reason = in.Reason + out.Message = in.Message return nil } @@ -1992,6 +1993,7 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo out.ExternalIPs = nil } out.SessionAffinity = in.SessionAffinity + out.LoadBalancerIP = in.LoadBalancerIP return nil } diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index d70760b9e2a..0acc87d3346 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -1036,8 +1036,10 @@ const ( // ContainerStateWaiting is a waiting state of a container. type ContainerStateWaiting struct { - // (brief) reason the container is not yet running, such as pulling its image. + // (brief) reason the container is not yet running. Reason string `json:"reason,omitempty"` + // Message regarding why the container is not yet running. + Message string `json:"message,omitempty"` } // ContainerStateRunning is a running state of a container. @@ -1509,6 +1511,13 @@ type ServiceSpec struct { // Defaults to None. // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. 
+ // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + LoadBalancerIP string `json:"loadBalancerIP,omitempty"` } // ServicePort conatins information on service's port. diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go index ef56378a642..d56456dad2f 100644 --- a/pkg/api/v1/types_swagger_doc_generated.go +++ b/pkg/api/v1/types_swagger_doc_generated.go @@ -199,8 +199,9 @@ func (ContainerStateTerminated) SwaggerDoc() map[string]string { } var map_ContainerStateWaiting = map[string]string{ - "": "ContainerStateWaiting is a waiting state of a container.", - "reason": "(brief) reason the container is not yet running, such as pulling its image.", + "": "ContainerStateWaiting is a waiting state of a container.", + "reason": "(brief) reason the container is not yet running.", + "message": "Message regarding why the container is not yet running.", } func (ContainerStateWaiting) SwaggerDoc() map[string]string { @@ -1272,6 +1273,7 @@ var map_ServiceSpec = map[string]string{ "type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services", "externalIPs": "ExternalIPs are used by external load balancers, or can be set by users to handle external traffic that arrives at a node. Externally visible IPs (e.g. load balancers) that should be proxied to this service.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 1829e7a8dbf..8047998e327 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -1458,16 +1458,27 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList { keys.Insert(string(k)) min[string(k)] = q } - for k, q := range limit.Default { - allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...) - keys.Insert(string(k)) - defaults[string(k)] = q - } - for k, q := range limit.DefaultRequest { - allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...) 
- keys.Insert(string(k)) - defaultRequests[string(k)] = q + + if limit.Type == api.LimitTypePod { + if len(limit.Default) > 0 { + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default", i), limit.Default, "Default is not supported when limit type is Pod")) + } + if len(limit.DefaultRequest) > 0 { + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest", i), limit.DefaultRequest, "DefaultRequest is not supported when limit type is Pod")) + } + } else { + for k, q := range limit.Default { + allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...) + keys.Insert(string(k)) + defaults[string(k)] = q + } + for k, q := range limit.DefaultRequest { + allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...) + keys.Insert(string(k)) + defaultRequests[string(k)] = q + } } + for k := range limit.MaxLimitRequestRatio { allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].maxLimitRequestRatio[%s]", i, k))...) } @@ -1479,38 +1490,26 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList { defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { - minQuantity := limit.Min[api.ResourceName(k)] - maxQuantity := limit.Max[api.ResourceName(k)] allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].min[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) } if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { - minQuantity := limit.Min[api.ResourceName(k)] - defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)] allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) } if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { - maxQuantity := limit.Max[api.ResourceName(k)] - defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)] allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) } if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { - defaultQuantity := limit.Default[api.ResourceName(k)] - defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)] allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) } if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { - minQuantity := limit.Min[api.ResourceName(k)] - defaultQuantity := limit.Default[api.ResourceName(k)] allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) } if defaultQuantityFound &&
maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { - maxQuantity := limit.Max[api.ResourceName(k)] - defaultQuantity := limit.Default[api.ResourceName(k)] allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) } } diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 8e320114e23..7e25d526489 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -2911,6 +2911,12 @@ func TestValidateLimitRange(t *testing.T) { Type: api.LimitTypePod, Max: getResourceList("100m", "10000Mi"), Min: getResourceList("5m", "100Mi"), + MaxLimitRequestRatio: getResourceList("10", ""), + }, + { + Type: api.LimitTypeContainer, + Max: getResourceList("100m", "10000Mi"), + Min: getResourceList("5m", "100Mi"), Default: getResourceList("50m", "500Mi"), DefaultRequest: getResourceList("10m", "200Mi"), MaxLimitRequestRatio: getResourceList("10", ""), @@ -2923,7 +2929,7 @@ func TestValidateLimitRange(t *testing.T) { spec: api.LimitRangeSpec{ Limits: []api.LimitRangeItem{ { - Type: api.LimitTypePod, + Type: api.LimitTypeContainer, Max: getResourceList("100m", "10000T"), Min: getResourceList("5m", "100Mi"), Default: getResourceList("50m", "500Mi"), @@ -2978,6 +2984,32 @@ func TestValidateLimitRange(t *testing.T) { }}, "", }, + "default-limit-type-pod": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000m"), + Min: getResourceList("0m", "100m"), + Default: getResourceList("10m", "100m"), + }, + }, + }}, + "Default is not supported when limit type is Pod", + }, + "default-request-limit-type-pod": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000m"), + Min: getResourceList("0m", "100m"), + DefaultRequest: getResourceList("10m", "100m"), + }, + }, + }}, + "DefaultRequest is not supported when limit type is Pod", + }, "min value 100m is greater than max value 10m": { api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ Limits: []api.LimitRangeItem{ @@ -2994,7 +3026,7 @@ func TestValidateLimitRange(t *testing.T) { api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ Limits: []api.LimitRangeItem{ { - Type: api.LimitTypePod, + Type: api.LimitTypeContainer, Max: getResourceList("1", ""), Min: getResourceList("100m", ""), Default: getResourceList("2000m", ""), @@ -3007,7 +3039,7 @@ func TestValidateLimitRange(t *testing.T) { api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ Limits: []api.LimitRangeItem{ { - Type: api.LimitTypePod, + Type: api.LimitTypeContainer, Max: getResourceList("1", ""), Min: getResourceList("100m", ""), DefaultRequest: getResourceList("2000m", ""), diff --git a/pkg/expapi/deep_copy_generated.go b/pkg/apis/experimental/deep_copy_generated.go similarity index 76% rename from pkg/expapi/deep_copy_generated.go rename to pkg/apis/experimental/deep_copy_generated.go index 1879cc8e3ac..a2a1340dfa6 100644 --- a/pkg/expapi/deep_copy_generated.go +++ b/pkg/apis/experimental/deep_copy_generated.go @@ -16,7 +16,7 @@ limitations 
under the License. // DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. -package expapi +package experimental import ( time "time" @@ -757,29 +757,29 @@ func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c return nil } -func deepCopy_expapi_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { +func deepCopy_experimental_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { out.Name = in.Name out.APIGroup = in.APIGroup return nil } -func deepCopy_expapi_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { +func deepCopy_experimental_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_expapi_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { + if err := deepCopy_experimental_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_expapi_DaemonSetStatus(in.Status, &out.Status, c); err != nil { + if err := deepCopy_experimental_DaemonSetStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { +func deepCopy_experimental_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -789,7 +789,7 @@ func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conv if in.Items != nil { out.Items = make([]DaemonSet, len(in.Items)) for i := range in.Items { - if err := deepCopy_expapi_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_experimental_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -799,7 +799,7 @@ func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conv return nil } -func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { +func deepCopy_experimental_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { @@ -819,30 +819,30 @@ func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conv return nil } -func deepCopy_expapi_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { +func deepCopy_experimental_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled return nil } -func deepCopy_expapi_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { +func deepCopy_experimental_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_expapi_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { + if err := deepCopy_experimental_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_expapi_DeploymentStatus(in.Status, &out.Status, c); 
err != nil { + if err := deepCopy_experimental_DeploymentStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { +func deepCopy_experimental_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -852,7 +852,7 @@ func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *c if in.Items != nil { out.Items = make([]Deployment, len(in.Items)) for i := range in.Items { - if err := deepCopy_expapi_Deployment(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_experimental_Deployment(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -862,7 +862,7 @@ func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *c return nil } -func deepCopy_expapi_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { +func deepCopy_experimental_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { out.Replicas = in.Replicas if in.Selector != nil { out.Selector = make(map[string]string) @@ -880,24 +880,24 @@ func deepCopy_expapi_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *c } else { out.Template = nil } - if err := deepCopy_expapi_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { + if err := deepCopy_experimental_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { return err } out.UniqueLabelKey = in.UniqueLabelKey return nil } -func deepCopy_expapi_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { +func deepCopy_experimental_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas return nil } -func deepCopy_expapi_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { +func deepCopy_experimental_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { out.Type = in.Type if in.RollingUpdate != nil { out.RollingUpdate = new(RollingUpdateDeployment) - if err := deepCopy_expapi_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil { + if err := deepCopy_experimental_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil { return err } } else { @@ -906,19 +906,19 @@ func deepCopy_expapi_DeploymentStrategy(in DeploymentStrategy, out *DeploymentSt return nil } -func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { +func deepCopy_experimental_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_expapi_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { + if err := deepCopy_experimental_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { return err } if in.Status != nil { out.Status = new(HorizontalPodAutoscalerStatus) - if err := deepCopy_expapi_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil { + if err := 
deepCopy_experimental_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil { return err } } else { @@ -927,7 +927,7 @@ func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *Ho return nil } -func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { +func deepCopy_experimental_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -937,7 +937,7 @@ func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, if in.Items != nil { out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) for i := range in.Items { - if err := deepCopy_expapi_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_experimental_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -947,10 +947,10 @@ func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, return nil } -func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { +func deepCopy_experimental_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { if in.ScaleRef != nil { out.ScaleRef = new(SubresourceReference) - if err := deepCopy_expapi_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil { + if err := deepCopy_experimental_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil { return err } } else { @@ -958,18 +958,18 @@ func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, } out.MinCount = in.MinCount out.MaxCount = in.MaxCount - if err := deepCopy_expapi_ResourceConsumption(in.Target, &out.Target, c); err != nil { + if err := deepCopy_experimental_ResourceConsumption(in.Target, &out.Target, c); err != nil { return err } return nil } -func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { +func deepCopy_experimental_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas if in.CurrentConsumption != nil { out.CurrentConsumption = new(ResourceConsumption) - if err := deepCopy_expapi_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil { + if err := deepCopy_experimental_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil { return err } } else { @@ -986,14 +986,129 @@ func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerSta return nil } -func deepCopy_expapi_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { +func deepCopy_experimental_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := deepCopy_experimental_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := deepCopy_experimental_JobStatus(in.Status, &out.Status, c); err != nil { + 
return err + } + return nil +} + +func deepCopy_experimental_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := deepCopy_util_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := deepCopy_util_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func deepCopy_experimental_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := deepCopy_api_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]Job, len(in.Items)) + for i := range in.Items { + if err := deepCopy_experimental_Job(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func deepCopy_experimental_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { + if in.Parallelism != nil { + out.Parallelism = new(int) + *out.Parallelism = *in.Parallelism + } else { + out.Parallelism = nil + } + if in.Completions != nil { + out.Completions = new(int) + *out.Completions = *in.Completions + } else { + out.Completions = nil + } + if in.Selector != nil { + out.Selector = make(map[string]string) + for key, val := range in.Selector { + out.Selector[key] = val + } + } else { + out.Selector = nil + } + if in.Template != nil { + out.Template = new(api.PodTemplateSpec) + if err := deepCopy_api_PodTemplateSpec(*in.Template, out.Template, c); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func deepCopy_experimental_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + out.Conditions = make([]JobCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := deepCopy_experimental_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + out.StartTime = new(util.Time) + if err := deepCopy_util_Time(*in.StartTime, out.StartTime, c); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + out.CompletionTime = new(util.Time) + if err := deepCopy_util_Time(*in.CompletionTime, out.CompletionTime, c); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Successful = in.Successful + out.Unsuccessful = in.Unsuccessful + return nil +} + +func deepCopy_experimental_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } return nil } -func deepCopy_expapi_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error { +func deepCopy_experimental_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error { out.Resource = in.Resource if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil { return err @@ -1001,7 +1116,7 @@ func deepCopy_expapi_ResourceConsumption(in ResourceConsumption, out *ResourceCo return nil } -func deepCopy_expapi_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { +func 
deepCopy_experimental_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { if err := deepCopy_util_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil { return err } @@ -1012,28 +1127,28 @@ func deepCopy_expapi_RollingUpdateDeployment(in RollingUpdateDeployment, out *Ro return nil } -func deepCopy_expapi_Scale(in Scale, out *Scale, c *conversion.Cloner) error { +func deepCopy_experimental_Scale(in Scale, out *Scale, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_expapi_ScaleSpec(in.Spec, &out.Spec, c); err != nil { + if err := deepCopy_experimental_ScaleSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_expapi_ScaleStatus(in.Status, &out.Status, c); err != nil { + if err := deepCopy_experimental_ScaleStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_expapi_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { +func deepCopy_experimental_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { out.Replicas = in.Replicas return nil } -func deepCopy_expapi_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { +func deepCopy_experimental_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { out.Replicas = in.Replicas if in.Selector != nil { out.Selector = make(map[string]string) @@ -1046,7 +1161,7 @@ func deepCopy_expapi_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion return nil } -func deepCopy_expapi_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { +func deepCopy_experimental_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name @@ -1055,7 +1170,7 @@ func deepCopy_expapi_SubresourceReference(in SubresourceReference, out *Subresou return nil } -func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { +func deepCopy_experimental_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -1066,7 +1181,7 @@ func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyRe if in.Versions != nil { out.Versions = make([]APIVersion, len(in.Versions)) for i := range in.Versions { - if err := deepCopy_expapi_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil { + if err := deepCopy_experimental_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil { return err } } @@ -1076,7 +1191,7 @@ func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyRe return nil } -func deepCopy_expapi_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { +func deepCopy_experimental_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -1094,7 +1209,7 @@ func deepCopy_expapi_ThirdPartyResourceData(in ThirdPartyResourceData, out *Thir return nil } -func deepCopy_expapi_ThirdPartyResourceDataList(in 
ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { +func deepCopy_experimental_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -1104,7 +1219,7 @@ func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, o if in.Items != nil { out.Items = make([]ThirdPartyResourceData, len(in.Items)) for i := range in.Items { - if err := deepCopy_expapi_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_experimental_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -1114,7 +1229,7 @@ func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, o return nil } -func deepCopy_expapi_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { +func deepCopy_experimental_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -1124,7 +1239,7 @@ func deepCopy_expapi_ThirdPartyResourceList(in ThirdPartyResourceList, out *Thir if in.Items != nil { out.Items = make([]ThirdPartyResource, len(in.Items)) for i := range in.Items { - if err := deepCopy_expapi_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_experimental_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -1192,31 +1307,36 @@ func init() { deepCopy_api_VolumeMount, deepCopy_api_VolumeSource, deepCopy_resource_Quantity, - deepCopy_expapi_APIVersion, - deepCopy_expapi_DaemonSet, - deepCopy_expapi_DaemonSetList, - deepCopy_expapi_DaemonSetSpec, - deepCopy_expapi_DaemonSetStatus, - deepCopy_expapi_Deployment, - deepCopy_expapi_DeploymentList, - deepCopy_expapi_DeploymentSpec, - deepCopy_expapi_DeploymentStatus, - deepCopy_expapi_DeploymentStrategy, - deepCopy_expapi_HorizontalPodAutoscaler, - deepCopy_expapi_HorizontalPodAutoscalerList, - deepCopy_expapi_HorizontalPodAutoscalerSpec, - deepCopy_expapi_HorizontalPodAutoscalerStatus, - deepCopy_expapi_ReplicationControllerDummy, - deepCopy_expapi_ResourceConsumption, - deepCopy_expapi_RollingUpdateDeployment, - deepCopy_expapi_Scale, - deepCopy_expapi_ScaleSpec, - deepCopy_expapi_ScaleStatus, - deepCopy_expapi_SubresourceReference, - deepCopy_expapi_ThirdPartyResource, - deepCopy_expapi_ThirdPartyResourceData, - deepCopy_expapi_ThirdPartyResourceDataList, - deepCopy_expapi_ThirdPartyResourceList, + deepCopy_experimental_APIVersion, + deepCopy_experimental_DaemonSet, + deepCopy_experimental_DaemonSetList, + deepCopy_experimental_DaemonSetSpec, + deepCopy_experimental_DaemonSetStatus, + deepCopy_experimental_Deployment, + deepCopy_experimental_DeploymentList, + deepCopy_experimental_DeploymentSpec, + deepCopy_experimental_DeploymentStatus, + deepCopy_experimental_DeploymentStrategy, + deepCopy_experimental_HorizontalPodAutoscaler, + deepCopy_experimental_HorizontalPodAutoscalerList, + deepCopy_experimental_HorizontalPodAutoscalerSpec, + deepCopy_experimental_HorizontalPodAutoscalerStatus, + deepCopy_experimental_Job, + deepCopy_experimental_JobCondition, + deepCopy_experimental_JobList, + deepCopy_experimental_JobSpec, + deepCopy_experimental_JobStatus, + deepCopy_experimental_ReplicationControllerDummy, + 
deepCopy_experimental_ResourceConsumption, + deepCopy_experimental_RollingUpdateDeployment, + deepCopy_experimental_Scale, + deepCopy_experimental_ScaleSpec, + deepCopy_experimental_ScaleStatus, + deepCopy_experimental_SubresourceReference, + deepCopy_experimental_ThirdPartyResource, + deepCopy_experimental_ThirdPartyResourceData, + deepCopy_experimental_ThirdPartyResourceDataList, + deepCopy_experimental_ThirdPartyResourceList, deepCopy_util_IntOrString, deepCopy_util_Time, ) diff --git a/pkg/expapi/latest/latest.go b/pkg/apis/experimental/latest/latest.go similarity index 93% rename from pkg/expapi/latest/latest.go rename to pkg/apis/experimental/latest/latest.go index 13dbc2c70d8..5a231b0b9d4 100644 --- a/pkg/expapi/latest/latest.go +++ b/pkg/apis/experimental/latest/latest.go @@ -23,8 +23,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/registered" - _ "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/v1" + _ "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/v1" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" ) @@ -39,7 +39,7 @@ var ( RESTMapper meta.RESTMapper ) -const importPrefix = "k8s.io/kubernetes/pkg/expapi" +const importPrefix = "k8s.io/kubernetes/pkg/apis/experimental" func init() { Version = registered.RegisteredVersions[0] diff --git a/pkg/expapi/register.go b/pkg/apis/experimental/register.go similarity index 91% rename from pkg/expapi/register.go rename to pkg/apis/experimental/register.go index 10e656da2cb..1c207f083dd 100644 --- a/pkg/expapi/register.go +++ b/pkg/apis/experimental/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package expapi +package experimental import ( "k8s.io/kubernetes/pkg/api" @@ -32,6 +32,8 @@ func addKnownTypes() { &DeploymentList{}, &HorizontalPodAutoscaler{}, &HorizontalPodAutoscalerList{}, + &Job{}, + &JobList{}, &ReplicationControllerDummy{}, &Scale{}, &ThirdPartyResource{}, @@ -47,6 +49,8 @@ func (*Deployment) IsAnAPIObject() {} func (*DeploymentList) IsAnAPIObject() {} func (*HorizontalPodAutoscaler) IsAnAPIObject() {} func (*HorizontalPodAutoscalerList) IsAnAPIObject() {} +func (*Job) IsAnAPIObject() {} +func (*JobList) IsAnAPIObject() {} func (*ReplicationControllerDummy) IsAnAPIObject() {} func (*Scale) IsAnAPIObject() {} func (*ThirdPartyResource) IsAnAPIObject() {} diff --git a/pkg/expapi/testapi/testapi.go b/pkg/apis/experimental/testapi/testapi.go similarity index 96% rename from pkg/expapi/testapi/testapi.go rename to pkg/apis/experimental/testapi/testapi.go index 5a344b8c95b..229a1bf04ac 100644 --- a/pkg/expapi/testapi/testapi.go +++ b/pkg/apis/experimental/testapi/testapi.go @@ -19,7 +19,7 @@ package testapi import ( "strings" - "k8s.io/kubernetes/pkg/expapi/latest" + "k8s.io/kubernetes/pkg/apis/experimental/latest" ) // Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. diff --git a/pkg/expapi/types.go b/pkg/apis/experimental/types.go similarity index 79% rename from pkg/expapi/types.go rename to pkg/apis/experimental/types.go index e136b93a890..aea6f77e307 100644 --- a/pkg/expapi/types.go +++ b/pkg/apis/experimental/types.go @@ -15,7 +15,7 @@ limitations under the License. */ /* -This file (together with pkg/expapi/v1/types.go) contain the experimental +This file (together with pkg/apis/experimental/v1/types.go) contains the experimental types in kubernetes.
These API objects are experimental, meaning that the APIs may be broken at any time by the kubernetes team. @@ -26,7 +26,7 @@ beyond registration differences. In other words, experimental API group support is experimental. */ -package expapi +package experimental import ( "k8s.io/kubernetes/pkg/api" @@ -362,3 +362,102 @@ type ThirdPartyResourceDataList struct { // Items is a list of third party objects Items []ThirdPartyResourceData `json:"items"` } + +// Job represents the configuration of a single job. +type Job struct { + api.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + api.ObjectMeta `json:"metadata,omitempty"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty"` + + // Status is a structure describing the current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status JobStatus `json:"status,omitempty"` +} + +// JobList is a collection of jobs. +type JobList struct { + api.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + api.ListMeta `json:"metadata,omitempty"` + + // Items is the list of Job. + Items []Job `json:"items"` +} + +// JobSpec describes what the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + Parallelism *int `json:"parallelism,omitempty"` + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Defaults to 1. + Completions *int `json:"completions,omitempty"` + + // Selector is a label query over pods that should match the pod count. + Selector map[string]string `json:"selector"` + + // Template is the object that describes the pod that will be created when + // executing a job. + Template *api.PodTemplateSpec `json:"template"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + StartTime *util.Time `json:"startTime,omitempty"` + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + CompletionTime *util.Time `json:"completionTime,omitempty"` + + // Active is the number of actively running pods. + Active int `json:"active,omitempty"` + + // Successful is the number of pods which reached Phase Succeeded. + Successful int `json:"successful,omitempty"` + + // Unsuccessful is the number of pod failures; this applies only to jobs + // created with RestartPolicyNever, otherwise this value will always be 0.
+ Unsuccessful int `json:"unsuccessful,omitempty"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" +) + +// JobCondition describes the current state of a job. +type JobCondition struct { + // Type of job condition, currently only Complete. + Type JobConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus `json:"status"` + // Last time the condition was checked. + LastProbeTime util.Time `json:"lastProbeTime,omitempty"` + // Last time the condition transitioned from one status to another. + LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty"` +} diff --git a/pkg/expapi/v1/conversion.go b/pkg/apis/experimental/v1/conversion.go similarity index 78% rename from pkg/expapi/v1/conversion.go rename to pkg/apis/experimental/v1/conversion.go index f977ec4bccf..480d1d52cee 100644 --- a/pkg/expapi/v1/conversion.go +++ b/pkg/apis/experimental/v1/conversion.go @@ -21,8 +21,8 @@ import ( "k8s.io/kubernetes/pkg/api" v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/util" ) @@ -31,12 +31,12 @@ func addConversionFuncs() { err := api.Scheme.AddConversionFuncs( convert_api_PodSpec_To_v1_PodSpec, convert_v1_PodSpec_To_api_PodSpec, - convert_expapi_DeploymentSpec_To_v1_DeploymentSpec, - convert_v1_DeploymentSpec_To_expapi_DeploymentSpec, - convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy, - convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy, - convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, - convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment, + convert_experimental_DeploymentSpec_To_v1_DeploymentSpec, + convert_v1_DeploymentSpec_To_experimental_DeploymentSpec, + convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy, + convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy, + convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, + convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately.
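For reference, the new experimental Job types introduced above compose as follows. This is a minimal, illustrative sketch, not part of this diff: the job name, labels, and container image are invented, and it only exercises fields defined in the new types (note that Parallelism and Completions are *int, hence the local variables).

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/experimental"
)

func main() {
	parallelism := 2 // run at most 2 pods at a time
	completions := 5 // the job is done after 5 pods succeed

	job := experimental.Job{
		ObjectMeta: api.ObjectMeta{Name: "example-job", Namespace: api.NamespaceDefault},
		Spec: experimental.JobSpec{
			Parallelism: &parallelism,
			Completions: &completions,
			// Selector must match the labels on the pod template below.
			Selector: map[string]string{"job": "example-job"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: map[string]string{"job": "example-job"}},
				Spec: api.PodSpec{
					// Per the JobStatus comment above, Unsuccessful is only
					// meaningful for jobs created with RestartPolicyNever.
					RestartPolicy: api.RestartPolicyNever,
					Containers:    []api.Container{{Name: "worker", Image: "busybox"}},
				},
			},
		},
	}

	fmt.Printf("job %q: parallelism=%d completions=%d\n",
		job.Name, *job.Spec.Parallelism, *job.Spec.Completions)
}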
@@ -178,9 +178,9 @@ func convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve return nil } -func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { +func convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(in *experimental.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DeploymentSpec))(in) + defaulting.(func(*experimental.DeploymentSpec))(in) } out.Replicas = new(int) *out.Replicas = in.Replicas @@ -200,7 +200,7 @@ func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpe } else { out.Template = nil } - if err := convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.UniqueLabelKey = new(string) @@ -208,7 +208,7 @@ func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpe return nil } -func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out *expapi.DeploymentSpec, s conversion.Scope) error { +func convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec, out *experimental.DeploymentSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DeploymentSpec))(in) } @@ -231,7 +231,7 @@ func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out } else { out.Template = nil } - if err := convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.UniqueLabelKey != nil { @@ -240,14 +240,14 @@ func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out return nil } -func convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(in *expapi.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { +func convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(in *experimental.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DeploymentStrategy))(in) + defaulting.(func(*experimental.DeploymentStrategy))(in) } out.Type = DeploymentType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(RollingUpdateDeployment) - if err := convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -256,14 +256,14 @@ func convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(in *expapi.Deplo return nil } -func convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(in *DeploymentStrategy, out *expapi.DeploymentStrategy, s conversion.Scope) error { +func convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(in *DeploymentStrategy, out *experimental.DeploymentStrategy, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DeploymentStrategy))(in) } - out.Type = 
expapi.DeploymentType(in.Type) + out.Type = experimental.DeploymentType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(expapi.RollingUpdateDeployment) - if err := convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(experimental.RollingUpdateDeployment) + if err := convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -272,9 +272,9 @@ func convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(in *DeploymentSt return nil } -func convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *expapi.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { +func convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *experimental.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.RollingUpdateDeployment))(in) + defaulting.(func(*experimental.RollingUpdateDeployment))(in) } if out.MaxUnavailable == nil { out.MaxUnavailable = &util.IntOrString{} @@ -292,7 +292,7 @@ func convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *ex return nil } -func convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment(in *RollingUpdateDeployment, out *expapi.RollingUpdateDeployment, s conversion.Scope) error { +func convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in *RollingUpdateDeployment, out *experimental.RollingUpdateDeployment, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*RollingUpdateDeployment))(in) } diff --git a/pkg/expapi/v1/conversion_generated.go b/pkg/apis/experimental/v1/conversion_generated.go similarity index 74% rename from pkg/expapi/v1/conversion_generated.go rename to pkg/apis/experimental/v1/conversion_generated.go index 12289ef2363..fae682c8401 100644 --- a/pkg/expapi/v1/conversion_generated.go +++ b/pkg/apis/experimental/v1/conversion_generated.go @@ -24,8 +24,8 @@ import ( api "k8s.io/kubernetes/pkg/api" resource "k8s.io/kubernetes/pkg/api/resource" v1 "k8s.io/kubernetes/pkg/api/v1" + experimental "k8s.io/kubernetes/pkg/apis/experimental" conversion "k8s.io/kubernetes/pkg/conversion" - expapi "k8s.io/kubernetes/pkg/expapi" ) func convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { @@ -1562,18 +1562,18 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.V return nil } -func convert_expapi_APIVersion_To_v1_APIVersion(in *expapi.APIVersion, out *APIVersion, s conversion.Scope) error { +func convert_experimental_APIVersion_To_v1_APIVersion(in *experimental.APIVersion, out *APIVersion, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.APIVersion))(in) + defaulting.(func(*experimental.APIVersion))(in) } out.Name = in.Name out.APIGroup = in.APIGroup return nil } -func convert_expapi_DaemonSet_To_v1_DaemonSet(in *expapi.DaemonSet, out *DaemonSet, s conversion.Scope) error { +func convert_experimental_DaemonSet_To_v1_DaemonSet(in *experimental.DaemonSet, out *DaemonSet, s conversion.Scope) error { if 
defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonSet))(in) + defaulting.(func(*experimental.DaemonSet))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1581,18 +1581,18 @@ func convert_expapi_DaemonSet_To_v1_DaemonSet(in *expapi.DaemonSet, out *DaemonS if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_experimental_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_experimental_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_expapi_DaemonSetList_To_v1_DaemonSetList(in *expapi.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { +func convert_experimental_DaemonSetList_To_v1_DaemonSetList(in *experimental.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonSetList))(in) + defaulting.(func(*experimental.DaemonSetList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1603,7 +1603,7 @@ func convert_expapi_DaemonSetList_To_v1_DaemonSetList(in *expapi.DaemonSetList, if in.Items != nil { out.Items = make([]DaemonSet, len(in.Items)) for i := range in.Items { - if err := convert_expapi_DaemonSet_To_v1_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_experimental_DaemonSet_To_v1_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -1613,9 +1613,9 @@ func convert_expapi_DaemonSetList_To_v1_DaemonSetList(in *expapi.DaemonSetList, return nil } -func convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec(in *expapi.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { +func convert_experimental_DaemonSetSpec_To_v1_DaemonSetSpec(in *experimental.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonSetSpec))(in) + defaulting.(func(*experimental.DaemonSetSpec))(in) } if in.Selector != nil { out.Selector = make(map[string]string) @@ -1636,9 +1636,9 @@ func convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec(in *expapi.DaemonSetSpec, return nil } -func convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus(in *expapi.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { +func convert_experimental_DaemonSetStatus_To_v1_DaemonSetStatus(in *experimental.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonSetStatus))(in) + defaulting.(func(*experimental.DaemonSetStatus))(in) } out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled @@ -1646,9 +1646,9 @@ func convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus(in *expapi.DaemonSetSt return nil } -func convert_expapi_Deployment_To_v1_Deployment(in *expapi.Deployment, out *Deployment, s conversion.Scope) error { +func convert_experimental_Deployment_To_v1_Deployment(in *experimental.Deployment, out 
*Deployment, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.Deployment))(in) + defaulting.(func(*experimental.Deployment))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1656,18 +1656,18 @@ func convert_expapi_Deployment_To_v1_Deployment(in *expapi.Deployment, out *Depl if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_expapi_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_experimental_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_expapi_DeploymentList_To_v1_DeploymentList(in *expapi.DeploymentList, out *DeploymentList, s conversion.Scope) error { +func convert_experimental_DeploymentList_To_v1_DeploymentList(in *experimental.DeploymentList, out *DeploymentList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DeploymentList))(in) + defaulting.(func(*experimental.DeploymentList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1678,7 +1678,7 @@ func convert_expapi_DeploymentList_To_v1_DeploymentList(in *expapi.DeploymentLis if in.Items != nil { out.Items = make([]Deployment, len(in.Items)) for i := range in.Items { - if err := convert_expapi_Deployment_To_v1_Deployment(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_experimental_Deployment_To_v1_Deployment(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -1688,18 +1688,18 @@ func convert_expapi_DeploymentList_To_v1_DeploymentList(in *expapi.DeploymentLis return nil } -func convert_expapi_DeploymentStatus_To_v1_DeploymentStatus(in *expapi.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { +func convert_experimental_DeploymentStatus_To_v1_DeploymentStatus(in *experimental.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DeploymentStatus))(in) + defaulting.(func(*experimental.DeploymentStatus))(in) } out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas return nil } -func convert_expapi_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *expapi.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { +func convert_experimental_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *experimental.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.HorizontalPodAutoscaler))(in) + defaulting.(func(*experimental.HorizontalPodAutoscaler))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1707,12 +1707,12 @@ func convert_expapi_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *ex if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := 
convert_expapi_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_experimental_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { return err } if in.Status != nil { out.Status = new(HorizontalPodAutoscalerStatus) - if err := convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in.Status, out.Status, s); err != nil { + if err := convert_experimental_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in.Status, out.Status, s); err != nil { return err } } else { @@ -1721,9 +1721,9 @@ func convert_expapi_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *ex return nil } -func convert_expapi_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *expapi.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { +func convert_experimental_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *experimental.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.HorizontalPodAutoscalerList))(in) + defaulting.(func(*experimental.HorizontalPodAutoscalerList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1734,7 +1734,7 @@ func convert_expapi_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerLis if in.Items != nil { out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) for i := range in.Items { - if err := convert_expapi_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_experimental_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -1744,13 +1744,13 @@ func convert_expapi_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerLis return nil } -func convert_expapi_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *expapi.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { +func convert_experimental_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *experimental.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.HorizontalPodAutoscalerSpec))(in) + defaulting.(func(*experimental.HorizontalPodAutoscalerSpec))(in) } if in.ScaleRef != nil { out.ScaleRef = new(SubresourceReference) - if err := convert_expapi_SubresourceReference_To_v1_SubresourceReference(in.ScaleRef, out.ScaleRef, s); err != nil { + if err := convert_experimental_SubresourceReference_To_v1_SubresourceReference(in.ScaleRef, out.ScaleRef, s); err != nil { return err } } else { @@ -1758,21 +1758,21 @@ func convert_expapi_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpe } out.MinCount = in.MinCount out.MaxCount = in.MaxCount - if err := convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(&in.Target, &out.Target, s); err != nil { + if err := convert_experimental_ResourceConsumption_To_v1_ResourceConsumption(&in.Target, &out.Target, s); err != nil { return err } return nil } -func convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *expapi.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s 
conversion.Scope) error { +func convert_experimental_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *experimental.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.HorizontalPodAutoscalerStatus))(in) + defaulting.(func(*experimental.HorizontalPodAutoscalerStatus))(in) } out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas if in.CurrentConsumption != nil { out.CurrentConsumption = new(ResourceConsumption) - if err := convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil { + if err := convert_experimental_ResourceConsumption_To_v1_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil { return err } } else { @@ -1788,9 +1788,137 @@ func convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerS return nil } -func convert_expapi_ReplicationControllerDummy_To_v1_ReplicationControllerDummy(in *expapi.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { +func convert_experimental_Job_To_v1_Job(in *experimental.Job, out *Job, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ReplicationControllerDummy))(in) + defaulting.(func(*experimental.Job))(in) + } + if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := convert_experimental_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := convert_experimental_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func convert_experimental_JobCondition_To_v1_JobCondition(in *experimental.JobCondition, out *JobCondition, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*experimental.JobCondition))(in) + } + out.Type = JobConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + if err := s.Convert(&in.LastProbeTime, &out.LastProbeTime, 0); err != nil { + return err + } + if err := s.Convert(&in.LastTransitionTime, &out.LastTransitionTime, 0); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func convert_experimental_JobList_To_v1_JobList(in *experimental.JobList, out *JobList, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*experimental.JobList))(in) + } + if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]Job, len(in.Items)) + for i := range in.Items { + if err := convert_experimental_Job_To_v1_Job(&in.Items[i], &out.Items[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func convert_experimental_JobSpec_To_v1_JobSpec(in *experimental.JobSpec, out *JobSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + 
defaulting.(func(*experimental.JobSpec))(in) + } + if in.Parallelism != nil { + out.Parallelism = new(int) + *out.Parallelism = *in.Parallelism + } else { + out.Parallelism = nil + } + if in.Completions != nil { + out.Completions = new(int) + *out.Completions = *in.Completions + } else { + out.Completions = nil + } + if in.Selector != nil { + out.Selector = make(map[string]string) + for key, val := range in.Selector { + out.Selector[key] = val + } + } else { + out.Selector = nil + } + if in.Template != nil { + out.Template = new(v1.PodTemplateSpec) + if err := convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func convert_experimental_JobStatus_To_v1_JobStatus(in *experimental.JobStatus, out *JobStatus, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*experimental.JobStatus))(in) + } + if in.Conditions != nil { + out.Conditions = make([]JobCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := convert_experimental_JobCondition_To_v1_JobCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + if err := s.Convert(&in.StartTime, &out.StartTime, 0); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + if err := s.Convert(&in.CompletionTime, &out.CompletionTime, 0); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Successful = in.Successful + out.Unsuccessful = in.Unsuccessful + return nil +} + +func convert_experimental_ReplicationControllerDummy_To_v1_ReplicationControllerDummy(in *experimental.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*experimental.ReplicationControllerDummy))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1798,9 +1926,9 @@ func convert_expapi_ReplicationControllerDummy_To_v1_ReplicationControllerDummy( return nil } -func convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(in *expapi.ResourceConsumption, out *ResourceConsumption, s conversion.Scope) error { +func convert_experimental_ResourceConsumption_To_v1_ResourceConsumption(in *experimental.ResourceConsumption, out *ResourceConsumption, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ResourceConsumption))(in) + defaulting.(func(*experimental.ResourceConsumption))(in) } out.Resource = v1.ResourceName(in.Resource) if err := s.Convert(&in.Quantity, &out.Quantity, 0); err != nil { @@ -1809,9 +1937,9 @@ func convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(in *expapi.Res return nil } -func convert_expapi_Scale_To_v1_Scale(in *expapi.Scale, out *Scale, s conversion.Scope) error { +func convert_experimental_Scale_To_v1_Scale(in *experimental.Scale, out *Scale, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.Scale))(in) + defaulting.(func(*experimental.Scale))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1819,26 +1947,26 @@ func convert_expapi_Scale_To_v1_Scale(in 
*expapi.Scale, out *Scale, s conversion if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_expapi_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_experimental_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_expapi_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_experimental_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_expapi_ScaleSpec_To_v1_ScaleSpec(in *expapi.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { +func convert_experimental_ScaleSpec_To_v1_ScaleSpec(in *experimental.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ScaleSpec))(in) + defaulting.(func(*experimental.ScaleSpec))(in) } out.Replicas = in.Replicas return nil } -func convert_expapi_ScaleStatus_To_v1_ScaleStatus(in *expapi.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { +func convert_experimental_ScaleStatus_To_v1_ScaleStatus(in *experimental.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ScaleStatus))(in) + defaulting.(func(*experimental.ScaleStatus))(in) } out.Replicas = in.Replicas if in.Selector != nil { @@ -1852,9 +1980,9 @@ func convert_expapi_ScaleStatus_To_v1_ScaleStatus(in *expapi.ScaleStatus, out *S return nil } -func convert_expapi_SubresourceReference_To_v1_SubresourceReference(in *expapi.SubresourceReference, out *SubresourceReference, s conversion.Scope) error { +func convert_experimental_SubresourceReference_To_v1_SubresourceReference(in *experimental.SubresourceReference, out *SubresourceReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.SubresourceReference))(in) + defaulting.(func(*experimental.SubresourceReference))(in) } out.Kind = in.Kind out.Namespace = in.Namespace @@ -1864,9 +1992,9 @@ func convert_expapi_SubresourceReference_To_v1_SubresourceReference(in *expapi.S return nil } -func convert_expapi_ThirdPartyResource_To_v1_ThirdPartyResource(in *expapi.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { +func convert_experimental_ThirdPartyResource_To_v1_ThirdPartyResource(in *experimental.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ThirdPartyResource))(in) + defaulting.(func(*experimental.ThirdPartyResource))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1878,7 +2006,7 @@ func convert_expapi_ThirdPartyResource_To_v1_ThirdPartyResource(in *expapi.Third if in.Versions != nil { out.Versions = make([]APIVersion, len(in.Versions)) for i := range in.Versions { - if err := convert_expapi_APIVersion_To_v1_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil { + if err := convert_experimental_APIVersion_To_v1_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil { return err } } @@ -1888,9 +2016,9 @@ func convert_expapi_ThirdPartyResource_To_v1_ThirdPartyResource(in *expapi.Third return nil } -func convert_expapi_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(in 
*expapi.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { +func convert_experimental_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(in *experimental.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ThirdPartyResourceData))(in) + defaulting.(func(*experimental.ThirdPartyResourceData))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1904,9 +2032,9 @@ func convert_expapi_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(in *expa return nil } -func convert_expapi_ThirdPartyResourceDataList_To_v1_ThirdPartyResourceDataList(in *expapi.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { +func convert_experimental_ThirdPartyResourceDataList_To_v1_ThirdPartyResourceDataList(in *experimental.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ThirdPartyResourceDataList))(in) + defaulting.(func(*experimental.ThirdPartyResourceDataList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1917,7 +2045,7 @@ func convert_expapi_ThirdPartyResourceDataList_To_v1_ThirdPartyResourceDataList( if in.Items != nil { out.Items = make([]ThirdPartyResourceData, len(in.Items)) for i := range in.Items { - if err := convert_expapi_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_experimental_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -1927,9 +2055,9 @@ func convert_expapi_ThirdPartyResourceDataList_To_v1_ThirdPartyResourceDataList( return nil } -func convert_expapi_ThirdPartyResourceList_To_v1_ThirdPartyResourceList(in *expapi.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { +func convert_experimental_ThirdPartyResourceList_To_v1_ThirdPartyResourceList(in *experimental.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.ThirdPartyResourceList))(in) + defaulting.(func(*experimental.ThirdPartyResourceList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1940,7 +2068,7 @@ func convert_expapi_ThirdPartyResourceList_To_v1_ThirdPartyResourceList(in *expa if in.Items != nil { out.Items = make([]ThirdPartyResource, len(in.Items)) for i := range in.Items { - if err := convert_expapi_ThirdPartyResource_To_v1_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_experimental_ThirdPartyResource_To_v1_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -1950,7 +2078,7 @@ func convert_expapi_ThirdPartyResourceList_To_v1_ThirdPartyResourceList(in *expa return nil } -func convert_v1_APIVersion_To_expapi_APIVersion(in *APIVersion, out *expapi.APIVersion, s conversion.Scope) error { +func convert_v1_APIVersion_To_experimental_APIVersion(in *APIVersion, out *experimental.APIVersion, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*APIVersion))(in) } @@ -1959,7 +2087,7 @@ 
func convert_v1_APIVersion_To_expapi_APIVersion(in *APIVersion, out *expapi.APIV return nil } -func convert_v1_DaemonSet_To_expapi_DaemonSet(in *DaemonSet, out *expapi.DaemonSet, s conversion.Scope) error { +func convert_v1_DaemonSet_To_experimental_DaemonSet(in *DaemonSet, out *experimental.DaemonSet, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonSet))(in) } @@ -1969,16 +2097,16 @@ func convert_v1_DaemonSet_To_expapi_DaemonSet(in *DaemonSet, out *expapi.DaemonS if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_v1_DaemonSetSpec_To_experimental_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_v1_DaemonSetStatus_To_experimental_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_v1_DaemonSetList_To_expapi_DaemonSetList(in *DaemonSetList, out *expapi.DaemonSetList, s conversion.Scope) error { +func convert_v1_DaemonSetList_To_experimental_DaemonSetList(in *DaemonSetList, out *experimental.DaemonSetList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonSetList))(in) } @@ -1989,9 +2117,9 @@ func convert_v1_DaemonSetList_To_expapi_DaemonSetList(in *DaemonSetList, out *ex return err } if in.Items != nil { - out.Items = make([]expapi.DaemonSet, len(in.Items)) + out.Items = make([]experimental.DaemonSet, len(in.Items)) for i := range in.Items { - if err := convert_v1_DaemonSet_To_expapi_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_v1_DaemonSet_To_experimental_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -2001,7 +2129,7 @@ func convert_v1_DaemonSetList_To_expapi_DaemonSetList(in *DaemonSetList, out *ex return nil } -func convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec(in *DaemonSetSpec, out *expapi.DaemonSetSpec, s conversion.Scope) error { +func convert_v1_DaemonSetSpec_To_experimental_DaemonSetSpec(in *DaemonSetSpec, out *experimental.DaemonSetSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonSetSpec))(in) } @@ -2024,7 +2152,7 @@ func convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec(in *DaemonSetSpec, out *ex return nil } -func convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus(in *DaemonSetStatus, out *expapi.DaemonSetStatus, s conversion.Scope) error { +func convert_v1_DaemonSetStatus_To_experimental_DaemonSetStatus(in *DaemonSetStatus, out *experimental.DaemonSetStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonSetStatus))(in) } @@ -2034,7 +2162,7 @@ func convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus(in *DaemonSetStatus, o return nil } -func convert_v1_Deployment_To_expapi_Deployment(in *Deployment, out *expapi.Deployment, s conversion.Scope) error { +func convert_v1_Deployment_To_experimental_Deployment(in *Deployment, out *experimental.Deployment, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Deployment))(in) } @@ -2044,16 +2172,16 @@ func 
convert_v1_Deployment_To_expapi_Deployment(in *Deployment, out *expapi.Depl if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_v1_DeploymentStatus_To_expapi_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_v1_DeploymentStatus_To_experimental_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_v1_DeploymentList_To_expapi_DeploymentList(in *DeploymentList, out *expapi.DeploymentList, s conversion.Scope) error { +func convert_v1_DeploymentList_To_experimental_DeploymentList(in *DeploymentList, out *experimental.DeploymentList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DeploymentList))(in) } @@ -2064,9 +2192,9 @@ func convert_v1_DeploymentList_To_expapi_DeploymentList(in *DeploymentList, out return err } if in.Items != nil { - out.Items = make([]expapi.Deployment, len(in.Items)) + out.Items = make([]experimental.Deployment, len(in.Items)) for i := range in.Items { - if err := convert_v1_Deployment_To_expapi_Deployment(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_v1_Deployment_To_experimental_Deployment(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -2076,7 +2204,7 @@ func convert_v1_DeploymentList_To_expapi_DeploymentList(in *DeploymentList, out return nil } -func convert_v1_DeploymentStatus_To_expapi_DeploymentStatus(in *DeploymentStatus, out *expapi.DeploymentStatus, s conversion.Scope) error { +func convert_v1_DeploymentStatus_To_experimental_DeploymentStatus(in *DeploymentStatus, out *experimental.DeploymentStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DeploymentStatus))(in) } @@ -2085,7 +2213,7 @@ func convert_v1_DeploymentStatus_To_expapi_DeploymentStatus(in *DeploymentStatus return nil } -func convert_v1_HorizontalPodAutoscaler_To_expapi_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *expapi.HorizontalPodAutoscaler, s conversion.Scope) error { +func convert_v1_HorizontalPodAutoscaler_To_experimental_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *experimental.HorizontalPodAutoscaler, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HorizontalPodAutoscaler))(in) } @@ -2095,12 +2223,12 @@ func convert_v1_HorizontalPodAutoscaler_To_expapi_HorizontalPodAutoscaler(in *Ho if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_v1_HorizontalPodAutoscalerSpec_To_expapi_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_v1_HorizontalPodAutoscalerSpec_To_experimental_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { return err } if in.Status != nil { - out.Status = new(expapi.HorizontalPodAutoscalerStatus) - if err := convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerStatus(in.Status, out.Status, s); err != nil { + out.Status = new(experimental.HorizontalPodAutoscalerStatus) + if err := convert_v1_HorizontalPodAutoscalerStatus_To_experimental_HorizontalPodAutoscalerStatus(in.Status, 
out.Status, s); err != nil { return err } } else { @@ -2109,7 +2237,7 @@ func convert_v1_HorizontalPodAutoscaler_To_expapi_HorizontalPodAutoscaler(in *Ho return nil } -func convert_v1_HorizontalPodAutoscalerList_To_expapi_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *expapi.HorizontalPodAutoscalerList, s conversion.Scope) error { +func convert_v1_HorizontalPodAutoscalerList_To_experimental_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *experimental.HorizontalPodAutoscalerList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HorizontalPodAutoscalerList))(in) } @@ -2120,9 +2248,9 @@ func convert_v1_HorizontalPodAutoscalerList_To_expapi_HorizontalPodAutoscalerLis return err } if in.Items != nil { - out.Items = make([]expapi.HorizontalPodAutoscaler, len(in.Items)) + out.Items = make([]experimental.HorizontalPodAutoscaler, len(in.Items)) for i := range in.Items { - if err := convert_v1_HorizontalPodAutoscaler_To_expapi_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_v1_HorizontalPodAutoscaler_To_experimental_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -2132,13 +2260,13 @@ func convert_v1_HorizontalPodAutoscalerList_To_expapi_HorizontalPodAutoscalerLis return nil } -func convert_v1_HorizontalPodAutoscalerSpec_To_expapi_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *expapi.HorizontalPodAutoscalerSpec, s conversion.Scope) error { +func convert_v1_HorizontalPodAutoscalerSpec_To_experimental_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *experimental.HorizontalPodAutoscalerSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HorizontalPodAutoscalerSpec))(in) } if in.ScaleRef != nil { - out.ScaleRef = new(expapi.SubresourceReference) - if err := convert_v1_SubresourceReference_To_expapi_SubresourceReference(in.ScaleRef, out.ScaleRef, s); err != nil { + out.ScaleRef = new(experimental.SubresourceReference) + if err := convert_v1_SubresourceReference_To_experimental_SubresourceReference(in.ScaleRef, out.ScaleRef, s); err != nil { return err } } else { @@ -2146,21 +2274,21 @@ func convert_v1_HorizontalPodAutoscalerSpec_To_expapi_HorizontalPodAutoscalerSpe } out.MinCount = in.MinCount out.MaxCount = in.MaxCount - if err := convert_v1_ResourceConsumption_To_expapi_ResourceConsumption(&in.Target, &out.Target, s); err != nil { + if err := convert_v1_ResourceConsumption_To_experimental_ResourceConsumption(&in.Target, &out.Target, s); err != nil { return err } return nil } -func convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *expapi.HorizontalPodAutoscalerStatus, s conversion.Scope) error { +func convert_v1_HorizontalPodAutoscalerStatus_To_experimental_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *experimental.HorizontalPodAutoscalerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HorizontalPodAutoscalerStatus))(in) } out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas if in.CurrentConsumption != nil { - out.CurrentConsumption = new(expapi.ResourceConsumption) - if err := convert_v1_ResourceConsumption_To_expapi_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil { + 
out.CurrentConsumption = new(experimental.ResourceConsumption) + if err := convert_v1_ResourceConsumption_To_experimental_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil { return err } } else { @@ -2176,7 +2304,135 @@ func convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerS return nil } -func convert_v1_ReplicationControllerDummy_To_expapi_ReplicationControllerDummy(in *ReplicationControllerDummy, out *expapi.ReplicationControllerDummy, s conversion.Scope) error { +func convert_v1_Job_To_experimental_Job(in *Job, out *experimental.Job, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*Job))(in) + } + if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := convert_v1_JobSpec_To_experimental_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := convert_v1_JobStatus_To_experimental_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func convert_v1_JobCondition_To_experimental_JobCondition(in *JobCondition, out *experimental.JobCondition, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*JobCondition))(in) + } + out.Type = experimental.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := s.Convert(&in.LastProbeTime, &out.LastProbeTime, 0); err != nil { + return err + } + if err := s.Convert(&in.LastTransitionTime, &out.LastTransitionTime, 0); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func convert_v1_JobList_To_experimental_JobList(in *JobList, out *experimental.JobList, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*JobList))(in) + } + if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]experimental.Job, len(in.Items)) + for i := range in.Items { + if err := convert_v1_Job_To_experimental_Job(&in.Items[i], &out.Items[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func convert_v1_JobSpec_To_experimental_JobSpec(in *JobSpec, out *experimental.JobSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*JobSpec))(in) + } + if in.Parallelism != nil { + out.Parallelism = new(int) + *out.Parallelism = *in.Parallelism + } else { + out.Parallelism = nil + } + if in.Completions != nil { + out.Completions = new(int) + *out.Completions = *in.Completions + } else { + out.Completions = nil + } + if in.Selector != nil { + out.Selector = make(map[string]string) + for key, val := range in.Selector { + out.Selector[key] = val + } + } else { + out.Selector = nil + } + if in.Template != nil { + out.Template = new(api.PodTemplateSpec) + if err := convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func convert_v1_JobStatus_To_experimental_JobStatus(in *JobStatus, 
out *experimental.JobStatus, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*JobStatus))(in) + } + if in.Conditions != nil { + out.Conditions = make([]experimental.JobCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := convert_v1_JobCondition_To_experimental_JobCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + if err := s.Convert(&in.StartTime, &out.StartTime, 0); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + if err := s.Convert(&in.CompletionTime, &out.CompletionTime, 0); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Successful = in.Successful + out.Unsuccessful = in.Unsuccessful + return nil +} + +func convert_v1_ReplicationControllerDummy_To_experimental_ReplicationControllerDummy(in *ReplicationControllerDummy, out *experimental.ReplicationControllerDummy, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ReplicationControllerDummy))(in) } @@ -2186,7 +2442,7 @@ func convert_v1_ReplicationControllerDummy_To_expapi_ReplicationControllerDummy( return nil } -func convert_v1_ResourceConsumption_To_expapi_ResourceConsumption(in *ResourceConsumption, out *expapi.ResourceConsumption, s conversion.Scope) error { +func convert_v1_ResourceConsumption_To_experimental_ResourceConsumption(in *ResourceConsumption, out *experimental.ResourceConsumption, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ResourceConsumption))(in) } @@ -2197,7 +2453,7 @@ func convert_v1_ResourceConsumption_To_expapi_ResourceConsumption(in *ResourceCo return nil } -func convert_v1_Scale_To_expapi_Scale(in *Scale, out *expapi.Scale, s conversion.Scope) error { +func convert_v1_Scale_To_experimental_Scale(in *Scale, out *experimental.Scale, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Scale))(in) } @@ -2207,16 +2463,16 @@ func convert_v1_Scale_To_expapi_Scale(in *Scale, out *expapi.Scale, s conversion if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_v1_ScaleSpec_To_expapi_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_v1_ScaleSpec_To_experimental_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_v1_ScaleStatus_To_expapi_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_v1_ScaleStatus_To_experimental_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_v1_ScaleSpec_To_expapi_ScaleSpec(in *ScaleSpec, out *expapi.ScaleSpec, s conversion.Scope) error { +func convert_v1_ScaleSpec_To_experimental_ScaleSpec(in *ScaleSpec, out *experimental.ScaleSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ScaleSpec))(in) } @@ -2224,7 +2480,7 @@ func convert_v1_ScaleSpec_To_expapi_ScaleSpec(in *ScaleSpec, out *expapi.ScaleSp return nil } -func convert_v1_ScaleStatus_To_expapi_ScaleStatus(in *ScaleStatus, out *expapi.ScaleStatus, s conversion.Scope) error { +func convert_v1_ScaleStatus_To_experimental_ScaleStatus(in *ScaleStatus, out 
*experimental.ScaleStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ScaleStatus))(in) } @@ -2240,7 +2496,7 @@ func convert_v1_ScaleStatus_To_expapi_ScaleStatus(in *ScaleStatus, out *expapi.S return nil } -func convert_v1_SubresourceReference_To_expapi_SubresourceReference(in *SubresourceReference, out *expapi.SubresourceReference, s conversion.Scope) error { +func convert_v1_SubresourceReference_To_experimental_SubresourceReference(in *SubresourceReference, out *experimental.SubresourceReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*SubresourceReference))(in) } @@ -2252,7 +2508,7 @@ func convert_v1_SubresourceReference_To_expapi_SubresourceReference(in *Subresou return nil } -func convert_v1_ThirdPartyResource_To_expapi_ThirdPartyResource(in *ThirdPartyResource, out *expapi.ThirdPartyResource, s conversion.Scope) error { +func convert_v1_ThirdPartyResource_To_experimental_ThirdPartyResource(in *ThirdPartyResource, out *experimental.ThirdPartyResource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ThirdPartyResource))(in) } @@ -2264,9 +2520,9 @@ func convert_v1_ThirdPartyResource_To_expapi_ThirdPartyResource(in *ThirdPartyRe } out.Description = in.Description if in.Versions != nil { - out.Versions = make([]expapi.APIVersion, len(in.Versions)) + out.Versions = make([]experimental.APIVersion, len(in.Versions)) for i := range in.Versions { - if err := convert_v1_APIVersion_To_expapi_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil { + if err := convert_v1_APIVersion_To_experimental_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil { return err } } @@ -2276,7 +2532,7 @@ func convert_v1_ThirdPartyResource_To_expapi_ThirdPartyResource(in *ThirdPartyRe return nil } -func convert_v1_ThirdPartyResourceData_To_expapi_ThirdPartyResourceData(in *ThirdPartyResourceData, out *expapi.ThirdPartyResourceData, s conversion.Scope) error { +func convert_v1_ThirdPartyResourceData_To_experimental_ThirdPartyResourceData(in *ThirdPartyResourceData, out *experimental.ThirdPartyResourceData, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ThirdPartyResourceData))(in) } @@ -2292,7 +2548,7 @@ func convert_v1_ThirdPartyResourceData_To_expapi_ThirdPartyResourceData(in *Thir return nil } -func convert_v1_ThirdPartyResourceDataList_To_expapi_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *expapi.ThirdPartyResourceDataList, s conversion.Scope) error { +func convert_v1_ThirdPartyResourceDataList_To_experimental_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *experimental.ThirdPartyResourceDataList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ThirdPartyResourceDataList))(in) } @@ -2303,9 +2559,9 @@ func convert_v1_ThirdPartyResourceDataList_To_expapi_ThirdPartyResourceDataList( return err } if in.Items != nil { - out.Items = make([]expapi.ThirdPartyResourceData, len(in.Items)) + out.Items = make([]experimental.ThirdPartyResourceData, len(in.Items)) for i := range in.Items { - if err := convert_v1_ThirdPartyResourceData_To_expapi_ThirdPartyResourceData(&in.Items[i], &out.Items[i], s); err != nil { + if err := 
convert_v1_ThirdPartyResourceData_To_experimental_ThirdPartyResourceData(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -2315,7 +2571,7 @@ func convert_v1_ThirdPartyResourceDataList_To_expapi_ThirdPartyResourceDataList( return nil } -func convert_v1_ThirdPartyResourceList_To_expapi_ThirdPartyResourceList(in *ThirdPartyResourceList, out *expapi.ThirdPartyResourceList, s conversion.Scope) error { +func convert_v1_ThirdPartyResourceList_To_experimental_ThirdPartyResourceList(in *ThirdPartyResourceList, out *experimental.ThirdPartyResourceList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ThirdPartyResourceList))(in) } @@ -2326,9 +2582,9 @@ func convert_v1_ThirdPartyResourceList_To_expapi_ThirdPartyResourceList(in *Thir return err } if in.Items != nil { - out.Items = make([]expapi.ThirdPartyResource, len(in.Items)) + out.Items = make([]experimental.ThirdPartyResource, len(in.Items)) for i := range in.Items { - if err := convert_v1_ThirdPartyResource_To_expapi_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_v1_ThirdPartyResource_To_experimental_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -2378,42 +2634,47 @@ func init() { convert_api_VolumeMount_To_v1_VolumeMount, convert_api_VolumeSource_To_v1_VolumeSource, convert_api_Volume_To_v1_Volume, - convert_expapi_APIVersion_To_v1_APIVersion, - convert_expapi_DaemonSetList_To_v1_DaemonSetList, - convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec, - convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus, - convert_expapi_DaemonSet_To_v1_DaemonSet, - convert_expapi_DeploymentList_To_v1_DeploymentList, - convert_expapi_DeploymentStatus_To_v1_DeploymentStatus, - convert_expapi_Deployment_To_v1_Deployment, - convert_expapi_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, - convert_expapi_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, - convert_expapi_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, - convert_expapi_ReplicationControllerDummy_To_v1_ReplicationControllerDummy, - convert_expapi_ResourceConsumption_To_v1_ResourceConsumption, - convert_expapi_ScaleSpec_To_v1_ScaleSpec, - convert_expapi_ScaleStatus_To_v1_ScaleStatus, - convert_expapi_Scale_To_v1_Scale, - convert_expapi_SubresourceReference_To_v1_SubresourceReference, - convert_expapi_ThirdPartyResourceDataList_To_v1_ThirdPartyResourceDataList, - convert_expapi_ThirdPartyResourceData_To_v1_ThirdPartyResourceData, - convert_expapi_ThirdPartyResourceList_To_v1_ThirdPartyResourceList, - convert_expapi_ThirdPartyResource_To_v1_ThirdPartyResource, - convert_v1_APIVersion_To_expapi_APIVersion, + convert_experimental_APIVersion_To_v1_APIVersion, + convert_experimental_DaemonSetList_To_v1_DaemonSetList, + convert_experimental_DaemonSetSpec_To_v1_DaemonSetSpec, + convert_experimental_DaemonSetStatus_To_v1_DaemonSetStatus, + convert_experimental_DaemonSet_To_v1_DaemonSet, + convert_experimental_DeploymentList_To_v1_DeploymentList, + convert_experimental_DeploymentStatus_To_v1_DeploymentStatus, + convert_experimental_Deployment_To_v1_Deployment, + convert_experimental_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, + convert_experimental_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + convert_experimental_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + 
convert_experimental_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + convert_experimental_JobCondition_To_v1_JobCondition, + convert_experimental_JobList_To_v1_JobList, + convert_experimental_JobSpec_To_v1_JobSpec, + convert_experimental_JobStatus_To_v1_JobStatus, + convert_experimental_Job_To_v1_Job, + convert_experimental_ReplicationControllerDummy_To_v1_ReplicationControllerDummy, + convert_experimental_ResourceConsumption_To_v1_ResourceConsumption, + convert_experimental_ScaleSpec_To_v1_ScaleSpec, + convert_experimental_ScaleStatus_To_v1_ScaleStatus, + convert_experimental_Scale_To_v1_Scale, + convert_experimental_SubresourceReference_To_v1_SubresourceReference, + convert_experimental_ThirdPartyResourceDataList_To_v1_ThirdPartyResourceDataList, + convert_experimental_ThirdPartyResourceData_To_v1_ThirdPartyResourceData, + convert_experimental_ThirdPartyResourceList_To_v1_ThirdPartyResourceList, + convert_experimental_ThirdPartyResource_To_v1_ThirdPartyResource, + convert_v1_APIVersion_To_experimental_APIVersion, convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, convert_v1_Capabilities_To_api_Capabilities, convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, convert_v1_ContainerPort_To_api_ContainerPort, convert_v1_Container_To_api_Container, - convert_v1_DaemonSetList_To_expapi_DaemonSetList, - convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec, - convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus, - convert_v1_DaemonSet_To_expapi_DaemonSet, - convert_v1_DeploymentList_To_expapi_DeploymentList, - convert_v1_DeploymentStatus_To_expapi_DeploymentStatus, - convert_v1_Deployment_To_expapi_Deployment, + convert_v1_DaemonSetList_To_experimental_DaemonSetList, + convert_v1_DaemonSetSpec_To_experimental_DaemonSetSpec, + convert_v1_DaemonSetStatus_To_experimental_DaemonSetStatus, + convert_v1_DaemonSet_To_experimental_DaemonSet, + convert_v1_DeploymentList_To_experimental_DeploymentList, + convert_v1_DeploymentStatus_To_experimental_DeploymentStatus, + convert_v1_Deployment_To_experimental_Deployment, convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, @@ -2425,12 +2686,17 @@ func init() { convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, convert_v1_HTTPGetAction_To_api_HTTPGetAction, convert_v1_Handler_To_api_Handler, - convert_v1_HorizontalPodAutoscalerList_To_expapi_HorizontalPodAutoscalerList, - convert_v1_HorizontalPodAutoscalerSpec_To_expapi_HorizontalPodAutoscalerSpec, - convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerStatus, - convert_v1_HorizontalPodAutoscaler_To_expapi_HorizontalPodAutoscaler, + convert_v1_HorizontalPodAutoscalerList_To_experimental_HorizontalPodAutoscalerList, + convert_v1_HorizontalPodAutoscalerSpec_To_experimental_HorizontalPodAutoscalerSpec, + convert_v1_HorizontalPodAutoscalerStatus_To_experimental_HorizontalPodAutoscalerStatus, + convert_v1_HorizontalPodAutoscaler_To_experimental_HorizontalPodAutoscaler, convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, + convert_v1_JobCondition_To_experimental_JobCondition, + convert_v1_JobList_To_experimental_JobList, + convert_v1_JobSpec_To_experimental_JobSpec, + convert_v1_JobStatus_To_experimental_JobStatus, + convert_v1_Job_To_experimental_Job, 
convert_v1_Lifecycle_To_api_Lifecycle, convert_v1_ListMeta_To_api_ListMeta, convert_v1_LocalObjectReference_To_api_LocalObjectReference, @@ -2441,21 +2707,21 @@ func init() { convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, convert_v1_Probe_To_api_Probe, convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - convert_v1_ReplicationControllerDummy_To_expapi_ReplicationControllerDummy, - convert_v1_ResourceConsumption_To_expapi_ResourceConsumption, + convert_v1_ReplicationControllerDummy_To_experimental_ReplicationControllerDummy, + convert_v1_ResourceConsumption_To_experimental_ResourceConsumption, convert_v1_ResourceRequirements_To_api_ResourceRequirements, convert_v1_SELinuxOptions_To_api_SELinuxOptions, - convert_v1_ScaleSpec_To_expapi_ScaleSpec, - convert_v1_ScaleStatus_To_expapi_ScaleStatus, - convert_v1_Scale_To_expapi_Scale, + convert_v1_ScaleSpec_To_experimental_ScaleSpec, + convert_v1_ScaleStatus_To_experimental_ScaleStatus, + convert_v1_Scale_To_experimental_Scale, convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, convert_v1_SecurityContext_To_api_SecurityContext, - convert_v1_SubresourceReference_To_expapi_SubresourceReference, + convert_v1_SubresourceReference_To_experimental_SubresourceReference, convert_v1_TCPSocketAction_To_api_TCPSocketAction, - convert_v1_ThirdPartyResourceDataList_To_expapi_ThirdPartyResourceDataList, - convert_v1_ThirdPartyResourceData_To_expapi_ThirdPartyResourceData, - convert_v1_ThirdPartyResourceList_To_expapi_ThirdPartyResourceList, - convert_v1_ThirdPartyResource_To_expapi_ThirdPartyResource, + convert_v1_ThirdPartyResourceDataList_To_experimental_ThirdPartyResourceDataList, + convert_v1_ThirdPartyResourceData_To_experimental_ThirdPartyResourceData, + convert_v1_ThirdPartyResourceList_To_experimental_ThirdPartyResourceList, + convert_v1_ThirdPartyResource_To_experimental_ThirdPartyResource, convert_v1_TypeMeta_To_api_TypeMeta, convert_v1_VolumeMount_To_api_VolumeMount, convert_v1_VolumeSource_To_api_VolumeSource, diff --git a/pkg/expapi/v1/deep_copy_generated.go b/pkg/apis/experimental/v1/deep_copy_generated.go similarity index 91% rename from pkg/expapi/v1/deep_copy_generated.go rename to pkg/apis/experimental/v1/deep_copy_generated.go index 0e5ba1cb976..bc58c0de5bb 100644 --- a/pkg/expapi/v1/deep_copy_generated.go +++ b/pkg/apis/experimental/v1/deep_copy_generated.go @@ -998,6 +998,121 @@ func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, return nil } +func deepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := deepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := deepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := deepCopy_util_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := deepCopy_util_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := 
deepCopy_v1_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]Job, len(in.Items)) + for i := range in.Items { + if err := deepCopy_v1_Job(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func deepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { + if in.Parallelism != nil { + out.Parallelism = new(int) + *out.Parallelism = *in.Parallelism + } else { + out.Parallelism = nil + } + if in.Completions != nil { + out.Completions = new(int) + *out.Completions = *in.Completions + } else { + out.Completions = nil + } + if in.Selector != nil { + out.Selector = make(map[string]string) + for key, val := range in.Selector { + out.Selector[key] = val + } + } else { + out.Selector = nil + } + if in.Template != nil { + out.Template = new(v1.PodTemplateSpec) + if err := deepCopy_v1_PodTemplateSpec(*in.Template, out.Template, c); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + out.Conditions = make([]JobCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := deepCopy_v1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + out.StartTime = new(util.Time) + if err := deepCopy_util_Time(*in.StartTime, out.StartTime, c); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + out.CompletionTime = new(util.Time) + if err := deepCopy_util_Time(*in.CompletionTime, out.CompletionTime, c); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Successful = in.Successful + out.Unsuccessful = in.Unsuccessful + return nil +} + func deepCopy_v1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err @@ -1228,6 +1343,11 @@ func init() { deepCopy_v1_HorizontalPodAutoscalerList, deepCopy_v1_HorizontalPodAutoscalerSpec, deepCopy_v1_HorizontalPodAutoscalerStatus, + deepCopy_v1_Job, + deepCopy_v1_JobCondition, + deepCopy_v1_JobList, + deepCopy_v1_JobSpec, + deepCopy_v1_JobStatus, deepCopy_v1_ReplicationControllerDummy, deepCopy_v1_ResourceConsumption, deepCopy_v1_RollingUpdateDeployment, diff --git a/pkg/expapi/v1/defaults.go b/pkg/apis/experimental/v1/defaults.go similarity index 100% rename from pkg/expapi/v1/defaults.go rename to pkg/apis/experimental/v1/defaults.go diff --git a/pkg/expapi/v1/defaults_test.go b/pkg/apis/experimental/v1/defaults_test.go similarity index 100% rename from pkg/expapi/v1/defaults_test.go rename to pkg/apis/experimental/v1/defaults_test.go diff --git a/pkg/expapi/v1/register.go b/pkg/apis/experimental/v1/register.go similarity index 93% rename from pkg/expapi/v1/register.go rename to pkg/apis/experimental/v1/register.go index 30bb3d923d4..88d0514b5a1 100644 --- a/pkg/expapi/v1/register.go +++ b/pkg/apis/experimental/v1/register.go @@ -36,6 +36,8 @@ func addKnownTypes() { &DeploymentList{}, &HorizontalPodAutoscaler{}, &HorizontalPodAutoscalerList{}, + &Job{}, + &JobList{}, &ReplicationControllerDummy{}, &Scale{}, &ThirdPartyResource{}, @@ -51,6 +53,8 @@ func (*Deployment) IsAnAPIObject() {} func (*DeploymentList) IsAnAPIObject() {} 
func (*HorizontalPodAutoscaler) IsAnAPIObject() {} func (*HorizontalPodAutoscalerList) IsAnAPIObject() {} +func (*Job) IsAnAPIObject() {} +func (*JobList) IsAnAPIObject() {} func (*ReplicationControllerDummy) IsAnAPIObject() {} func (*Scale) IsAnAPIObject() {} func (*ThirdPartyResource) IsAnAPIObject() {} diff --git a/pkg/expapi/v1/types.go b/pkg/apis/experimental/v1/types.go similarity index 80% rename from pkg/expapi/v1/types.go rename to pkg/apis/experimental/v1/types.go index e3d120f9e12..3be346140f4 100644 --- a/pkg/expapi/v1/types.go +++ b/pkg/apis/experimental/v1/types.go @@ -363,3 +363,102 @@ type ThirdPartyResourceDataList struct { // Items is the list of ThirdpartyResourceData. Items []ThirdPartyResourceData `json:"items"` } + +// Job represents the configuration of a single job. +type Job struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty"` + + // Status is a structure describing the current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status JobStatus `json:"status,omitempty"` +} + +// JobList is a collection of jobs. +type JobList struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of Job. + Items []Job `json:"items"` +} + +// JobSpec describes how the job execution will look. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + Parallelism *int `json:"parallelism,omitempty"` + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Defaults to 1. + Completions *int `json:"completions,omitempty"` + + // Selector is a label query over pods that should match the pod count. + Selector map[string]string `json:"selector"` + + // Template is the object that describes the pod that will be created when + // executing a job. + Template *v1.PodTemplateSpec `json:"template"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + StartTime *util.Time `json:"startTime,omitempty"` + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + CompletionTime *util.Time `json:"completionTime,omitempty"` + + // Active is the number of actively running pods. 
+ Active int `json:"active,omitempty"` + + // Successful is the number of pods which reached Phase Succeeded. + Successful int `json:"successful,omitempty"` + + // Unsuccessful is the number of pod failures; this applies only to jobs + // created with RestartPolicyNever, otherwise this value will always be 0. + Unsuccessful int `json:"unsuccessful,omitempty"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" +) + +// JobCondition describes the current state of a job. +type JobCondition struct { + // Type of job condition, currently only Complete. + Type JobConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status"` + // Last time the condition was checked. + LastProbeTime util.Time `json:"lastProbeTime,omitempty"` + // Last time the condition transitioned from one status to another. + LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty"` +} diff --git a/pkg/expapi/v1/types_swagger_doc_generated.go b/pkg/apis/experimental/v1/types_swagger_doc_generated.go similarity index 82% rename from pkg/expapi/v1/types_swagger_doc_generated.go rename to pkg/apis/experimental/v1/types_swagger_doc_generated.go index ced86254859..e43c3244ba3 100644 --- a/pkg/expapi/v1/types_swagger_doc_generated.go +++ b/pkg/apis/experimental/v1/types_swagger_doc_generated.go @@ -178,6 +178,67 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { return map_HorizontalPodAutoscalerStatus } +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing the current status of a job. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes the current state of a job.", + "type": "Type of job condition, currently only Complete.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Job.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes what the job execution will look like.", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism.", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Defaults to 1.", + "selector": "Selector is a label query over pods that should match the pod count.", + "template": "Template is the object that describes the pod that will be created when executing a job.", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "Conditions represent the latest available observations of an object's current state.", + "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations.
It is represented in RFC3339 form and is in UTC.", + "active": "Active is the number of actively running pods.", + "successful": "Successful is the number of pods which reached Phase Succeeded.", + "unsuccessful": "Unsuccessful is the number of pod failures; this applies only to jobs created with RestartPolicyNever; otherwise this value will always be 0.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + var map_ReplicationControllerDummy = map[string]string{ "": "Dummy definition", } diff --git a/pkg/expapi/validation/validation.go b/pkg/apis/experimental/validation/validation.go similarity index 73% rename from pkg/expapi/validation/validation.go rename to pkg/apis/experimental/validation/validation.go index 2e1c653808d..1a6454fa2cc 100644 --- a/pkg/expapi/validation/validation.go +++ b/pkg/apis/experimental/validation/validation.go @@ -21,13 +21,15 @@ import ( "k8s.io/kubernetes/pkg/api" apivalidation "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" errs "k8s.io/kubernetes/pkg/util/fielderrors" "k8s.io/kubernetes/pkg/util/sets" ) +const isNegativeErrorMsg string = `must be non-negative` + // ValidateHorizontalPodAutoscaler can be used to check whether the given autoscaler name is valid. // Prefix indicates this name will be used as part of generation, in which case trailing dashes are allowed. func ValidateHorizontalPodAutoscalerName(name string, prefix bool) (bool, string) { @@ -35,7 +37,7 @@ func ValidateHorizontalPodAutoscalerName(name string, prefix bool) (bool, string return apivalidation.ValidateReplicationControllerName(name, prefix) } -func validateHorizontalPodAutoscalerSpec(autoscaler expapi.HorizontalPodAutoscalerSpec) errs.ValidationErrorList { +func validateHorizontalPodAutoscalerSpec(autoscaler experimental.HorizontalPodAutoscalerSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if autoscaler.MinCount < 0 { allErrs = append(allErrs, errs.NewFieldInvalid("minCount", autoscaler.MinCount, `must be non-negative`)) @@ -57,25 +59,25 @@ func validateHorizontalPodAutoscalerSpec(autoscaler expapi.HorizontalPodAutoscal return allErrs } -func ValidateHorizontalPodAutoscaler(autoscaler *expapi.HorizontalPodAutoscaler) errs.ValidationErrorList { +func ValidateHorizontalPodAutoscaler(autoscaler *experimental.HorizontalPodAutoscaler) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName).Prefix("metadata")...) allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec)...) return allErrs } -func ValidateHorizontalPodAutoscalerUpdate(newAutoscler, oldAutoscaler *expapi.HorizontalPodAutoscaler) errs.ValidationErrorList { +func ValidateHorizontalPodAutoscalerUpdate(newAutoscler, oldAutoscaler *expapi.HorizontalPodAutoscaler) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&newAutoscler.ObjectMeta, &oldAutoscaler.ObjectMeta).Prefix("metadata")...) allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscler.Spec)...)
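(The validation hunk resumes just below.) For orientation, a minimal sketch, not part of this patch, of a Job built from the new `experimental/v1` types added above; the import paths and the `newInt` helper are assumptions for illustration:

```go
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	expv1 "k8s.io/kubernetes/pkg/apis/experimental/v1"
)

// newInt is an illustrative helper for the optional *int fields.
func newInt(v int) *int { return &v }

func exampleJob() expv1.Job {
	podLabels := map[string]string{"job": "example"}
	return expv1.Job{
		ObjectMeta: v1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: expv1.JobSpec{
			Parallelism: newInt(2), // at most two pods run concurrently
			Completions: newInt(5), // five successful pods complete the job
			Selector:    podLabels, // must match the template's labels
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{Labels: podLabels},
			},
		},
	}
}
```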
return allErrs } -func ValidateThirdPartyResourceUpdate(old, update *expapi.ThirdPartyResource) errs.ValidationErrorList { +func ValidateThirdPartyResourceUpdate(old, update *experimental.ThirdPartyResource) errs.ValidationErrorList { return ValidateThirdPartyResource(update) } -func ValidateThirdPartyResource(obj *expapi.ThirdPartyResource) errs.ValidationErrorList { +func ValidateThirdPartyResource(obj *experimental.ThirdPartyResource) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if len(obj.Name) == 0 { allErrs = append(allErrs, errs.NewFieldInvalid("name", obj.Name, "name must be non-empty")) @@ -95,7 +97,7 @@ func ValidateThirdPartyResource(obj *expapi.ThirdPartyResource) errs.ValidationE } // ValidateDaemonSet tests if required fields in the DaemonSet are set. -func ValidateDaemonSet(controller *expapi.DaemonSet) errs.ValidationErrorList { +func ValidateDaemonSet(controller *experimental.DaemonSet) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&controller.ObjectMeta, true, apivalidation.ValidateReplicationControllerName).Prefix("metadata")...) allErrs = append(allErrs, ValidateDaemonSetSpec(&controller.Spec).Prefix("spec")...) @@ -103,7 +105,7 @@ func ValidateDaemonSet(controller *expapi.DaemonSet) errs.ValidationErrorList { } // ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set. -func ValidateDaemonSetUpdate(oldController, controller *expapi.DaemonSet) errs.ValidationErrorList { +func ValidateDaemonSetUpdate(oldController, controller *experimental.DaemonSet) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta).Prefix("metadata")...) allErrs = append(allErrs, ValidateDaemonSetSpec(&controller.Spec).Prefix("spec")...) @@ -127,7 +129,7 @@ func ValidateDaemonSetTemplateUpdate(oldPodTemplate, podTemplate *api.PodTemplat } // ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. -func ValidateDaemonSetSpec(spec *expapi.DaemonSetSpec) errs.ValidationErrorList { +func ValidateDaemonSetSpec(spec *experimental.DaemonSetSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} selector := labels.Set(spec.Selector).AsSelector() @@ -204,7 +206,7 @@ func IsNotMoreThan100Percent(intOrStringValue util.IntOrString, fieldName string return allErrs } -func ValidateRollingUpdateDeployment(rollingUpdate *expapi.RollingUpdateDeployment, fieldName string) errs.ValidationErrorList { +func ValidateRollingUpdateDeployment(rollingUpdate *experimental.RollingUpdateDeployment, fieldName string) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fieldName+"maxUnavailable")...) allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fieldName+".maxSurge")...) 
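Since the surrounding hunks are dominated by the `expapi` to `experimental` rename, the change imposed on callers is mechanical; a sketch of the before/after import shape, illustrative only:

```go
package example

// Before this patch:
//   import "k8s.io/kubernetes/pkg/expapi"
//   import "k8s.io/kubernetes/pkg/client/unversioned/cache"
//
// After this patch:
import (
	"k8s.io/kubernetes/pkg/apis/experimental"
	"k8s.io/kubernetes/pkg/client/cache"
)

// Identifiers move with the package name: expapi.DaemonSet becomes
// experimental.DaemonSet; the cache API itself is unchanged.
var (
	_ experimental.DaemonSet
	_ cache.Store
)
```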
@@ -218,22 +220,22 @@ func ValidateRollingUpdateDeployment(rollingUpdate *expapi.RollingUpdateDeployme return allErrs } -func ValidateDeploymentStrategy(strategy *expapi.DeploymentStrategy, fieldName string) errs.ValidationErrorList { +func ValidateDeploymentStrategy(strategy *experimental.DeploymentStrategy, fieldName string) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if strategy.RollingUpdate == nil { return allErrs } switch strategy.Type { - case expapi.DeploymentRecreate: - allErrs = append(allErrs, errs.NewFieldForbidden("rollingUpdate", "rollingUpdate should be nil when strategy type is "+expapi.DeploymentRecreate)) - case expapi.DeploymentRollingUpdate: + case experimental.DeploymentRecreate: + allErrs = append(allErrs, errs.NewFieldForbidden("rollingUpdate", "rollingUpdate should be nil when strategy type is "+experimental.DeploymentRecreate)) + case experimental.DeploymentRollingUpdate: allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, "rollingUpdate")...) } return allErrs } // Validates given deployment spec. -func ValidateDeploymentSpec(spec *expapi.DeploymentSpec) errs.ValidationErrorList { +func ValidateDeploymentSpec(spec *experimental.DeploymentSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateNonEmptySelector(spec.Selector, "selector")...) allErrs = append(allErrs, apivalidation.ValidatePositiveField(int64(spec.Replicas), "replicas")...) @@ -243,28 +245,75 @@ func ValidateDeploymentSpec(spec *expapi.DeploymentSpec) errs.ValidationErrorLis return allErrs } -func ValidateDeploymentUpdate(old, update *expapi.Deployment) errs.ValidationErrorList { +func ValidateDeploymentUpdate(old, update *experimental.Deployment) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta).Prefix("metadata")...) allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec).Prefix("spec")...) return allErrs } -func ValidateDeployment(obj *expapi.Deployment) errs.ValidationErrorList { +func ValidateDeployment(obj *experimental.Deployment) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName).Prefix("metadata")...) allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec).Prefix("spec")...) return allErrs } -func ValidateThirdPartyResourceDataUpdate(old, update *expapi.ThirdPartyResourceData) errs.ValidationErrorList { +func ValidateThirdPartyResourceDataUpdate(old, update *experimental.ThirdPartyResourceData) errs.ValidationErrorList { return ValidateThirdPartyResourceData(update) } -func ValidateThirdPartyResourceData(obj *expapi.ThirdPartyResourceData) errs.ValidationErrorList { +func ValidateThirdPartyResourceData(obj *experimental.ThirdPartyResourceData) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if len(obj.Name) == 0 { allErrs = append(allErrs, errs.NewFieldInvalid("name", obj.Name, "name must be non-empty")) } return allErrs } + +func ValidateJob(job *experimental.Job) errs.ValidationErrorList { + allErrs := errs.ValidationErrorList{} + // Jobs and rcs have the same name validation + allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&job.ObjectMeta, true, apivalidation.ValidateReplicationControllerName).Prefix("metadata")...) + allErrs = append(allErrs, ValidateJobSpec(&job.Spec).Prefix("spec")...) 
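(ValidateJob's closing return and ValidateJobSpec continue just below.) To make the new checks concrete, a hedged sketch of calling them; the harness itself is illustrative, and the exact error strings follow the test expectations later in this patch:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/experimental"
	"k8s.io/kubernetes/pkg/apis/experimental/validation"
)

func main() {
	job := experimental.Job{
		ObjectMeta: api.ObjectMeta{Name: "myjob", Namespace: api.NamespaceDefault},
		// Spec left empty on purpose: Selector and Template are both required.
	}
	for _, err := range validation.ValidateJob(&job) {
		fmt.Println(err) // e.g. "spec.selector: required value"
	}
}
```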
+ return allErrs +} + +func ValidateJobSpec(spec *experimental.JobSpec) errs.ValidationErrorList { + allErrs := errs.ValidationErrorList{} + + if spec.Parallelism != nil && *spec.Parallelism < 0 { + allErrs = append(allErrs, errs.NewFieldInvalid("parallelism", spec.Parallelism, isNegativeErrorMsg)) + } + if spec.Completions != nil && *spec.Completions < 0 { + allErrs = append(allErrs, errs.NewFieldInvalid("completions", spec.Completions, isNegativeErrorMsg)) + } + + selector := labels.Set(spec.Selector).AsSelector() + if selector.Empty() { + allErrs = append(allErrs, errs.NewFieldRequired("selector")) + } + + if spec.Template == nil { + allErrs = append(allErrs, errs.NewFieldRequired("template")) + } else { + labels := labels.Set(spec.Template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, errs.NewFieldInvalid("template.labels", spec.Template.Labels, "selector does not match template")) + } + allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(spec.Template).Prefix("template")...) + if spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure && + spec.Template.Spec.RestartPolicy != api.RestartPolicyNever { + allErrs = append(allErrs, errs.NewFieldValueNotSupported("template.spec.restartPolicy", + spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)})) + } + } + return allErrs +} + +func ValidateJobUpdate(oldJob, job *experimental.Job) errs.ValidationErrorList { + allErrs := errs.ValidationErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&oldJob.ObjectMeta, &job.ObjectMeta).Prefix("metadata")...) + allErrs = append(allErrs, ValidateJobSpec(&job.Spec).Prefix("spec")...) + return allErrs +} diff --git a/pkg/expapi/validation/validation_test.go b/pkg/apis/experimental/validation/validation_test.go similarity index 67% rename from pkg/expapi/validation/validation_test.go rename to pkg/apis/experimental/validation/validation_test.go index 91c562feb71..6cfd1e8d72e 100644 --- a/pkg/expapi/validation/validation_test.go +++ b/pkg/apis/experimental/validation/validation_test.go @@ -22,25 +22,25 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/util" errors "k8s.io/kubernetes/pkg/util/fielderrors" ) func TestValidateHorizontalPodAutoscaler(t *testing.T) { - successCases := []expapi.HorizontalPodAutoscaler{ + successCases := []experimental.HorizontalPodAutoscaler{ { ObjectMeta: api.ObjectMeta{ Name: "myautoscaler", Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Subresource: "scale", }, MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, }, } @@ -50,19 +50,19 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { } } - errorCases := map[string]expapi.HorizontalPodAutoscaler{ + errorCases := map[string]experimental.HorizontalPodAutoscaler{ "must be non-negative": { ObjectMeta: api.ObjectMeta{ Name: "myautoscaler", Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: 
&experimental.SubresourceReference{ Subresource: "scale", }, MinCount: -1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, }, "must be bigger or equal to minCount": { @@ -70,13 +70,13 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myautoscaler", Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Subresource: "scale", }, MinCount: 7, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, }, "invalid value": { @@ -84,13 +84,13 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myautoscaler", Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Subresource: "scale", }, MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("-0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("-0.8")}, }, }, "resource not supported": { @@ -98,13 +98,13 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myautoscaler", Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Subresource: "scale", }, MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceName("NotSupportedResource"), Quantity: resource.MustParse("0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceName("NotSupportedResource"), Quantity: resource.MustParse("0.8")}, }, }, "required value": { @@ -112,10 +112,10 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myautoscaler", Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ + Spec: experimental.HorizontalPodAutoscalerSpec{ MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, }, } @@ -212,53 +212,53 @@ func TestValidateDaemonSetUpdate(t *testing.T) { } type dsUpdateTest struct { - old expapi.DaemonSet - update expapi.DaemonSet + old experimental.DaemonSet + update experimental.DaemonSet } successCases := []dsUpdateTest{ { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, }, { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ 
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector2, Template: &validPodTemplateAbc2.Template, }, }, }, { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateNodeSelector.Template, }, @@ -274,80 +274,80 @@ func TestValidateDaemonSetUpdate(t *testing.T) { } errorCases := map[string]dsUpdateTest{ "change daemon name": { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, }, "invalid selector": { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: invalidSelector, Template: &validPodTemplateAbc.Template, }, }, }, "invalid pod": { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &invalidPodTemplate.Template, }, }, }, "change container image": { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateDef.Template, }, }, }, "read-write volume": { - old: expapi.DaemonSet{ + old: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: 
api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.DaemonSet{ + update: experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &readWriteVolumePodTemplate.Template, }, @@ -387,17 +387,17 @@ func TestValidateDaemonSet(t *testing.T) { }, }, } - successCases := []expapi.DaemonSet{ + successCases := []experimental.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, { ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, @@ -409,37 +409,37 @@ func TestValidateDaemonSet(t *testing.T) { } } - errorCases := map[string]expapi.DaemonSet{ + errorCases := map[string]experimental.DaemonSet{ "zero-length ID": { ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, "missing-namespace": { ObjectMeta: api.ObjectMeta{Name: "abc-123"}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, "empty selector": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Template: &validPodTemplate.Template, }, }, "selector_doesnt_match": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: map[string]string{"foo": "bar"}, Template: &validPodTemplate.Template, }, }, "invalid manifest": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, }, }, @@ -451,7 +451,7 @@ func TestValidateDaemonSet(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, @@ -464,7 +464,7 @@ func TestValidateDaemonSet(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Template: &invalidPodTemplate.Template, }, }, @@ -476,7 +476,7 @@ func TestValidateDaemonSet(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, @@ -486,7 +486,7 @@ func TestValidateDaemonSet(t *testing.T) { Name: "abc-123", Namespace: api.NamespaceDefault, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &api.PodTemplateSpec{ Spec: api.PodSpec{ @@ -505,7 +505,7 @@ func TestValidateDaemonSet(t *testing.T) { Name: "abc-123", Namespace: api.NamespaceDefault, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: validSelector, Template: &api.PodTemplateSpec{ Spec: api.PodSpec{ @@ -542,13 +542,13 @@ func TestValidateDaemonSet(t 
*testing.T) { } } -func validDeployment() *expapi.Deployment { - return &expapi.Deployment{ +func validDeployment() *experimental.Deployment { + return &experimental.Deployment{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: api.NamespaceDefault, }, - Spec: expapi.DeploymentSpec{ + Spec: experimental.DeploymentSpec{ Selector: map[string]string{ "name": "abc", }, @@ -578,7 +578,7 @@ func validDeployment() *expapi.Deployment { } func TestValidateDeployment(t *testing.T) { - successCases := []*expapi.Deployment{ + successCases := []*experimental.Deployment{ validDeployment(), } for _, successCase := range successCases { @@ -587,8 +587,8 @@ func TestValidateDeployment(t *testing.T) { } } - errorCases := map[string]*expapi.Deployment{} - errorCases["metadata.name: required value"] = &expapi.Deployment{ + errorCases := map[string]*experimental.Deployment{} + errorCases["metadata.name: required value"] = &experimental.Deployment{ ObjectMeta: api.ObjectMeta{ Namespace: api.NamespaceDefault, }, @@ -612,17 +612,17 @@ func TestValidateDeployment(t *testing.T) { // rollingUpdate should be nil for recreate. invalidRecreateDeployment := validDeployment() - invalidRecreateDeployment.Spec.Strategy = expapi.DeploymentStrategy{ - Type: expapi.DeploymentRecreate, - RollingUpdate: &expapi.RollingUpdateDeployment{}, + invalidRecreateDeployment.Spec.Strategy = experimental.DeploymentStrategy{ + Type: experimental.DeploymentRecreate, + RollingUpdate: &experimental.RollingUpdateDeployment{}, } errorCases["rollingUpdate should be nil when strategy type is Recreate"] = invalidRecreateDeployment // MaxSurge should be in the form of 20%. invalidMaxSurgeDeployment := validDeployment() - invalidMaxSurgeDeployment.Spec.Strategy = expapi.DeploymentStrategy{ - Type: expapi.DeploymentRollingUpdate, - RollingUpdate: &expapi.RollingUpdateDeployment{ + invalidMaxSurgeDeployment.Spec.Strategy = experimental.DeploymentStrategy{ + Type: experimental.DeploymentRollingUpdate, + RollingUpdate: &experimental.RollingUpdateDeployment{ MaxSurge: util.NewIntOrStringFromString("20Percent"), }, } @@ -630,9 +630,9 @@ func TestValidateDeployment(t *testing.T) { // MaxSurge and MaxUnavailable cannot both be zero. invalidRollingUpdateDeployment := validDeployment() - invalidRollingUpdateDeployment.Spec.Strategy = expapi.DeploymentStrategy{ - Type: expapi.DeploymentRollingUpdate, - RollingUpdate: &expapi.RollingUpdateDeployment{ + invalidRollingUpdateDeployment.Spec.Strategy = experimental.DeploymentStrategy{ + Type: experimental.DeploymentRollingUpdate, + RollingUpdate: &experimental.RollingUpdateDeployment{ MaxSurge: util.NewIntOrStringFromString("0%"), MaxUnavailable: util.NewIntOrStringFromInt(0), }, @@ -641,9 +641,9 @@ func TestValidateDeployment(t *testing.T) { // MaxUnavailable should not be more than 100%. 
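(The invalid case this comment introduces continues below.) For contrast with the invalid strategies above, a sketch of one that satisfies all three rules, using the same `util` helpers as the tests; illustrative, not part of the patch:

```go
package example

import (
	"k8s.io/kubernetes/pkg/apis/experimental"
	"k8s.io/kubernetes/pkg/util"
)

func validStrategy() experimental.DeploymentStrategy {
	return experimental.DeploymentStrategy{
		Type: experimental.DeploymentRollingUpdate, // Recreate must leave RollingUpdate nil
		RollingUpdate: &experimental.RollingUpdateDeployment{
			MaxSurge:       util.NewIntOrStringFromString("25%"), // a percent needs the trailing '%'
			MaxUnavailable: util.NewIntOrStringFromInt(1),        // not zero together with a "0%" MaxSurge, and at most 100%
		},
	}
}
```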
invalidMaxUnavailableDeployment := validDeployment() - invalidMaxUnavailableDeployment.Spec.Strategy = expapi.DeploymentStrategy{ - Type: expapi.DeploymentRollingUpdate, - RollingUpdate: &expapi.RollingUpdateDeployment{ + invalidMaxUnavailableDeployment.Spec.Strategy = experimental.DeploymentStrategy{ + Type: experimental.DeploymentRollingUpdate, + RollingUpdate: &experimental.RollingUpdateDeployment{ MaxUnavailable: util.NewIntOrStringFromString("110%"), }, } @@ -658,3 +658,129 @@ func TestValidateDeployment(t *testing.T) { } } } + +func TestValidateJob(t *testing.T) { + validSelector := map[string]string{"a": "b"} + validPodTemplateSpec := api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + } + successCases := []experimental.Job{ + { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Selector: validSelector, + Template: &validPodTemplateSpec, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateJob(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + negative := -1 + errorCases := map[string]experimental.Job{ + "spec.parallelism:must be non-negative": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Parallelism: &negative, + Selector: validSelector, + Template: &validPodTemplateSpec, + }, + }, + "spec.completions:must be non-negative": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Completions: &negative, + Selector: validSelector, + Template: &validPodTemplateSpec, + }, + }, + "spec.selector:required value": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Selector: map[string]string{}, + Template: &validPodTemplateSpec, + }, + }, + "spec.template:required value": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Selector: validSelector, + }, + }, + "spec.template.labels:selector does not match template": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Selector: validSelector, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"y": "z"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + "spec.template.spec.restartPolicy:unsupported value": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + }, + Spec: experimental.JobSpec{ + Selector: validSelector, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + } + + for k, v := range errorCases { + errs := ValidateJob(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } else { + s := strings.Split(k, ":") + err := 
errs[0].(*errors.ValidationError) + if err.Field != s[0] || !strings.Contains(err.Error(), s[1]) { + t.Errorf("unexpected error: %v, expected: %s", errs[0], k) + } + } + } +} diff --git a/pkg/client/unversioned/cache/delta_fifo.go b/pkg/client/cache/delta_fifo.go similarity index 100% rename from pkg/client/unversioned/cache/delta_fifo.go rename to pkg/client/cache/delta_fifo.go diff --git a/pkg/client/unversioned/cache/delta_fifo_test.go b/pkg/client/cache/delta_fifo_test.go similarity index 100% rename from pkg/client/unversioned/cache/delta_fifo_test.go rename to pkg/client/cache/delta_fifo_test.go diff --git a/pkg/client/unversioned/cache/doc.go b/pkg/client/cache/doc.go similarity index 100% rename from pkg/client/unversioned/cache/doc.go rename to pkg/client/cache/doc.go diff --git a/pkg/client/unversioned/cache/expiration_cache.go b/pkg/client/cache/expiration_cache.go similarity index 100% rename from pkg/client/unversioned/cache/expiration_cache.go rename to pkg/client/cache/expiration_cache.go diff --git a/pkg/client/unversioned/cache/expiration_cache_fakes.go b/pkg/client/cache/expiration_cache_fakes.go similarity index 100% rename from pkg/client/unversioned/cache/expiration_cache_fakes.go rename to pkg/client/cache/expiration_cache_fakes.go diff --git a/pkg/client/unversioned/cache/expiration_cache_test.go b/pkg/client/cache/expiration_cache_test.go similarity index 100% rename from pkg/client/unversioned/cache/expiration_cache_test.go rename to pkg/client/cache/expiration_cache_test.go diff --git a/pkg/client/unversioned/cache/fifo.go b/pkg/client/cache/fifo.go similarity index 100% rename from pkg/client/unversioned/cache/fifo.go rename to pkg/client/cache/fifo.go diff --git a/pkg/client/unversioned/cache/fifo_test.go b/pkg/client/cache/fifo_test.go similarity index 100% rename from pkg/client/unversioned/cache/fifo_test.go rename to pkg/client/cache/fifo_test.go diff --git a/pkg/client/unversioned/cache/index.go b/pkg/client/cache/index.go similarity index 100% rename from pkg/client/unversioned/cache/index.go rename to pkg/client/cache/index.go diff --git a/pkg/client/unversioned/cache/index_test.go b/pkg/client/cache/index_test.go similarity index 100% rename from pkg/client/unversioned/cache/index_test.go rename to pkg/client/cache/index_test.go diff --git a/pkg/client/unversioned/cache/listers.go b/pkg/client/cache/listers.go similarity index 96% rename from pkg/client/unversioned/cache/listers.go rename to pkg/client/cache/listers.go index cba3e5b3003..0bb917f69a1 100644 --- a/pkg/client/unversioned/cache/listers.go +++ b/pkg/client/cache/listers.go @@ -21,7 +21,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/labels" ) @@ -232,7 +232,7 @@ type StoreToDaemonSetLister struct { } // Exists checks if the given daemon set exists in the store. -func (s *StoreToDaemonSetLister) Exists(ds *expapi.DaemonSet) (bool, error) { +func (s *StoreToDaemonSetLister) Exists(ds *experimental.DaemonSet) (bool, error) { _, exists, err := s.Store.Get(ds) if err != nil { return false, err @@ -242,17 +242,17 @@ func (s *StoreToDaemonSetLister) Exists(ds *expapi.DaemonSet) (bool, error) { // List lists all daemon sets in the store. 
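(The lister's List method continues below.) A sketch of the relocated lister in use, mirroring the `listers_test.go` setup further down; the positional literal assumes, as the tests do, that the store is the lister's only field:

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/experimental"
	"k8s.io/kubernetes/pkg/client/cache"
)

func listDaemonSets() ([]experimental.DaemonSet, error) {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	lister := cache.StoreToDaemonSetLister{store}

	if err := store.Add(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{Name: "ds", Namespace: "ns"},
	}); err != nil {
		return nil, err
	}
	return lister.List()
}
```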
// TODO: converge on the interface in pkg/client -func (s *StoreToDaemonSetLister) List() (dss []expapi.DaemonSet, err error) { +func (s *StoreToDaemonSetLister) List() (dss []experimental.DaemonSet, err error) { for _, c := range s.Store.List() { - dss = append(dss, *(c.(*expapi.DaemonSet))) + dss = append(dss, *(c.(*experimental.DaemonSet))) } return dss, nil } // GetPodDaemonSets returns a list of daemon sets managing a pod. Returns an error iff no matching daemon sets are found. -func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []expapi.DaemonSet, err error) { +func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []experimental.DaemonSet, err error) { var selector labels.Selector - var daemonSet expapi.DaemonSet + var daemonSet experimental.DaemonSet if len(pod.Labels) == 0 { err = fmt.Errorf("No daemon sets found for pod %v because it has no labels", pod.Name) @@ -260,7 +260,7 @@ func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []ex } for _, m := range s.Store.List() { - daemonSet = *m.(*expapi.DaemonSet) + daemonSet = *m.(*experimental.DaemonSet) if daemonSet.Namespace != pod.Namespace { continue } diff --git a/pkg/client/unversioned/cache/listers_test.go b/pkg/client/cache/listers_test.go similarity index 92% rename from pkg/client/unversioned/cache/listers_test.go rename to pkg/client/cache/listers_test.go index f9505d26136..79098254a9e 100644 --- a/pkg/client/unversioned/cache/listers_test.go +++ b/pkg/client/cache/listers_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" ) @@ -160,44 +160,44 @@ func TestStoreToDaemonSetLister(t *testing.T) { store := NewStore(MetaNamespaceKeyFunc) lister := StoreToDaemonSetLister{store} testCases := []struct { - inDSs []*expapi.DaemonSet - list func() ([]expapi.DaemonSet, error) + inDSs []*experimental.DaemonSet + list func() ([]experimental.DaemonSet, error) outDaemonSetNames sets.String expectErr bool }{ // Basic listing { - inDSs: []*expapi.DaemonSet{ + inDSs: []*experimental.DaemonSet{ {ObjectMeta: api.ObjectMeta{Name: "basic"}}, }, - list: func() ([]expapi.DaemonSet, error) { + list: func() ([]experimental.DaemonSet, error) { return lister.List() }, outDaemonSetNames: sets.NewString("basic"), }, // Listing multiple daemon sets { - inDSs: []*expapi.DaemonSet{ + inDSs: []*experimental.DaemonSet{ {ObjectMeta: api.ObjectMeta{Name: "basic"}}, {ObjectMeta: api.ObjectMeta{Name: "complex"}}, {ObjectMeta: api.ObjectMeta{Name: "complex2"}}, }, - list: func() ([]expapi.DaemonSet, error) { + list: func() ([]experimental.DaemonSet, error) { return lister.List() }, outDaemonSetNames: sets.NewString("basic", "complex", "complex2"), }, // No pod labels { - inDSs: []*expapi.DaemonSet{ + inDSs: []*experimental.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: map[string]string{"foo": "baz"}, }, }, }, - list: func() ([]expapi.DaemonSet, error) { + list: func() ([]experimental.DaemonSet, error) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "pod1", Namespace: "ns"}, } @@ -208,12 +208,12 @@ func TestStoreToDaemonSetLister(t *testing.T) { }, // No DS selectors { - inDSs: []*expapi.DaemonSet{ + inDSs: []*experimental.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, }, }, - list: func() 
([]expapi.DaemonSet, error) { + list: func() ([]experimental.DaemonSet, error) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod1", @@ -228,21 +228,21 @@ func TestStoreToDaemonSetLister(t *testing.T) { }, // Matching labels to selectors and namespace { - inDSs: []*expapi.DaemonSet{ + inDSs: []*experimental.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: map[string]string{"foo": "bar"}, }, }, { ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: map[string]string{"foo": "bar"}, }, }, }, - list: func() ([]expapi.DaemonSet, error) { + list: func() ([]experimental.DaemonSet, error) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod1", diff --git a/pkg/client/unversioned/cache/listwatch.go b/pkg/client/cache/listwatch.go similarity index 100% rename from pkg/client/unversioned/cache/listwatch.go rename to pkg/client/cache/listwatch.go diff --git a/pkg/client/unversioned/cache/listwatch_test.go b/pkg/client/cache/listwatch_test.go similarity index 100% rename from pkg/client/unversioned/cache/listwatch_test.go rename to pkg/client/cache/listwatch_test.go diff --git a/pkg/client/unversioned/cache/reflector.go b/pkg/client/cache/reflector.go similarity index 100% rename from pkg/client/unversioned/cache/reflector.go rename to pkg/client/cache/reflector.go diff --git a/pkg/client/unversioned/cache/reflector_test.go b/pkg/client/cache/reflector_test.go similarity index 100% rename from pkg/client/unversioned/cache/reflector_test.go rename to pkg/client/cache/reflector_test.go diff --git a/pkg/client/unversioned/cache/store.go b/pkg/client/cache/store.go similarity index 100% rename from pkg/client/unversioned/cache/store.go rename to pkg/client/cache/store.go diff --git a/pkg/client/unversioned/cache/store_test.go b/pkg/client/cache/store_test.go similarity index 100% rename from pkg/client/unversioned/cache/store_test.go rename to pkg/client/cache/store_test.go diff --git a/pkg/client/unversioned/cache/thread_safe_store.go b/pkg/client/cache/thread_safe_store.go similarity index 100% rename from pkg/client/unversioned/cache/thread_safe_store.go rename to pkg/client/cache/thread_safe_store.go diff --git a/pkg/client/unversioned/cache/undelta_store.go b/pkg/client/cache/undelta_store.go similarity index 100% rename from pkg/client/unversioned/cache/undelta_store.go rename to pkg/client/cache/undelta_store.go diff --git a/pkg/client/unversioned/cache/undelta_store_test.go b/pkg/client/cache/undelta_store_test.go similarity index 100% rename from pkg/client/unversioned/cache/undelta_store_test.go rename to pkg/client/cache/undelta_store_test.go diff --git a/pkg/client/unversioned/record/doc.go b/pkg/client/record/doc.go similarity index 100% rename from pkg/client/unversioned/record/doc.go rename to pkg/client/record/doc.go diff --git a/pkg/client/unversioned/record/event.go b/pkg/client/record/event.go similarity index 100% rename from pkg/client/unversioned/record/event.go rename to pkg/client/record/event.go diff --git a/pkg/client/unversioned/record/event_test.go b/pkg/client/record/event_test.go similarity index 97% rename from pkg/client/unversioned/record/event_test.go rename to pkg/client/record/event_test.go index 90bb082496e..5234513c466 100644 --- a/pkg/client/unversioned/record/event_test.go +++ b/pkg/client/record/event_test.go @@ -21,6 +21,7 @@ import ( "reflect" "strconv" "strings" + "sync" "testing" 
"k8s.io/kubernetes/pkg/api" @@ -271,39 +272,41 @@ func TestEventf(t *testing.T) { } for _, item := range table { - called := make(chan struct{}) + var wg sync.WaitGroup + // We expect only one callback + wg.Add(1) testEvents := testEventSink{ OnCreate: func(event *api.Event) (*api.Event, error) { + defer wg.Done() returnEvent, _ := validateEvent(event, item.expect, t) if item.expectUpdate { t.Errorf("Expected event update(), got event create()") } - called <- struct{}{} return returnEvent, nil }, OnUpdate: func(event *api.Event) (*api.Event, error) { + defer wg.Done() returnEvent, _ := validateEvent(event, item.expect, t) if !item.expectUpdate { t.Errorf("Expected event create(), got event update()") } - called <- struct{}{} return returnEvent, nil }, } eventBroadcaster := NewBroadcaster() sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) logWatcher1 := eventBroadcaster.StartLogging(t.Logf) // Prove that it is useful + wg.Add(1) logWatcher2 := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) { + defer wg.Done() if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a { t.Errorf("Expected '%v', got '%v'", e, a) } - called <- struct{}{} }) recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "eventTest"}) recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...) - <-called - <-called + wg.Wait() sinkWatcher.Stop() logWatcher1.Stop() logWatcher2.Stop() @@ -316,17 +319,17 @@ func validateEvent(actualEvent *api.Event, expectedEvent *api.Event, t *testing. if actualEvent.FirstTimestamp.IsZero() || actualEvent.LastTimestamp.IsZero() { t.Errorf("timestamp wasn't set: %#v", *actualEvent) } - if actualEvent.FirstTimestamp.Equal(actualEvent.LastTimestamp) { - if expectCompression { - t.Errorf("FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurrence of the event, but were different. Actual Event: %#v", actualEvent.FirstTimestamp, actualEvent.LastTimestamp, *actualEvent) - } - } else { - if !expectCompression { - t.Errorf("FirstTimestamp (%q) and LastTimestamp (%q) must be different to indicate event compression happened, but were the same. Actual Event: %#v", actualEvent.FirstTimestamp, actualEvent.LastTimestamp, *actualEvent) - } - } actualFirstTimestamp := actualEvent.FirstTimestamp actualLastTimestamp := actualEvent.LastTimestamp + if actualFirstTimestamp.Equal(actualLastTimestamp) { + if expectCompression { + t.Errorf("FirstTimestamp (%q) and LastTimestamp (%q) must be different to indicate event compression happened, but were the same. Actual Event: %#v", actualFirstTimestamp, actualLastTimestamp, *actualEvent) + } + } else { + if expectedEvent.Count == 1 { + t.Errorf("FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurrence of the event, but were different. 
Actual Event: %#v", actualFirstTimestamp, actualLastTimestamp, *actualEvent) + } + } // Temp clear time stamps for comparison because actual values don't matter for comparison actualEvent.FirstTimestamp = expectedEvent.FirstTimestamp actualEvent.LastTimestamp = expectedEvent.LastTimestamp diff --git a/pkg/client/unversioned/record/events_cache.go b/pkg/client/record/events_cache.go similarity index 100% rename from pkg/client/unversioned/record/events_cache.go rename to pkg/client/record/events_cache.go diff --git a/pkg/client/unversioned/record/events_cache_test.go b/pkg/client/record/events_cache_test.go similarity index 100% rename from pkg/client/unversioned/record/events_cache_test.go rename to pkg/client/record/events_cache_test.go diff --git a/pkg/client/unversioned/record/fake.go b/pkg/client/record/fake.go similarity index 100% rename from pkg/client/unversioned/record/fake.go rename to pkg/client/record/fake.go diff --git a/pkg/client/unversioned/daemon_sets.go b/pkg/client/unversioned/daemon_sets.go index 321b34cc465..857ecaf5c94 100644 --- a/pkg/client/unversioned/daemon_sets.go +++ b/pkg/client/unversioned/daemon_sets.go @@ -17,7 +17,7 @@ limitations under the License. package unversioned import ( - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -29,10 +29,10 @@ type DaemonSetsNamespacer interface { } type DaemonSetInterface interface { - List(selector labels.Selector) (*expapi.DaemonSetList, error) - Get(name string) (*expapi.DaemonSet, error) - Create(ctrl *expapi.DaemonSet) (*expapi.DaemonSet, error) - Update(ctrl *expapi.DaemonSet) (*expapi.DaemonSet, error) + List(selector labels.Selector) (*experimental.DaemonSetList, error) + Get(name string) (*experimental.DaemonSet, error) + Create(ctrl *experimental.DaemonSet) (*experimental.DaemonSet, error) + Update(ctrl *experimental.DaemonSet) (*experimental.DaemonSet, error) Delete(name string) error Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) } @@ -50,29 +50,29 @@ func newDaemonSets(c *ExperimentalClient, namespace string) *daemonSets { // Ensure statically that daemonSets implements DaemonSetsInterface. var _ DaemonSetInterface = &daemonSets{} -func (c *daemonSets) List(selector labels.Selector) (result *expapi.DaemonSetList, err error) { - result = &expapi.DaemonSetList{} +func (c *daemonSets) List(selector labels.Selector) (result *experimental.DaemonSetList, err error) { + result = &experimental.DaemonSetList{} err = c.r.Get().Namespace(c.ns).Resource("daemonsets").LabelsSelectorParam(selector).Do().Into(result) return } // Get returns information about a particular daemon set. -func (c *daemonSets) Get(name string) (result *expapi.DaemonSet, err error) { - result = &expapi.DaemonSet{} +func (c *daemonSets) Get(name string) (result *experimental.DaemonSet, err error) { + result = &experimental.DaemonSet{} err = c.r.Get().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Into(result) return } // Create creates a new daemon set. -func (c *daemonSets) Create(daemon *expapi.DaemonSet) (result *expapi.DaemonSet, err error) { - result = &expapi.DaemonSet{} +func (c *daemonSets) Create(daemon *experimental.DaemonSet) (result *experimental.DaemonSet, err error) { + result = &experimental.DaemonSet{} err = c.r.Post().Namespace(c.ns).Resource("daemonsets").Body(daemon).Do().Into(result) return } // Update updates an existing daemon set. 
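(The daemon set client's Update method continues below.) The `event_test.go` change above distills to a general pattern: a `sync.WaitGroup` replaces pushing to and draining a `called` channel, so each expected callback is a `wg.Add(1)` next to its registration rather than another receive at the end. A standalone sketch:

```go
package main

import "sync"

func main() {
	var wg sync.WaitGroup

	callback := func() {
		defer wg.Done() // runs even if assertions inside fail early
		// ... validate the delivered event ...
	}

	wg.Add(2) // two callbacks expected for this item
	go callback()
	go callback()

	wg.Wait() // previously: <-called; <-called
}
```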
-func (c *daemonSets) Update(daemon *expapi.DaemonSet) (result *expapi.DaemonSet, err error) { - result = &expapi.DaemonSet{} +func (c *daemonSets) Update(daemon *experimental.DaemonSet) (result *experimental.DaemonSet, err error) { + result = &experimental.DaemonSet{} err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).Body(daemon).Do().Into(result) return } diff --git a/pkg/client/unversioned/daemon_sets_test.go b/pkg/client/unversioned/daemon_sets_test.go index 6df19940149..f556f6b689f 100644 --- a/pkg/client/unversioned/daemon_sets_test.go +++ b/pkg/client/unversioned/daemon_sets_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/labels" ) @@ -37,8 +37,8 @@ func TestListDaemonSets(t *testing.T) { Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, ""), }, Response: Response{StatusCode: 200, - Body: &expapi.DaemonSetList{ - Items: []expapi.DaemonSet{ + Body: &experimental.DaemonSetList{ + Items: []experimental.DaemonSet{ { ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -47,7 +47,7 @@ func TestListDaemonSets(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, @@ -66,7 +66,7 @@ func TestGetDaemonSet(t *testing.T) { Request: testRequest{Method: "GET", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, - Body: &expapi.DaemonSet{ + Body: &experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ @@ -74,7 +74,7 @@ func TestGetDaemonSet(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, @@ -97,14 +97,14 @@ func TestGetDaemonSetWithNoName(t *testing.T) { func TestUpdateDaemonSet(t *testing.T) { ns := api.NamespaceDefault - requestDaemonSet := &expapi.DaemonSet{ + requestDaemonSet := &experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, } c := &testClient{ Request: testRequest{Method: "PUT", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, - Body: &expapi.DaemonSet{ + Body: &experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ @@ -112,7 +112,7 @@ func TestUpdateDaemonSet(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, @@ -134,14 +134,14 @@ func TestDeleteDaemon(t *testing.T) { func TestCreateDaemonSet(t *testing.T) { ns := api.NamespaceDefault - requestDaemonSet := &expapi.DaemonSet{ + requestDaemonSet := &experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "foo"}, } c := &testClient{ Request: testRequest{Method: "POST", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, ""), Body: requestDaemonSet, Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, - Body: &expapi.DaemonSet{ + Body: &experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ @@ -149,7 +149,7 @@ func TestCreateDaemonSet(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, diff --git a/pkg/client/unversioned/deployment.go 
b/pkg/client/unversioned/deployment.go index 71b81b73c8e..a4e2924858a 100644 --- a/pkg/client/unversioned/deployment.go +++ b/pkg/client/unversioned/deployment.go @@ -18,7 +18,7 @@ package unversioned import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -31,11 +31,11 @@ type DeploymentsNamespacer interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - List(label labels.Selector, field fields.Selector) (*expapi.DeploymentList, error) - Get(name string) (*expapi.Deployment, error) + List(label labels.Selector, field fields.Selector) (*experimental.DeploymentList, error) + Get(name string) (*experimental.Deployment, error) Delete(name string, options *api.DeleteOptions) error - Create(Deployment *expapi.Deployment) (*expapi.Deployment, error) - Update(Deployment *expapi.Deployment) (*expapi.Deployment, error) + Create(Deployment *experimental.Deployment) (*experimental.Deployment, error) + Update(Deployment *experimental.Deployment) (*experimental.Deployment, error) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) } @@ -54,15 +54,15 @@ func newDeployments(c *ExperimentalClient, namespace string) *deployments { } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(label labels.Selector, field fields.Selector) (result *expapi.DeploymentList, err error) { - result = &expapi.DeploymentList{} +func (c *deployments) List(label labels.Selector, field fields.Selector) (result *experimental.DeploymentList, err error) { + result = &experimental.DeploymentList{} err = c.client.Get().Namespace(c.ns).Resource("deployments").LabelsSelectorParam(label).FieldsSelectorParam(field).Do().Into(result) return } // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string) (result *expapi.Deployment, err error) { - result = &expapi.Deployment{} +func (c *deployments) Get(name string) (result *experimental.Deployment, err error) { + result = &experimental.Deployment{} err = c.client.Get().Namespace(c.ns).Resource("deployments").Name(name).Do().Into(result) return } @@ -80,15 +80,15 @@ func (c *deployments) Delete(name string, options *api.DeleteOptions) error { } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *expapi.Deployment) (result *expapi.Deployment, err error) { - result = &expapi.Deployment{} +func (c *deployments) Create(deployment *experimental.Deployment) (result *experimental.Deployment, err error) { + result = &experimental.Deployment{} err = c.client.Post().Namespace(c.ns).Resource("deployments").Body(deployment).Do().Into(result) return } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
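(The deployments Update method continues below.) End to end, the typed flow reads the same as before the rename; a sketch against the `DeploymentInterface` above, with client construction elided:

```go
package example

import "k8s.io/kubernetes/pkg/client/unversioned"

func bumpReplicas(c unversioned.DeploymentsNamespacer, ns, name string) error {
	d, err := c.Deployments(ns).Get(name)
	if err != nil {
		return err
	}
	d.Spec.Replicas = 3 // experimental.DeploymentSpec field checked by the validators above
	_, err = c.Deployments(ns).Update(d)
	return err
}
```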
-func (c *deployments) Update(deployment *expapi.Deployment) (result *expapi.Deployment, err error) { - result = &expapi.Deployment{} +func (c *deployments) Update(deployment *experimental.Deployment) (result *experimental.Deployment, err error) { + result = &experimental.Deployment{} err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).Body(deployment).Do().Into(result) return } diff --git a/pkg/client/unversioned/deployment_test.go b/pkg/client/unversioned/deployment_test.go index 352479de238..6d41c18248b 100644 --- a/pkg/client/unversioned/deployment_test.go +++ b/pkg/client/unversioned/deployment_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" ) @@ -33,7 +33,7 @@ func getDeploymentsResoureName() string { func TestDeploymentCreate(t *testing.T) { ns := api.NamespaceDefault - deployment := expapi.Deployment{ + deployment := experimental.Deployment{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: ns, @@ -58,7 +58,7 @@ func TestDeploymentCreate(t *testing.T) { func TestDeploymentGet(t *testing.T) { ns := api.NamespaceDefault - deployment := &expapi.Deployment{ + deployment := &experimental.Deployment{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: ns, @@ -80,8 +80,8 @@ func TestDeploymentGet(t *testing.T) { func TestDeploymentList(t *testing.T) { ns := api.NamespaceDefault - deploymentList := &expapi.DeploymentList{ - Items: []expapi.Deployment{ + deploymentList := &experimental.DeploymentList{ + Items: []experimental.Deployment{ { ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -105,7 +105,7 @@ func TestDeploymentList(t *testing.T) { func TestDeploymentUpdate(t *testing.T) { ns := api.NamespaceDefault - deployment := &expapi.Deployment{ + deployment := &experimental.Deployment{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: ns, diff --git a/pkg/client/unversioned/experimental.go b/pkg/client/unversioned/experimental.go index f23083bd571..00f33daa1c0 100644 --- a/pkg/client/unversioned/experimental.go +++ b/pkg/client/unversioned/experimental.go @@ -22,7 +22,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/api" - explatest "k8s.io/kubernetes/pkg/expapi/latest" + explatest "k8s.io/kubernetes/pkg/apis/experimental/latest" "k8s.io/kubernetes/pkg/version" ) diff --git a/pkg/client/unversioned/horizontalpodautoscaler.go b/pkg/client/unversioned/horizontalpodautoscaler.go index 8c6a743c411..94f8030b64a 100644 --- a/pkg/client/unversioned/horizontalpodautoscaler.go +++ b/pkg/client/unversioned/horizontalpodautoscaler.go @@ -18,7 +18,7 @@ package unversioned import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -31,11 +31,11 @@ type HorizontalPodAutoscalersNamespacer interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
type HorizontalPodAutoscalerInterface interface { - List(label labels.Selector, field fields.Selector) (*expapi.HorizontalPodAutoscalerList, error) - Get(name string) (*expapi.HorizontalPodAutoscaler, error) + List(label labels.Selector, field fields.Selector) (*experimental.HorizontalPodAutoscalerList, error) + Get(name string) (*experimental.HorizontalPodAutoscaler, error) Delete(name string, options *api.DeleteOptions) error - Create(horizontalPodAutoscaler *expapi.HorizontalPodAutoscaler) (*expapi.HorizontalPodAutoscaler, error) - Update(horizontalPodAutoscaler *expapi.HorizontalPodAutoscaler) (*expapi.HorizontalPodAutoscaler, error) + Create(horizontalPodAutoscaler *experimental.HorizontalPodAutoscaler) (*experimental.HorizontalPodAutoscaler, error) + Update(horizontalPodAutoscaler *experimental.HorizontalPodAutoscaler) (*experimental.HorizontalPodAutoscaler, error) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) } @@ -54,15 +54,15 @@ func newHorizontalPodAutoscalers(c *ExperimentalClient, namespace string) *horiz } // List takes label and field selectors, and returns the list of horizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(label labels.Selector, field fields.Selector) (result *expapi.HorizontalPodAutoscalerList, err error) { - result = &expapi.HorizontalPodAutoscalerList{} +func (c *horizontalPodAutoscalers) List(label labels.Selector, field fields.Selector) (result *experimental.HorizontalPodAutoscalerList, err error) { + result = &experimental.HorizontalPodAutoscalerList{} err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").LabelsSelectorParam(label).FieldsSelectorParam(field).Do().Into(result) return } // Get takes the name of the horizontalPodAutoscaler, and returns the corresponding HorizontalPodAutoscaler object, and an error if it occurs -func (c *horizontalPodAutoscalers) Get(name string) (result *expapi.HorizontalPodAutoscaler, err error) { - result = &expapi.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Get(name string) (result *experimental.HorizontalPodAutoscaler, err error) { + result = &experimental.HorizontalPodAutoscaler{} err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Do().Into(result) return } @@ -81,15 +81,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOption } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *expapi.HorizontalPodAutoscaler) (result *expapi.HorizontalPodAutoscaler, err error) { - result = &expapi.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *experimental.HorizontalPodAutoscaler) (result *experimental.HorizontalPodAutoscaler, err error) { + result = &experimental.HorizontalPodAutoscaler{} err = c.client.Post().Namespace(c.ns).Resource("horizontalPodAutoscalers").Body(horizontalPodAutoscaler).Do().Into(result) return } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. 
-func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *expapi.HorizontalPodAutoscaler) (result *expapi.HorizontalPodAutoscaler, err error) { - result = &expapi.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *experimental.HorizontalPodAutoscaler) (result *experimental.HorizontalPodAutoscaler, err error) { + result = &experimental.HorizontalPodAutoscaler{} err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).Body(horizontalPodAutoscaler).Do().Into(result) return } diff --git a/pkg/client/unversioned/horizontalpodautoscaler_test.go b/pkg/client/unversioned/horizontalpodautoscaler_test.go index 0ca1b5f0a1a..020defc813a 100644 --- a/pkg/client/unversioned/horizontalpodautoscaler_test.go +++ b/pkg/client/unversioned/horizontalpodautoscaler_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" ) @@ -33,7 +33,7 @@ func getHorizontalPodAutoscalersResoureName() string { func TestHorizontalPodAutoscalerCreate(t *testing.T) { ns := api.NamespaceDefault - horizontalPodAutoscaler := expapi.HorizontalPodAutoscaler{ + horizontalPodAutoscaler := experimental.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: ns, @@ -58,7 +58,7 @@ func TestHorizontalPodAutoscalerCreate(t *testing.T) { func TestHorizontalPodAutoscalerGet(t *testing.T) { ns := api.NamespaceDefault - horizontalPodAutoscaler := &expapi.HorizontalPodAutoscaler{ + horizontalPodAutoscaler := &experimental.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: ns, @@ -80,8 +80,8 @@ func TestHorizontalPodAutoscalerGet(t *testing.T) { func TestHorizontalPodAutoscalerList(t *testing.T) { ns := api.NamespaceDefault - horizontalPodAutoscalerList := &expapi.HorizontalPodAutoscalerList{ - Items: []expapi.HorizontalPodAutoscaler{ + horizontalPodAutoscalerList := &experimental.HorizontalPodAutoscalerList{ + Items: []experimental.HorizontalPodAutoscaler{ { ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -105,7 +105,7 @@ func TestHorizontalPodAutoscalerList(t *testing.T) { func TestHorizontalPodAutoscalerUpdate(t *testing.T) { ns := api.NamespaceDefault - horizontalPodAutoscaler := &expapi.HorizontalPodAutoscaler{ + horizontalPodAutoscaler := &experimental.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: "abc", Namespace: ns, diff --git a/pkg/client/unversioned/scale.go b/pkg/client/unversioned/scale.go index 4152ec9b2fb..8b664eb6750 100644 --- a/pkg/client/unversioned/scale.go +++ b/pkg/client/unversioned/scale.go @@ -18,7 +18,7 @@ package unversioned import ( "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" ) type ScaleNamespacer interface { @@ -27,8 +27,8 @@ type ScaleNamespacer interface { // ScaleInterface has methods to work with Scale (sub)resources. 
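[Review note] scale.go below is the same rename, but the Scale client deserves a second look: it is keyed by kind rather than by a fixed resource, resolving the REST path through meta.KindToResource before hitting the scale subresource. A hedged sketch of a caller (names are illustrative; it assumes experimental.Scale exposes Spec.Replicas as in the experimental API):

    // Read-modify-write of a ReplicationController's scale subresource.
    scale, err := expClient.Scales("default").Get("ReplicationController", "frontend")
    if err != nil {
        return err
    }
    scale.Spec.Replicas = 5 // desired replica count
    if _, err := expClient.Scales("default").Update("ReplicationController", scale); err != nil {
        return err
    }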
type ScaleInterface interface { - Get(string, string) (*expapi.Scale, error) - Update(string, *expapi.Scale) (*expapi.Scale, error) + Get(string, string) (*experimental.Scale, error) + Update(string, *experimental.Scale) (*experimental.Scale, error) } // horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface @@ -46,15 +46,15 @@ func newScales(c *ExperimentalClient, namespace string) *scales { } // Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *expapi.Scale, err error) { - result = &expapi.Scale{} +func (c *scales) Get(kind string, name string) (result *experimental.Scale, err error) { + result = &experimental.Scale{} resource, _ := meta.KindToResource(kind, false) err = c.client.Get().Namespace(c.ns).Resource(resource).Name(name).SubResource("scale").Do().Into(result) return } -func (c *scales) Update(kind string, scale *expapi.Scale) (result *expapi.Scale, err error) { - result = &expapi.Scale{} +func (c *scales) Update(kind string, scale *experimental.Scale) (result *experimental.Scale, err error) { + result = &experimental.Scale{} resource, _ := meta.KindToResource(kind, false) err = c.client.Put(). Namespace(scale.Namespace). diff --git a/pkg/client/unversioned/testclient/fake_daemon_sets.go b/pkg/client/unversioned/testclient/fake_daemon_sets.go index f50ec708488..7b04ebd1534 100644 --- a/pkg/client/unversioned/testclient/fake_daemon_sets.go +++ b/pkg/client/unversioned/testclient/fake_daemon_sets.go @@ -17,8 +17,8 @@ limitations under the License. package testclient import ( + "k8s.io/kubernetes/pkg/apis/experimental" kClientLib "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -34,40 +34,40 @@ type FakeDaemonSets struct { // Ensure statically that FakeDaemonSets implements DaemonInterface. 
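[Review note] The var _ assertion that follows is Go's standard compile-time proof that a type satisfies an interface (note the comment above it says DaemonInterface while the interface actually asserted is DaemonSetInterface). The idiom in isolation:

    type Doer interface{ Do() error }

    type fakeDoer struct{}

    func (fakeDoer) Do() error { return nil }

    // Compilation fails here if fakeDoer ever stops satisfying Doer;
    // the blank identifier means no value is retained at runtime.
    var _ Doer = fakeDoer{}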
var _ kClientLib.DaemonSetInterface = &FakeDaemonSets{} -func (c *FakeDaemonSets) Get(name string) (*expapi.DaemonSet, error) { - obj, err := c.Fake.Invokes(NewGetAction("daemonsets", c.Namespace, name), &expapi.DaemonSet{}) +func (c *FakeDaemonSets) Get(name string) (*experimental.DaemonSet, error) { + obj, err := c.Fake.Invokes(NewGetAction("daemonsets", c.Namespace, name), &experimental.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*expapi.DaemonSet), err + return obj.(*experimental.DaemonSet), err } -func (c *FakeDaemonSets) List(label labels.Selector) (*expapi.DaemonSetList, error) { - obj, err := c.Fake.Invokes(NewListAction("daemonsets", c.Namespace, label, nil), &expapi.DaemonSetList{}) +func (c *FakeDaemonSets) List(label labels.Selector) (*experimental.DaemonSetList, error) { + obj, err := c.Fake.Invokes(NewListAction("daemonsets", c.Namespace, label, nil), &experimental.DaemonSetList{}) if obj == nil { return nil, err } - return obj.(*expapi.DaemonSetList), err + return obj.(*experimental.DaemonSetList), err } -func (c *FakeDaemonSets) Create(daemon *expapi.DaemonSet) (*expapi.DaemonSet, error) { - obj, err := c.Fake.Invokes(NewCreateAction("daemonsets", c.Namespace, daemon), &expapi.DaemonSet{}) +func (c *FakeDaemonSets) Create(daemon *experimental.DaemonSet) (*experimental.DaemonSet, error) { + obj, err := c.Fake.Invokes(NewCreateAction("daemonsets", c.Namespace, daemon), &experimental.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*expapi.DaemonSet), err + return obj.(*experimental.DaemonSet), err } -func (c *FakeDaemonSets) Update(daemon *expapi.DaemonSet) (*expapi.DaemonSet, error) { - obj, err := c.Fake.Invokes(NewUpdateAction("daemonsets", c.Namespace, daemon), &expapi.DaemonSet{}) +func (c *FakeDaemonSets) Update(daemon *experimental.DaemonSet) (*experimental.DaemonSet, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("daemonsets", c.Namespace, daemon), &experimental.DaemonSet{}) if obj == nil { return nil, err } - return obj.(*expapi.DaemonSet), err + return obj.(*experimental.DaemonSet), err } func (c *FakeDaemonSets) Delete(name string) error { - _, err := c.Fake.Invokes(NewDeleteAction("daemonsets", c.Namespace, name), &expapi.DaemonSet{}) + _, err := c.Fake.Invokes(NewDeleteAction("daemonsets", c.Namespace, name), &experimental.DaemonSet{}) return err } diff --git a/pkg/client/unversioned/testclient/fake_deployments.go b/pkg/client/unversioned/testclient/fake_deployments.go index 38942476ffd..9433a385805 100644 --- a/pkg/client/unversioned/testclient/fake_deployments.go +++ b/pkg/client/unversioned/testclient/fake_deployments.go @@ -18,7 +18,7 @@ package testclient import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -31,22 +31,22 @@ type FakeDeployments struct { Namespace string } -func (c *FakeDeployments) Get(name string) (*expapi.Deployment, error) { - obj, err := c.Fake.Invokes(NewGetAction("deployments", c.Namespace, name), &expapi.Deployment{}) +func (c *FakeDeployments) Get(name string) (*experimental.Deployment, error) { + obj, err := c.Fake.Invokes(NewGetAction("deployments", c.Namespace, name), &experimental.Deployment{}) if obj == nil { return nil, err } - return obj.(*expapi.Deployment), err + return obj.(*experimental.Deployment), err } -func (c *FakeDeployments) List(label labels.Selector, field fields.Selector) (*expapi.DeploymentList, error) { - obj, err := 
c.Fake.Invokes(NewListAction("deployments", c.Namespace, label, field), &expapi.DeploymentList{}) +func (c *FakeDeployments) List(label labels.Selector, field fields.Selector) (*experimental.DeploymentList, error) { + obj, err := c.Fake.Invokes(NewListAction("deployments", c.Namespace, label, field), &experimental.DeploymentList{}) if obj == nil { return nil, err } - list := &expapi.DeploymentList{} - for _, deployment := range obj.(*expapi.DeploymentList).Items { + list := &experimental.DeploymentList{} + for _, deployment := range obj.(*experimental.DeploymentList).Items { if label.Matches(labels.Set(deployment.Labels)) { list.Items = append(list.Items, deployment) } @@ -54,26 +54,26 @@ func (c *FakeDeployments) List(label labels.Selector, field fields.Selector) (*e return list, err } -func (c *FakeDeployments) Create(deployment *expapi.Deployment) (*expapi.Deployment, error) { +func (c *FakeDeployments) Create(deployment *experimental.Deployment) (*experimental.Deployment, error) { obj, err := c.Fake.Invokes(NewCreateAction("deployments", c.Namespace, deployment), deployment) if obj == nil { return nil, err } - return obj.(*expapi.Deployment), err + return obj.(*experimental.Deployment), err } -func (c *FakeDeployments) Update(deployment *expapi.Deployment) (*expapi.Deployment, error) { +func (c *FakeDeployments) Update(deployment *experimental.Deployment) (*experimental.Deployment, error) { obj, err := c.Fake.Invokes(NewUpdateAction("deployments", c.Namespace, deployment), deployment) if obj == nil { return nil, err } - return obj.(*expapi.Deployment), err + return obj.(*experimental.Deployment), err } func (c *FakeDeployments) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake.Invokes(NewDeleteAction("deployments", c.Namespace, name), &expapi.Deployment{}) + _, err := c.Fake.Invokes(NewDeleteAction("deployments", c.Namespace, name), &experimental.Deployment{}) return err } diff --git a/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go b/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go index 4c3b7369d9d..c7c92027686 100644 --- a/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go +++ b/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go @@ -18,7 +18,7 @@ package testclient import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -31,22 +31,22 @@ type FakeHorizontalPodAutoscalers struct { Namespace string } -func (c *FakeHorizontalPodAutoscalers) Get(name string) (*expapi.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewGetAction("horizontalpodautoscalers", c.Namespace, name), &expapi.HorizontalPodAutoscaler{}) +func (c *FakeHorizontalPodAutoscalers) Get(name string) (*experimental.HorizontalPodAutoscaler, error) { + obj, err := c.Fake.Invokes(NewGetAction("horizontalpodautoscalers", c.Namespace, name), &experimental.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*expapi.HorizontalPodAutoscaler), err + return obj.(*experimental.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) List(label labels.Selector, field fields.Selector) (*expapi.HorizontalPodAutoscalerList, error) { - obj, err := c.Fake.Invokes(NewListAction("horizontalpodautoscalers", c.Namespace, label, field), &expapi.HorizontalPodAutoscalerList{}) +func (c *FakeHorizontalPodAutoscalers) List(label labels.Selector, 
field fields.Selector) (*experimental.HorizontalPodAutoscalerList, error) { + obj, err := c.Fake.Invokes(NewListAction("horizontalpodautoscalers", c.Namespace, label, field), &experimental.HorizontalPodAutoscalerList{}) if obj == nil { return nil, err } - list := &expapi.HorizontalPodAutoscalerList{} - for _, a := range obj.(*expapi.HorizontalPodAutoscalerList).Items { + list := &experimental.HorizontalPodAutoscalerList{} + for _, a := range obj.(*experimental.HorizontalPodAutoscalerList).Items { if label.Matches(labels.Set(a.Labels)) { list.Items = append(list.Items, a) } @@ -54,26 +54,26 @@ func (c *FakeHorizontalPodAutoscalers) List(label labels.Selector, field fields. return list, err } -func (c *FakeHorizontalPodAutoscalers) Create(a *expapi.HorizontalPodAutoscaler) (*expapi.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) Create(a *experimental.HorizontalPodAutoscaler) (*experimental.HorizontalPodAutoscaler, error) { obj, err := c.Fake.Invokes(NewCreateAction("horizontalpodautoscalers", c.Namespace, a), a) if obj == nil { return nil, err } - return obj.(*expapi.HorizontalPodAutoscaler), err + return obj.(*experimental.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) Update(a *expapi.HorizontalPodAutoscaler) (*expapi.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) Update(a *experimental.HorizontalPodAutoscaler) (*experimental.HorizontalPodAutoscaler, error) { obj, err := c.Fake.Invokes(NewUpdateAction("horizontalpodautoscalers", c.Namespace, a), a) if obj == nil { return nil, err } - return obj.(*expapi.HorizontalPodAutoscaler), err + return obj.(*experimental.HorizontalPodAutoscaler), err } func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake.Invokes(NewDeleteAction("horizontalpodautoscalers", c.Namespace, name), &expapi.HorizontalPodAutoscaler{}) + _, err := c.Fake.Invokes(NewDeleteAction("horizontalpodautoscalers", c.Namespace, name), &experimental.HorizontalPodAutoscaler{}) return err } diff --git a/pkg/client/unversioned/testclient/fake_scales.go b/pkg/client/unversioned/testclient/fake_scales.go index 95d7220791f..9c29f61e4f0 100644 --- a/pkg/client/unversioned/testclient/fake_scales.go +++ b/pkg/client/unversioned/testclient/fake_scales.go @@ -17,7 +17,7 @@ limitations under the License. package testclient import ( - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" ) // FakeScales implements ScaleInterface. 
Meant to be embedded into a struct to get a default @@ -27,19 +27,19 @@ type FakeScales struct { Namespace string } -func (c *FakeScales) Get(kind string, name string) (result *expapi.Scale, err error) { +func (c *FakeScales) Get(kind string, name string) (result *experimental.Scale, err error) { action := GetActionImpl{} action.Verb = "get" action.Namespace = c.Namespace action.Resource = kind action.Subresource = "scale" action.Name = name - obj, err := c.Fake.Invokes(action, &expapi.Scale{}) - result = obj.(*expapi.Scale) + obj, err := c.Fake.Invokes(action, &experimental.Scale{}) + result = obj.(*experimental.Scale) return } -func (c *FakeScales) Update(kind string, scale *expapi.Scale) (result *expapi.Scale, err error) { +func (c *FakeScales) Update(kind string, scale *experimental.Scale) (result *experimental.Scale, err error) { action := UpdateActionImpl{} action.Verb = "update" action.Namespace = c.Namespace @@ -47,6 +47,6 @@ func (c *FakeScales) Update(kind string, scale *expapi.Scale) (result *expapi.Sc action.Subresource = "scale" action.Object = scale obj, err := c.Fake.Invokes(action, scale) - result = obj.(*expapi.Scale) + result = obj.(*experimental.Scale) return } diff --git a/pkg/cloudprovider/cloud.go b/pkg/cloudprovider/cloud.go index 50241fd28de..9212f740a6c 100644 --- a/pkg/cloudprovider/cloud.go +++ b/pkg/cloudprovider/cloud.go @@ -81,7 +81,7 @@ type TCPLoadBalancer interface { // if so, what its status is. GetTCPLoadBalancer(name, region string) (status *api.LoadBalancerStatus, exists bool, err error) // EnsureTCPLoadBalancer creates a new tcp load balancer, or updates an existing one. Returns the status of the balancer - EnsureTCPLoadBalancer(name, region string, externalIP net.IP, ports []*api.ServicePort, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error) + EnsureTCPLoadBalancer(name, region string, loadBalancerIP net.IP, ports []*api.ServicePort, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error) // UpdateTCPLoadBalancer updates hosts under the specified load balancer. UpdateTCPLoadBalancer(name, region string, hosts []string) error // EnsureTCPLoadBalancerDeleted deletes the specified load balancer if it diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 1926b2683ba..c6987de8030 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -352,7 +352,7 @@ func makeFirewallName(name string) string { // EnsureTCPLoadBalancer is an implementation of TCPLoadBalancer.EnsureTCPLoadBalancer. // TODO(a-robinson): Don't just ignore specified IP addresses. Check if they're // owned by the project and available to be used, and use them if they are. 
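[Review note] In the GCE implementation that follows, the renamed loadBalancerIP parameter is finally consumed: when non-nil it is copied into the forwarding rule's IPAddress, otherwise GCE assigns an ephemeral address. A sketch of a call site under that contract — the balancer value, host list, and addresses are hypothetical:

    // balancer is assumed to satisfy cloudprovider.TCPLoadBalancer.
    ip := net.ParseIP("203.0.113.10") // the address the Service asked to be pinned to
    status, err := balancer.EnsureTCPLoadBalancer("a1b2c3d4", "us-central1", ip,
        []*api.ServicePort{{Port: 80, Protocol: api.ProtocolTCP}},
        []string{"node-1", "node-2"}, api.ServiceAffinityNone)
    if err != nil {
        return err
    }
    // status.Ingress reports the address that was actually provisioned.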
-func (gce *GCECloud) EnsureTCPLoadBalancer(name, region string, externalIP net.IP, ports []*api.ServicePort, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error) { +func (gce *GCECloud) EnsureTCPLoadBalancer(name, region string, loadBalancerIP net.IP, ports []*api.ServicePort, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error) { if len(hosts) == 0 { return nil, fmt.Errorf("Cannot EnsureTCPLoadBalancer() with no hosts") } @@ -399,6 +399,10 @@ func (gce *GCECloud) EnsureTCPLoadBalancer(name, region string, externalIP net.I PortRange: fmt.Sprintf("%d-%d", minPort, maxPort), Target: gce.targetPoolURL(name, region), } + if loadBalancerIP != nil { + req.IPAddress = loadBalancerIP.String() + } + op, err := gce.service.ForwardingRules.Insert(gce.projectID, region, req).Do() if err != nil && !isHTTPErrorCode(err, http.StatusConflict) { return nil, err diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index 8bf1ef69d90..f5f814b8321 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -525,8 +525,8 @@ func (lb *LoadBalancer) GetTCPLoadBalancer(name, region string) (*api.LoadBalanc // a list of regions (from config) and query/create loadbalancers in // each region. -func (lb *LoadBalancer) EnsureTCPLoadBalancer(name, region string, externalIP net.IP, ports []*api.ServicePort, hosts []string, affinity api.ServiceAffinity) (*api.LoadBalancerStatus, error) { - glog.V(4).Infof("EnsureTCPLoadBalancer(%v, %v, %v, %v, %v, %v)", name, region, externalIP, ports, hosts, affinity) +func (lb *LoadBalancer) EnsureTCPLoadBalancer(name, region string, loadBalancerIP net.IP, ports []*api.ServicePort, hosts []string, affinity api.ServiceAffinity) (*api.LoadBalancerStatus, error) { + glog.V(4).Infof("EnsureTCPLoadBalancer(%v, %v, %v, %v, %v, %v)", name, region, loadBalancerIP, ports, hosts, affinity) if len(ports) > 1 { return nil, fmt.Errorf("multiple ports are not yet supported in openstack load balancers") @@ -618,8 +618,8 @@ func (lb *LoadBalancer) EnsureTCPLoadBalancer(name, region string, externalIP ne SubnetID: lb.opts.SubnetId, Persistence: persistence, } - if externalIP != nil { - createOpts.Address = externalIP.String() + if loadBalancerIP != nil { + createOpts.Address = loadBalancerIP.String() } vip, err := vips.Create(lb.network, createOpts).Extract() diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 62ef191e0c2..d49d720981b 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -20,17 +20,19 @@ import ( "fmt" "time" + "sync/atomic" + "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "sync/atomic" ) const ( @@ -213,6 +215,8 @@ func NewControllerExpectations() *ControllerExpectations { type PodControlInterface interface { // CreateReplica creates new replicated pods according to the spec. 
CreateReplica(namespace string, controller *api.ReplicationController) error + // CreateReplicaOnNode creates a new pod according to the spec on the specified node. + CreateReplicaOnNode(namespace string, ds *experimental.DaemonSet, nodeName string) error // DeletePod deletes the pod identified by podID. DeletePod(namespace string, podID string) error } @@ -290,6 +294,40 @@ func (r RealPodControl) CreateReplica(namespace string, controller *api.Replicat return nil } +func (r RealPodControl) CreateReplicaOnNode(namespace string, ds *experimental.DaemonSet, nodeName string) error { + desiredLabels := getReplicaLabelSet(ds.Spec.Template) + desiredAnnotations, err := getReplicaAnnotationSet(ds.Spec.Template, ds) + if err != nil { + return err + } + prefix := getReplicaPrefix(ds.Name) + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: desiredLabels, + Annotations: desiredAnnotations, + GenerateName: prefix, + }, + } + if err := api.Scheme.Convert(&ds.Spec.Template.Spec, &pod.Spec); err != nil { + return fmt.Errorf("unable to convert pod template: %v", err) + } + // if a pod does not have labels then it cannot be controlled by any controller + if labels.Set(pod.Labels).AsSelector().Empty() { + return fmt.Errorf("unable to create pod replica, no labels") + } + pod.Spec.NodeName = nodeName + if newPod, err := r.KubeClient.Pods(namespace).Create(pod); err != nil { + r.Recorder.Eventf(ds, "failedCreate", "Error creating: %v", err) + return fmt.Errorf("unable to create pod replica: %v", err) + } else { + glog.V(4).Infof("Controller %v created pod %v", ds.Name, newPod.Name) + r.Recorder.Eventf(ds, "successfulCreate", "Created pod: %v", newPod.Name) + } + + return nil +} + func (r RealPodControl) DeletePod(namespace, podID string) error { return r.KubeClient.Pods(namespace).Delete(podID, nil) } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index 215c95b10bd..07574f2500a 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -28,9 +28,9 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/util" diff --git a/pkg/controller/daemon/controller.go b/pkg/controller/daemon/controller.go new file mode 100644 index 00000000000..e6db35b14d1 --- /dev/null +++ b/pkg/controller/daemon/controller.go @@ -0,0 +1,497 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package daemon + +import ( + "reflect" + "sort" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/workqueue" + "k8s.io/kubernetes/pkg/watch" +) + +const ( + // Daemon sets will periodically check that their daemon pods are running as expected. + FullDaemonSetResyncPeriod = 30 * time.Second // TODO: Figure out if this time seems reasonable. + // Nodes don't need relisting. + FullNodeResyncPeriod = 0 + // Daemon pods don't need relisting. + FullDaemonPodResyncPeriod = 0 + // If sending a status update to the API server fails, we retry a finite number of times. + StatusUpdateRetries = 1 +) + +// DaemonSetsController is responsible for synchronizing DaemonSet objects stored +// in the system with actual running pods. +type DaemonSetsController struct { + kubeClient client.Interface + podControl controller.PodControlInterface + + // To allow injection of syncDaemonSet for testing. + syncHandler func(dsKey string) error + // A TTLCache of pod creates/deletes each ds expects to see + expectations controller.ControllerExpectationsInterface + // A store of daemon sets + dsStore cache.StoreToDaemonSetLister + // A store of pods + podStore cache.StoreToPodLister + // A store of nodes + nodeStore cache.StoreToNodeLister + // Watches changes to all daemon sets. + dsController *framework.Controller + // Watches changes to all pods + podController *framework.Controller + // Watches changes to all nodes. + nodeController *framework.Controller + // Daemon sets that need to be synced. + queue *workqueue.Type +} + +func NewDaemonSetsController(kubeClient client.Interface) *DaemonSetsController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartRecordingToSink(kubeClient.Events("")) + + dsc := &DaemonSetsController{ + kubeClient: kubeClient, + podControl: controller.RealPodControl{ + KubeClient: kubeClient, + Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}), + }, + expectations: controller.NewControllerExpectations(), + queue: workqueue.New(), + } + // Manage addition/update of daemon sets.
+ dsc.dsStore.Store, dsc.dsController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func() (runtime.Object, error) { + return dsc.kubeClient.Experimental().DaemonSets(api.NamespaceAll).List(labels.Everything()) + }, + WatchFunc: func(rv string) (watch.Interface, error) { + return dsc.kubeClient.Experimental().DaemonSets(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv) + }, + }, + &experimental.DaemonSet{}, + FullDaemonSetResyncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + ds := obj.(*experimental.DaemonSet) + glog.V(4).Infof("Adding daemon set %s", ds.Name) + dsc.enqueueDaemonSet(obj) + }, + UpdateFunc: func(old, cur interface{}) { + oldDS := old.(*experimental.DaemonSet) + glog.V(4).Infof("Updating daemon set %s", oldDS.Name) + dsc.enqueueDaemonSet(cur) + }, + DeleteFunc: func(obj interface{}) { + ds := obj.(*experimental.DaemonSet) + glog.V(4).Infof("Deleting daemon set %s", ds.Name) + dsc.enqueueDaemonSet(obj) + }, + }, + ) + // Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete + // more pods until all the effects (expectations) of a daemon set's create/delete have been observed. + dsc.podStore.Store, dsc.podController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func() (runtime.Object, error) { + return dsc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) + }, + WatchFunc: func(rv string) (watch.Interface, error) { + return dsc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv) + }, + }, + &api.Pod{}, + FullDaemonPodResyncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: dsc.addPod, + UpdateFunc: dsc.updatePod, + DeleteFunc: dsc.deletePod, + }, + ) + // Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and may need to be + // launched or removed when node labels change. + dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func() (runtime.Object, error) { + return dsc.kubeClient.Nodes().List(labels.Everything(), fields.Everything()) + }, + WatchFunc: func(rv string) (watch.Interface, error) { + return dsc.kubeClient.Nodes().Watch(labels.Everything(), fields.Everything(), rv) + }, + }, + &api.Node{}, + FullNodeResyncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: dsc.addNode, + UpdateFunc: dsc.updateNode, + }, + ) + dsc.syncHandler = dsc.syncDaemonSet + return dsc +} + +// Run begins watching and syncing daemon sets.
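[Review note] Run, defined next, starts the three informers wired above plus a pool of queue workers, and blocks until the stop channel closes. A minimal sketch of the intended wiring (the host and worker count are illustrative):

    kubeClient := client.NewOrDie(&client.Config{Host: "http://127.0.0.1:8080", Version: testapi.Experimental.Version()})
    dsc := daemon.NewDaemonSetsController(kubeClient)
    stopCh := make(chan struct{})
    go dsc.Run(2, stopCh) // 2 concurrent sync workers
    // ...
    close(stopCh) // stops the informers and, via <-stopCh, shuts down the queue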
+func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) { + go dsc.dsController.Run(stopCh) + go dsc.podController.Run(stopCh) + go dsc.nodeController.Run(stopCh) + for i := 0; i < workers; i++ { + go util.Until(dsc.worker, time.Second, stopCh) + } + <-stopCh + glog.Infof("Shutting down Daemon Set Controller") + dsc.queue.ShutDown() +} + +func (dsc *DaemonSetsController) worker() { + for { + func() { + dsKey, quit := dsc.queue.Get() + if quit { + return + } + defer dsc.queue.Done(dsKey) + err := dsc.syncHandler(dsKey.(string)) + if err != nil { + glog.Errorf("Error syncing daemon set with key %s: %v", dsKey.(string), err) + } + }() + } +} + +func (dsc *DaemonSetsController) enqueueAllDaemonSets() { + glog.V(4).Infof("Enqueueing all daemon sets") + ds, err := dsc.dsStore.List() + if err != nil { + glog.Errorf("Error enqueueing daemon sets: %v", err) + return + } + for i := range ds { + dsc.enqueueDaemonSet(&ds[i]) + } +} + +func (dsc *DaemonSetsController) enqueueDaemonSet(obj interface{}) { + key, err := controller.KeyFunc(obj) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + return + } + dsc.queue.Add(key) +} + +func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *experimental.DaemonSet { + sets, err := dsc.dsStore.GetPodDaemonSets(pod) + if err != nil { + glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name) + return nil + } + if len(sets) > 1 { + // More than one item in this list indicates user error. If two daemon + // sets overlap, sort by creation timestamp, subsort by name, then pick + // the first. + glog.Errorf("user error! more than one daemon is selecting pods with labels: %+v", pod.Labels) + sort.Sort(byCreationTimestamp(sets)) + } + return &sets[0] +} + +func (dsc *DaemonSetsController) addPod(obj interface{}) { + pod := obj.(*api.Pod) + glog.V(4).Infof("Pod %s added.", pod.Name) + if ds := dsc.getPodDaemonSet(pod); ds != nil { + dsKey, err := controller.KeyFunc(ds) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + return + } + dsc.expectations.CreationObserved(dsKey) + dsc.enqueueDaemonSet(ds) + } +} + +// When a pod is updated, figure out what sets manage it and wake them +// up. If the labels of the pod have changed we need to awaken both the old +// and new set. old and cur must be *api.Pod types. +func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { + if api.Semantic.DeepEqual(old, cur) { + // A periodic relist will send update events for all known pods. + return + } + curPod := cur.(*api.Pod) + glog.V(4).Infof("Pod %s updated.", curPod.Name) + if curDS := dsc.getPodDaemonSet(curPod); curDS != nil { + dsc.enqueueDaemonSet(curDS) + } + oldPod := old.(*api.Pod) + // If the labels have not changed, then the daemon set responsible for + // the pod is the same as it was before. In that case we have enqueued the daemon + // set above, and do not have to enqueue the set again. + if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) { + // It's ok if both oldDS and curDS are the same, because curDS will set + // the expectations on its run so oldDS will have no effect.
+ if oldDS := dsc.getPodDaemonSet(oldPod); oldDS != nil { + dsc.enqueueDaemonSet(oldDS) + } + } +} + +func (dsc *DaemonSetsController) deletePod(obj interface{}) { + pod, ok := obj.(*api.Pod) + // When a delete is dropped, the relist will notice a pod in the store not + // in the list, leading to the insertion of a tombstone object which contains + // the deleted key/value. Note that this value might be stale. If the pod + // changed labels the new daemon set will not be woken up until the periodic resync. + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.Errorf("Couldn't get object from tombstone %+v", obj) + return + } + pod, ok = tombstone.Obj.(*api.Pod) + if !ok { + glog.Errorf("Tombstone contained object that is not a pod %+v", obj) + return + } + } + glog.V(4).Infof("Pod %s deleted.", pod.Name) + if ds := dsc.getPodDaemonSet(pod); ds != nil { + dsKey, err := controller.KeyFunc(ds) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + return + } + dsc.expectations.DeletionObserved(dsKey) + dsc.enqueueDaemonSet(ds) + } +} + +func (dsc *DaemonSetsController) addNode(obj interface{}) { + // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). + dsc.enqueueAllDaemonSets() +} + +func (dsc *DaemonSetsController) updateNode(old, cur interface{}) { + oldNode := old.(*api.Node) + curNode := cur.(*api.Node) + if api.Semantic.DeepEqual(oldNode.Name, curNode.Name) && api.Semantic.DeepEqual(oldNode.Namespace, curNode.Namespace) && api.Semantic.DeepEqual(oldNode.Labels, curNode.Labels) { + // A periodic relist will send update events for all known nodes. + return + } + // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the updated node (unless it has other work to do, too). + dsc.enqueueAllDaemonSets() +} + +// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes. +func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *experimental.DaemonSet) (map[string][]*api.Pod, error) { + nodeToDaemonPods := make(map[string][]*api.Pod) + daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(labels.Set(ds.Spec.Selector).AsSelector()) + if err != nil { + return nodeToDaemonPods, err + } + for i := range daemonPods.Items { + nodeName := daemonPods.Items[i].Spec.NodeName + nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], &daemonPods.Items[i]) + } + return nodeToDaemonPods, nil +} + +func (dsc *DaemonSetsController) manage(ds *experimental.DaemonSet) { + // Find out which nodes are running the daemon pods selected by ds. + nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) + if err != nil { + glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err) + } + + // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon + // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node. + nodeList, err := dsc.nodeStore.List() + if err != nil { + glog.Errorf("Couldn't get list of nodes when syncing daemon set %+v: %v", ds, err) + } + var nodesNeedingDaemonPods, podsToDelete []string + for i := range nodeList.Items { + // Check if the node satisfies the daemon set's node selector.
+ nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector() + shouldRun := nodeSelector.Matches(labels.Set(nodeList.Items[i].Labels)) + // If the daemon set specifies a node name, check that it matches nodeName. + nodeName := nodeList.Items[i].Name + shouldRun = shouldRun && (ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == nodeName) + daemonPods, isRunning := nodeToDaemonPods[nodeName] + if shouldRun && !isRunning { + // If daemon pod is supposed to be running on node, but isn't, create daemon pod. + nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodeName) + } else if shouldRun && len(daemonPods) > 1 { + // If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods. + // TODO: sort the daemon pods by creation time, so that the oldest is preserved. + for i := 1; i < len(daemonPods); i++ { + podsToDelete = append(podsToDelete, daemonPods[i].Name) + } + } else if !shouldRun && isRunning { + // If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node. + for i := range daemonPods { + podsToDelete = append(podsToDelete, daemonPods[i].Name) + } + } + } + + // We need to set expectations before creating/deleting pods to avoid race conditions. + dsKey, err := controller.KeyFunc(ds) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + return + } + dsc.expectations.SetExpectations(dsKey, len(nodesNeedingDaemonPods), len(podsToDelete)) + + glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v", ds.Name, nodesNeedingDaemonPods) + for i := range nodesNeedingDaemonPods { + if err := dsc.podControl.CreateReplicaOnNode(ds.Namespace, ds, nodesNeedingDaemonPods[i]); err != nil { + glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) + dsc.expectations.CreationObserved(dsKey) + util.HandleError(err) + } + } + + glog.V(4).Infof("Pods to delete for daemon set %s: %+v", ds.Name, podsToDelete) + for i := range podsToDelete { + if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[i]); err != nil { + glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) + dsc.expectations.DeletionObserved(dsKey) + util.HandleError(err) + } + } +} + +func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *experimental.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error { + if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled { + return nil + } + + var updateErr, getErr error + for i := 0; i <= StatusUpdateRetries; i++ { + ds.Status.DesiredNumberScheduled = desiredNumberScheduled + ds.Status.CurrentNumberScheduled = currentNumberScheduled + ds.Status.NumberMisscheduled = numberMisscheduled + _, updateErr = dsClient.Update(ds) + if updateErr == nil { + // successful update + return nil + } + // Update the set with the latest resource version for the next poll + if ds, getErr = dsClient.Get(ds.Name); getErr != nil { + // If the GET fails we can't trust the status anymore. This error + // is bound to be more interesting than the update failure.
+ return getErr + } + } + return updateErr +} + +func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *experimental.DaemonSet) { + glog.Infof("Updating daemon set status") + nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) + if err != nil { + glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err) + } + + nodeList, err := dsc.nodeStore.List() + if err != nil { + glog.Errorf("Couldn't get list of nodes when updating daemon set %+v: %v", ds, err) + } + + var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int + for _, node := range nodeList.Items { + nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector() + shouldRun := nodeSelector.Matches(labels.Set(node.Labels)) + numDaemonPods := len(nodeToDaemonPods[node.Name]) + + if numDaemonPods > 0 { + currentNumberScheduled++ + } + + if shouldRun { + desiredNumberScheduled++ + } else if numDaemonPods > 0 { + numberMisscheduled++ + } + } + + err = storeDaemonSetStatus(dsc.kubeClient.Experimental().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled) + if err != nil { + glog.Errorf("Error storing status for daemon set %+v: %v", ds, err) + } +} + +func (dsc *DaemonSetsController) syncDaemonSet(key string) error { + startTime := time.Now() + defer func() { + glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime)) + }() + obj, exists, err := dsc.dsStore.Store.GetByKey(key) + if err != nil { + glog.Infof("Unable to retrieve ds %v from store: %v", key, err) + dsc.queue.Add(key) + return err + } + if !exists { + glog.V(3).Infof("daemon set has been deleted %v", key) + dsc.expectations.DeleteExpectations(key) + return nil + } + ds := obj.(*experimental.DaemonSet) + + // Don't process a daemon set until all its creations and deletions have been processed. + // For example if daemon set foo asked for 3 new daemon pods in the previous call to manage, + // then we do not want to call manage on foo until the daemon pods have been created. + dsKey, err := controller.KeyFunc(ds) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + return err + } + dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey) + if dsNeedsSync { + dsc.manage(ds) + } + + dsc.updateDaemonSetStatus(ds) + return nil +} + +// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker. +type byCreationTimestamp []experimental.DaemonSet + +func (o byCreationTimestamp) Len() int { return len(o) } +func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o byCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) +} diff --git a/pkg/controller/daemon/controller_test.go b/pkg/controller/daemon/controller_test.go new file mode 100644 index 00000000000..1ab55b2b72c --- /dev/null +++ b/pkg/controller/daemon/controller_test.go @@ -0,0 +1,321 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package daemon + +import ( + "fmt" + "sync" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/client/cache" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/securitycontext" +) + +var ( + simpleDaemonSetLabel = map[string]string{"name": "simple-daemon", "type": "production"} + simpleDaemonSetLabel2 = map[string]string{"name": "simple-daemon", "type": "test"} + simpleNodeLabel = map[string]string{"color": "blue", "speed": "fast"} + simpleNodeLabel2 = map[string]string{"color": "red", "speed": "fast"} +) + +type FakePodControl struct { + daemonSet []experimental.DaemonSet + deletePodName []string + lock sync.Mutex + err error +} + +func init() { + api.ForTesting_ReferencesAllowBlankSelfLinks = true +} + +func (f *FakePodControl) CreateReplica(namespace string, spec *api.ReplicationController) error { + return nil +} + +func (f *FakePodControl) CreateReplicaOnNode(namespace string, ds *experimental.DaemonSet, nodeName string) error { + f.lock.Lock() + defer f.lock.Unlock() + if f.err != nil { + return f.err + } + f.daemonSet = append(f.daemonSet, *ds) + return nil +} + +func (f *FakePodControl) DeletePod(namespace string, podName string) error { + f.lock.Lock() + defer f.lock.Unlock() + if f.err != nil { + return f.err + } + f.deletePodName = append(f.deletePodName, podName) + return nil +} +func (f *FakePodControl) clear() { + f.lock.Lock() + defer f.lock.Unlock() + f.deletePodName = []string{} + f.daemonSet = []experimental.DaemonSet{} +} + +func newDaemonSet(name string) *experimental.DaemonSet { + return &experimental.DaemonSet{ + TypeMeta: api.TypeMeta{APIVersion: testapi.Experimental.Version()}, + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: api.NamespaceDefault, + }, + Spec: experimental.DaemonSetSpec{ + Selector: simpleDaemonSetLabel, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + DNSPolicy: api.DNSDefault, + }, + }, + }, + } +} + +func newNode(name string, label map[string]string) *api.Node { + return &api.Node{ + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: label, + Namespace: api.NamespaceDefault, + }, + } +} + +func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]string) { + for i := startIndex; i < startIndex+numNodes; i++ { + nodeStore.Add(newNode(fmt.Sprintf("node-%d", i), label)) + } +} + +func newPod(podName string, nodeName string, label map[string]string) *api.Pod { + pod := &api.Pod{ + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, + ObjectMeta: api.ObjectMeta{ + GenerateName: podName, + Labels: label, + Namespace: api.NamespaceDefault, + }, + Spec: api.PodSpec{ + 
NodeName: nodeName, + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + DNSPolicy: api.DNSDefault, + }, + } + api.GenerateName(api.SimpleNameGenerator, &pod.ObjectMeta) + return pod +} + +func addPods(podStore cache.Store, nodeName string, label map[string]string, number int) { + for i := 0; i < number; i++ { + podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label)) + } +} + +func newTestController() (*DaemonSetsController, *FakePodControl) { + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) + manager := NewDaemonSetsController(client) + podControl := &FakePodControl{} + manager.podControl = podControl + return manager, podControl +} + +func validateSyncDaemonSets(t *testing.T, fakePodControl *FakePodControl, expectedCreates, expectedDeletes int) { + if len(fakePodControl.daemonSet) != expectedCreates { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.daemonSet)) + } + if len(fakePodControl.deletePodName) != expectedDeletes { + t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.deletePodName)) + } +} + +func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *experimental.DaemonSet, podControl *FakePodControl, expectedCreates, expectedDeletes int) { + key, err := controller.KeyFunc(ds) + if err != nil { + t.Errorf("Could not get key for daemon set.") + } + manager.syncHandler(key) + validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes) +} + +// DaemonSets without node selectors should launch pods on every node. +func TestSimpleDaemonSetLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) +} + +// With no nodes in the cluster, a DaemonSet should not launch any pods. +func TestNoNodesDoesNothing(t *testing.T) { + manager, podControl := newTestController() + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// With a single node and no selector, a DaemonSet should launch exactly one pod on it. +func TestOneNodeDaemonLaunchesPod(t *testing.T) { + manager, podControl := newTestController() + manager.nodeStore.Add(newNode("only-node", nil)) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods. +func TestDealsWithExistingPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel, 1) + addPods(manager.podStore.Store, "node-2", simpleDaemonSetLabel, 2) + addPods(manager.podStore.Store, "node-3", simpleDaemonSetLabel, 5) + addPods(manager.podStore.Store, "node-4", simpleDaemonSetLabel2, 2) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5) +} + +// Daemon with node selector should launch pods on nodes matching selector.
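[Review note] The selector-driven tests that follow all use the harness pattern established above: stock the fake stores directly (no apiserver round-trips), run a single sync, and count the creates/deletes recorded by FakePodControl. Schematically, inside a test body and using only the helpers defined in this file:

    manager, podControl := newTestController()
    addNodes(manager.nodeStore.Store, 0, 3, simpleNodeLabel) // three labeled nodes
    ds := newDaemonSet("example")
    ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
    manager.dsStore.Add(ds)
    // One pod per matching node: expect 3 creates, 0 deletes.
    syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 0)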
+func TestSelectorDaemonLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + daemon := newDaemonSet("foo") + daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(daemon) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0) +} + +// Daemon with node selector should delete pods from nodes that do not satisfy selector. +func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel) + addPods(manager.podStore.Store, "node-0", simpleDaemonSetLabel2, 2) + addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel, 3) + addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel2, 1) + addPods(manager.podStore.Store, "node-4", simpleDaemonSetLabel, 1) + daemon := newDaemonSet("foo") + daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(daemon) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 5, 4) +} + +// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes. +func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel) + addPods(manager.podStore.Store, "node-0", simpleDaemonSetLabel, 1) + addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel, 3) + addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel2, 2) + addPods(manager.podStore.Store, "node-2", simpleDaemonSetLabel, 4) + addPods(manager.podStore.Store, "node-6", simpleDaemonSetLabel, 13) + addPods(manager.podStore.Store, "node-7", simpleDaemonSetLabel2, 4) + addPods(manager.podStore.Store, "node-9", simpleDaemonSetLabel, 1) + addPods(manager.podStore.Store, "node-9", simpleDaemonSetLabel2, 1) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20) +} + +// DaemonSet with node selector which does not match any node labels should not launch pods. +func TestBadSelectorDaemonDoesNothing(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// DaemonSet with node name should launch pod on node with corresponding name. +func TestNameDaemonSetLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeName = "node-0" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSet with node name that does not exist should not launch pods. 
+func TestBadNameDaemonSetDoesNothing(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeName = "node-10" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node. +func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-6" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing. +func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-0" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} diff --git a/pkg/controller/daemon/doc.go b/pkg/controller/daemon/doc.go new file mode 100644 index 00000000000..db689ac1bb6 --- /dev/null +++ b/pkg/controller/daemon/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package daemon contains logic for watching and synchronizing +// daemons. 
+package daemon diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 49a439508f6..ab06f869a51 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -26,8 +26,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/endpoints" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go index 49314368ffd..9fb4e1bae55 100644 --- a/pkg/controller/endpoint/endpoints_controller_test.go +++ b/pkg/controller/endpoint/endpoints_controller_test.go @@ -26,8 +26,8 @@ import ( endptspkg "k8s.io/kubernetes/pkg/api/endpoints" _ "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" ) diff --git a/pkg/controller/framework/controller.go b/pkg/controller/framework/controller.go index d874e751d7f..ca182343aa4 100644 --- a/pkg/controller/framework/controller.go +++ b/pkg/controller/framework/controller.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" ) diff --git a/pkg/controller/framework/controller_test.go b/pkg/controller/framework/controller_test.go index 836790b3c83..d5fb51871fe 100644 --- a/pkg/controller/framework/controller_test.go +++ b/pkg/controller/framework/controller_test.go @@ -24,7 +24,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" @@ -335,10 +335,6 @@ func TestUpdate(t *testing.T) { }, ) - // Run the controller and run it until we close stop. - stop := make(chan struct{}) - go controller.Run(stop) - pod := func(name, check string) *api.Pod { return &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -371,11 +367,18 @@ func TestUpdate(t *testing.T) { }, } - // run every test a few times, in parallel const threads = 3 + testDoneWG.Add(threads * len(tests)) + + // Run the controller and run it until we close stop. 
+ // Once Run() is called, calls to testDoneWG.Done() might start, so + // all testDoneWG.Add() calls must happen before this point + stop := make(chan struct{}) + go controller.Run(stop) + + // run every test a few times, in parallel var wg sync.WaitGroup wg.Add(threads * len(tests)) - testDoneWG.Add(threads * len(tests)) for i := 0; i < threads; i++ { for j, f := range tests { go func(name string, f func(string)) { diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index 4b95ba32131..198c608880a 100644 --- a/pkg/controller/namespace/namespace_controller.go +++ b/pkg/controller/namespace/namespace_controller.go @@ -22,8 +22,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 18541f5ea82..2e9bdc0c85f 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -25,8 +25,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" diff --git a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go index edd8520bb53..1cb45dff4bb 100644 --- a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go @@ -23,8 +23,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" diff --git a/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go b/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go index 9e3846f3718..6b8e8b8a501 100644 --- a/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go @@ -22,8 +22,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" diff --git a/pkg/controller/persistentvolume/types.go b/pkg/controller/persistentvolume/types.go index bb0e094f766..bcce67039e4 100644 --- a/pkg/controller/persistentvolume/types.go +++ b/pkg/controller/persistentvolume/types.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" ) // persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity. 
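The reordering in TestUpdate above is the standard sync.WaitGroup constraint: every Add must happen before any goroutine that can call Done is started, otherwise the counter can reach zero early and Wait can return while work is still outstanding. A self-contained illustration (not part of the commit):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	jobs := []string{"a", "b", "c"}

	// Reserve the full count first...
	wg.Add(len(jobs))
	// ...and only then start the goroutines that call Done. Calling Add after
	// workers are already running would race with the Wait below.
	for _, j := range jobs {
		go func(name string) {
			defer wg.Done()
			fmt.Println("processed", name)
		}(j)
	}
	wg.Wait() // returns only after all three Done calls
}
```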
diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go b/pkg/controller/podautoscaler/horizontal.go similarity index 89% rename from pkg/controller/autoscaler/horizontalpodautoscaler_controller.go rename to pkg/controller/podautoscaler/horizontal.go index 11e98ccadca..938616c8116 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package autoscalercontroller +package podautoscaler import ( "fmt" @@ -23,9 +23,9 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/experimental" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/controller/autoscaler/metrics" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" @@ -40,7 +40,7 @@ const ( tolerance = 0.1 ) -type HorizontalPodAutoscalerController struct { +type HorizontalController struct { client client.Interface metricsClient metrics.MetricsClient } @@ -48,14 +48,14 @@ type HorizontalPodAutoscalerController struct { var downscaleForbiddenWindow, _ = time.ParseDuration("20m") var upscaleForbiddenWindow, _ = time.ParseDuration("3m") -func New(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalPodAutoscalerController { - return &HorizontalPodAutoscalerController{ +func NewHorizontalController(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalController { + return &HorizontalController{ client: client, metricsClient: metricsClient, } } -func (a *HorizontalPodAutoscalerController) Run(syncPeriod time.Duration) { +func (a *HorizontalController) Run(syncPeriod time.Duration) { go util.Until(func() { if err := a.reconcileAutoscalers(); err != nil { glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err) @@ -63,7 +63,7 @@ func (a *HorizontalPodAutoscalerController) Run(syncPeriod time.Duration) { }, syncPeriod, util.NeverStop) } -func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { +func (a *HorizontalController) reconcileAutoscalers() error { ns := api.NamespaceAll list, err := a.client.Experimental().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) if err != nil { @@ -134,7 +134,7 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { desiredReplicas = currentReplicas } - status := expapi.HorizontalPodAutoscalerStatus{ + status := experimental.HorizontalPodAutoscalerStatus{ CurrentReplicas: currentReplicas, DesiredReplicas: desiredReplicas, CurrentConsumption: currentConsumption, diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go b/pkg/controller/podautoscaler/horizontal_test.go similarity index 78% rename from pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go rename to pkg/controller/podautoscaler/horizontal_test.go index bdffbbb18ba..1bb03ae810e 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
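For orientation, the numbers in the test that follows are consistent with a simple proportional rule: scale so that per-pod consumption approaches the target, skip changes within the 10% tolerance, and clamp to [MinCount, MaxCount]. With one replica consuming 650m CPU against a 300m target, ceil(650/300 × 1) = 3, which is the DesiredReplicas the test asserts. A sketch under those assumptions; the controller's full logic, including the upscale/downscale forbidden windows, is not reproduced here:

```go
package main

import (
	"fmt"
	"math"
)

const tolerance = 0.1 // matches the constant in the controller above

// desiredReplicas is a hypothetical condensation of the proportional rule the
// test data implies; the name and clamping order are assumptions.
func desiredReplicas(current int, usageMilli, targetMilli int64, min, max int) int {
	ratio := float64(usageMilli) / float64(targetMilli)
	if math.Abs(ratio-1.0) <= tolerance {
		return current // close enough to target: leave the scale alone
	}
	desired := int(math.Ceil(ratio * float64(current)))
	if desired < min {
		desired = min
	}
	if desired > max {
		desired = max
	}
	return desired
}

func main() {
	// 650m observed vs. a 300m target on 1 replica => 3, as the test expects.
	fmt.Println(desiredReplicas(1, 650, 300, 1, 5))
}
```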
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package autoscalercontroller +package podautoscaler import ( "fmt" @@ -26,9 +26,9 @@ import ( _ "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/experimental" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/controller/autoscaler/metrics" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" @@ -57,14 +57,14 @@ type fakeMetricsClient struct { } type fakeResourceConsumptionClient struct { - metrics map[api.ResourceName]expapi.ResourceConsumption + metrics map[api.ResourceName]experimental.ResourceConsumption } func (f *fakeMetricsClient) ResourceConsumption(namespace string) metrics.ResourceConsumptionClient { return f.consumption } -func (f *fakeResourceConsumptionClient) Get(resource api.ResourceName, selector map[string]string) (*expapi.ResourceConsumption, error) { +func (f *fakeResourceConsumptionClient) Get(resource api.ResourceName, selector map[string]string) (*experimental.ResourceConsumption, error) { consumption, found := f.metrics[resource] if !found { return nil, fmt.Errorf("resource not found: %v", resource) @@ -110,15 +110,15 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { - hpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscalerList{ - Items: []expapi.HorizontalPodAutoscaler{ + hpaResponse := serverResponse{http.StatusOK, &experimental.HorizontalPodAutoscalerList{ + Items: []experimental.HorizontalPodAutoscaler{ { ObjectMeta: api.ObjectMeta{ Name: hpaName, Namespace: namespace, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Kind: "replicationController", Name: rcName, Namespace: namespace, @@ -126,36 +126,36 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { }, MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")}, }, }}}} - scaleResponse := serverResponse{http.StatusOK, &expapi.Scale{ + scaleResponse := serverResponse{http.StatusOK, &experimental.Scale{ ObjectMeta: api.ObjectMeta{ Name: rcName, Namespace: namespace, }, - Spec: expapi.ScaleSpec{ + Spec: experimental.ScaleSpec{ Replicas: 1, }, - Status: expapi.ScaleStatus{ + Status: experimental.ScaleStatus{ Replicas: 1, Selector: map[string]string{"name": podNameLabel}, }, }} - status := expapi.HorizontalPodAutoscalerStatus{ + status := experimental.HorizontalPodAutoscalerStatus{ CurrentReplicas: 1, DesiredReplicas: 3, } - updateHpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscaler{ + updateHpaResponse := serverResponse{http.StatusOK, &experimental.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: hpaName, Namespace: namespace, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Kind: "replicationController", Name: rcName, Namespace: namespace, @@ -163,7 +163,7 @@ func 
TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { }, MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")}, }, Status: &status, }} @@ -177,12 +177,12 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { defer testServer.Close() kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()}) - fakeRC := fakeResourceConsumptionClient{metrics: map[api.ResourceName]expapi.ResourceConsumption{ + fakeRC := fakeResourceConsumptionClient{metrics: map[api.ResourceName]experimental.ResourceConsumption{ api.ResourceCPU: {Resource: api.ResourceCPU, Quantity: resource.MustParse("650m")}, }} fake := fakeMetricsClient{consumption: &fakeRC} - hpaController := New(kubeClient, &fake) + hpaController := NewHorizontalController(kubeClient, &fake) err := hpaController.reconcileAutoscalers() if err != nil { @@ -195,7 +195,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { if err != nil { t.Fatal("Failed to decode: %v %v", err) } - hpa, _ := obj.(*expapi.HorizontalPodAutoscaler) + hpa, _ := obj.(*experimental.HorizontalPodAutoscaler) assert.Equal(t, 3, hpa.Status.DesiredReplicas) assert.Equal(t, int64(650), hpa.Status.CurrentConsumption.Quantity.MilliValue()) diff --git a/pkg/controller/autoscaler/metrics/metrics_client.go b/pkg/controller/podautoscaler/metrics/metrics_client.go similarity index 85% rename from pkg/controller/autoscaler/metrics/metrics_client.go rename to pkg/controller/podautoscaler/metrics/metrics_client.go index cae98e33ee2..7f631309e43 100644 --- a/pkg/controller/autoscaler/metrics/metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/metrics_client.go @@ -25,8 +25,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/apis/experimental" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" @@ -47,12 +47,12 @@ type MetricsClient interface { type ResourceConsumptionClient interface { // Gets average resource consumption for pods under the given selector. - Get(resourceName api.ResourceName, selector map[string]string) (*expapi.ResourceConsumption, error) + Get(resourceName api.ResourceName, selector map[string]string) (*experimental.ResourceConsumption, error) } // Aggregates results into ResourceConsumption. Also returns number of // pods included in the aggregation. 
-type metricAggregator func(heapster.MetricResultList) (expapi.ResourceConsumption, int) +type metricAggregator func(heapster.MetricResultList) (experimental.ResourceConsumption, int) type metricDefinition struct { name string @@ -76,23 +76,23 @@ func NewHeapsterMetricsClient(client client.Interface) *HeapsterMetricsClient { var heapsterMetricDefinitions = map[api.ResourceName]metricDefinition{ api.ResourceCPU: {"cpu-usage", - func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) { + func(metrics heapster.MetricResultList) (experimental.ResourceConsumption, int) { sum, count := calculateSumFromLatestSample(metrics) value := "0" if count > 0 { // assumes that cpu usage is in millis value = fmt.Sprintf("%dm", sum/uint64(count)) } - return expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count + return experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count }}, api.ResourceMemory: {"memory-usage", - func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) { + func(metrics heapster.MetricResultList) (experimental.ResourceConsumption, int) { sum, count := calculateSumFromLatestSample(metrics) value := int64(0) if count > 0 { value = int64(sum) / int64(count) } - return expapi.ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count + return experimental.ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count }}, } @@ -104,7 +104,7 @@ func (h *HeapsterMetricsClient) ResourceConsumption(namespace string) ResourceCo } } -func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, selector map[string]string) (*expapi.ResourceConsumption, error) { +func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, selector map[string]string) (*experimental.ResourceConsumption, error) { podList, err := h.client.Pods(h.namespace). 
List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything()) @@ -118,7 +118,7 @@ func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, s return h.getForPods(resourceName, podNames) } -func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*expapi.ResourceConsumption, error) { +func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*experimental.ResourceConsumption, error) { metricSpec, metricDefined := h.resourceDefinitions[resourceName] if !metricDefined { return nil, fmt.Errorf("heapster metric not defined for %v", resourceName) diff --git a/pkg/controller/autoscaler/metrics/metrics_client_test.go b/pkg/controller/podautoscaler/metrics/metrics_client_test.go similarity index 100% rename from pkg/controller/autoscaler/metrics/metrics_client_test.go rename to pkg/controller/podautoscaler/metrics/metrics_client_test.go diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index 942a1be7676..082abe963e0 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -24,9 +24,9 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index cd58f9e68c2..404099a0271 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -27,8 +27,9 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/client/unversioned/testclient" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/labels" @@ -69,6 +70,10 @@ func (f *FakePodControl) CreateReplica(namespace string, spec *api.ReplicationCo return nil } +func (f *FakePodControl) CreateReplicaOnNode(namespace string, daemon *experimental.DaemonSet, nodeName string) error { + return nil +} + func (f *FakePodControl) DeletePod(namespace string, podName string) error { f.lock.Lock() defer f.lock.Unlock() diff --git a/pkg/controller/service/servicecontroller.go b/pkg/controller/service/servicecontroller.go index e92e4b2f270..688283b015f 100644 --- a/pkg/controller/service/servicecontroller.go +++ b/pkg/controller/service/servicecontroller.go @@ -26,9 +26,9 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/types" @@ -378,28 +378,14 @@ func (s *ServiceController) createExternalLoadBalancer(service *api.Service) err return err } name := 
s.loadBalancerName(service) - if len(service.Spec.ExternalIPs) > 0 { - for _, publicIP := range service.Spec.ExternalIPs { - // TODO: Make this actually work for multiple IPs by using different - // names for each. For now, we'll just create the first and break. - status, err := s.balancer.EnsureTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP), - ports, hostsFromNodeList(&nodes), service.Spec.SessionAffinity) - if err != nil { - return err - } else { - service.Status.LoadBalancer = *status - } - break - } + status, err := s.balancer.EnsureTCPLoadBalancer(name, s.zone.Region, net.ParseIP(service.Spec.LoadBalancerIP), + ports, hostsFromNodeList(&nodes), service.Spec.SessionAffinity) + if err != nil { + return err } else { - status, err := s.balancer.EnsureTCPLoadBalancer(name, s.zone.Region, nil, - ports, hostsFromNodeList(&nodes), service.Spec.SessionAffinity) - if err != nil { - return err - } else { - service.Status.LoadBalancer = *status - } + service.Status.LoadBalancer = *status } + return nil } @@ -477,6 +463,9 @@ func needsUpdate(oldService *api.Service, newService *api.Service) bool { if !portsEqualForLB(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity { return true } + if !loadBalancerIPsAreEqual(oldService, newService) { + return true + } if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) { return true } @@ -689,3 +678,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, func wantsExternalLoadBalancer(service *api.Service) bool { return service.Spec.Type == api.ServiceTypeLoadBalancer } + +func loadBalancerIPsAreEqual(oldService, newService *api.Service) bool { + return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP +} diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index 90c2702bd56..0bb3b5e4884 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -23,8 +23,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 7de46edef14..c127d5f5955 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -24,8 +24,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" apierrors "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index 681af5ee3cf..bd7c08b4550 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -160,7 +160,7 @@ func NewTestFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return t.Printer, t.Err }, - Validator: 
func(validate bool) (validation.Schema, error) { + Validator: func(validate bool, cacheDir string) (validation.Schema, error) { return t.Validator, t.Err }, DefaultNamespace: func() (string, bool, error) { @@ -215,7 +215,7 @@ func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return t.Printer, t.Err }, - Validator: func(validate bool) (validation.Schema, error) { + Validator: func(validate bool, cacheDir string) (validation.Schema, error) { return t.Validator, t.Err }, DefaultNamespace: func() (string, bool, error) { diff --git a/pkg/kubectl/cmd/create.go b/pkg/kubectl/cmd/create.go index f0d43ffe6ba..4753340886e 100644 --- a/pkg/kubectl/cmd/create.go +++ b/pkg/kubectl/cmd/create.go @@ -65,7 +65,7 @@ func NewCmdCreate(f *cmdutil.Factory, out io.Writer) *cobra.Command { usage := "Filename, directory, or URL to file to use to create the resource" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) cmd.MarkFlagRequired("filename") - cmdutil.AddValidateFlag(cmd) + cmdutil.AddValidateFlags(cmd) cmdutil.AddOutputFlagsForMutation(cmd) return cmd } @@ -78,7 +78,7 @@ func ValidateArgs(cmd *cobra.Command, args []string) error { } func RunCreate(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, options *CreateOptions) error { - schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate")) + schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir")) if err != nil { return err } diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index fd17b30ad57..7baaba3967f 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -67,11 +67,16 @@ $ kubectl delete pods --all` ) func NewCmdDelete(f *cmdutil.Factory, out io.Writer) *cobra.Command { - p := kubectl.NewHumanReadablePrinter(false, false, false, false, []string{}) - validArgs := p.HandledResources() - options := &DeleteOptions{} + // retrieve a list of handled resources from printer as valid args + validArgs := []string{} + p, err := f.Printer(nil, false, false, false, false, []string{}) + cmdutil.CheckErr(err) + if p != nil { + validArgs = p.HandledResources() + } + cmd := &cobra.Command{ Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", Short: "Delete resources by filenames, stdin, resources and names, or by resources and label selector.", diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index 790683bd893..4a98991aaf3 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -57,7 +57,7 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &ExposeOptions{} cmd := &cobra.Command{ - Use: "expose (-f FILENAME | TYPE NAME) --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [----external-ip=external-ip-of-service] [--type=type]", + Use: "expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [----external-ip=external-ip-of-service] [--type=type]", Short: "Take a replicated application and expose it as Kubernetes Service", Long: expose_long, Example: expose_example, @@ -70,11 +70,11 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("generator", "service/v2", "The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. 
The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.") cmd.Flags().String("protocol", "TCP", "The network protocol for the service to be created. Default is 'tcp'.") cmd.Flags().Int("port", -1, "The port that the service should serve on. Copied from the resource being exposed, if unspecified") - cmd.MarkFlagRequired("port") cmd.Flags().String("type", "", "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP'.") // TODO: remove create-external-load-balancer in code on or after Aug 25, 2016. cmd.Flags().Bool("create-external-load-balancer", false, "If true, create an external load balancer for this service (trumped by --type). Implementation is cloud provider dependent. Default is 'false'.") cmd.Flags().MarkDeprecated("create-external-load-balancer", "use --type=\"LoadBalancer\" instead") + cmd.Flags().String("load-balancer-ip", "", "IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).") cmd.Flags().String("selector", "", "A label selector to use for this service. If empty (the default) infer the selector from the replication controller.") cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.") cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without creating it.") diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index fea289d95dd..04d8fb96f07 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -74,10 +74,16 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7` // NewCmdGet creates a command object for the generic "get" action, which // retrieves one or more resources from a server. func NewCmdGet(f *cmdutil.Factory, out io.Writer) *cobra.Command { - p := kubectl.NewHumanReadablePrinter(false, false, false, false, []string{}) - validArgs := p.HandledResources() options := &GetOptions{} + // retrieve a list of handled resources from printer as valid args + validArgs := []string{} + p, err := f.Printer(nil, false, false, false, false, []string{}) + cmdutil.CheckErr(err) + if p != nil { + validArgs = p.HandledResources() + } + cmd := &cobra.Command{ Use: "get [(-o|--output=)json|yaml|wide|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...)
[flags]", Short: "Display one or many resources", diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 44255881358..6de412d8e71 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -65,6 +65,8 @@ $ kubectl label pods foo bar-` func NewCmdLabel(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &LabelOptions{} + + // retrieve a list of handled resources from printer as valid args validArgs := []string{} p, err := f.Printer(nil, false, false, false, false, []string{}) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/log.go b/pkg/kubectl/cmd/log.go index d5c601dd24b..b182f3c9e4c 100644 --- a/pkg/kubectl/cmd/log.go +++ b/pkg/kubectl/cmd/log.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" + client "k8s.io/kubernetes/pkg/client/unversioned" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/util/sets" ) @@ -145,7 +146,10 @@ func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string if cmdutil.GetFlagBool(cmd, "previous") { previous = true } + return handleLog(client, namespace, podID, container, follow, previous, out) +} +func handleLog(client *client.Client, namespace, podID, container string, follow, previous bool, out io.Writer) error { readCloser, err := client.RESTClient.Get(). Namespace(namespace). Name(podID). diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index ad352707141..34a0fcc2e79 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -81,7 +81,7 @@ func NewCmdReplace(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Bool("cascade", false, "Only relevant during a force replace. If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController).") cmd.Flags().Int("grace-period", -1, "Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.") cmd.Flags().Duration("timeout", 0, "Only relevant during a force replace. 
The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object") - cmdutil.AddValidateFlag(cmd) + cmdutil.AddValidateFlags(cmd) cmdutil.AddOutputFlagsForMutation(cmd) return cmd } @@ -90,7 +90,7 @@ func RunReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []st if len(os.Args) > 1 && os.Args[1] == "update" { printDeprecationWarning("replace", "update") } - schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate")) + schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir")) if err != nil { return err } @@ -143,7 +143,7 @@ func RunReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []st } func forceReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool, options *ReplaceOptions) error { - schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate")) + schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir")) if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index b85b0cf6887..1f86bc21b45 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -95,7 +95,7 @@ func NewCmdRollingUpdate(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("deployment-label-key", "deployment", "The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise") cmd.Flags().Bool("dry-run", false, "If true, print out the changes that would be made, but don't actually make them.") cmd.Flags().Bool("rollback", false, "If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout") - cmdutil.AddValidateFlag(cmd) + cmdutil.AddValidateFlags(cmd) cmdutil.AddPrinterFlags(cmd) return cmd } @@ -172,7 +172,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg mapper, typer := f.Object() if len(filename) != 0 { - schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate")) + schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir")) if err != nil { return err } diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 5d2730967e1..d79f40d3803 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -92,6 +92,8 @@ func NewCmdRun(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *c cmd.Flags().Bool("attach", false, "If true, wait for the Pod to start running, and then attach to the Pod as if 'kubectl attach ...' were called. Default false, unless '-i/--interactive' is set, in which case the default is true.") cmd.Flags().String("restart", "Always", "The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and --replicas must be 1. Default 'Always'") cmd.Flags().Bool("command", false, "If true and extra arguments are present, use them as the 'command' field in the container, rather than the 'args' field which is the default.") + cmd.Flags().String("requests", "", "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'") + cmd.Flags().String("limits", "", "The resource requirement limits for this container. 
For example, 'cpu=200m,memory=512Mi'") return cmd } @@ -239,14 +241,15 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob return nil } -func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) error { +func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) (status api.PodPhase, err error) { for { pod, err := c.Pods(pod.Namespace).Get(pod.Name) if err != nil { - return err + return api.PodUnknown, err } + ready := false if pod.Status.Phase == api.PodRunning { - ready := true + ready = true for _, status := range pod.Status.ContainerStatuses { if !status.Ready { ready = false @@ -254,10 +257,13 @@ func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) error { } } if ready { - return nil + return api.PodRunning, nil } } - fmt.Fprintf(out, "Waiting for pod %s/%s to be running\n", pod.Namespace, pod.Name) + if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { + return pod.Status.Phase, nil + } + fmt.Fprintf(out, "Waiting for pod %s/%s to be running, status is %s, pod ready: %v\n", pod.Namespace, pod.Name, pod.Status.Phase, ready) time.Sleep(2 * time.Second) continue } @@ -280,9 +286,13 @@ func handleAttachReplicationController(c *client.Client, controller *api.Replica } func handleAttachPod(c *client.Client, pod *api.Pod, opts *AttachOptions) error { - if err := waitForPodRunning(c, pod, opts.Out); err != nil { + status, err := waitForPodRunning(c, pod, opts.Out) + if err != nil { return err } + if status == api.PodSucceeded || status == api.PodFailed { + return handleLog(c, pod.Namespace, pod.Name, pod.Spec.Containers[0].Name, false, false, opts.Out) + } opts.Client = c opts.PodName = pod.Name opts.Namespace = pod.Namespace diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index cdef2e17258..6020d967f7f 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -76,8 +76,8 @@ func NewCmdScale(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to scale.") cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.") cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.") - cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a scale operation, zero means don't wait.") cmd.MarkFlagRequired("replicas") + cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a scale operation, zero means don't wait.") cmdutil.AddOutputFlagsForMutation(cmd) usage := "Filename, directory, or URL to a file identifying the replication controller to set a new size" @@ -93,7 +93,7 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri count := cmdutil.GetFlagInt(cmd, "replicas") if count < 0 { - return cmdutil.UsageError(cmd, "--replicas=COUNT TYPE NAME") + return cmdutil.UsageError(cmd, "--replicas=COUNT is required, and COUNT must be greater than or equal to 0") } cmdNamespace, enforceNamespace, err := f.DefaultNamespace() diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 3c16f49f4a5..2d90df2ade5 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -17,10 +17,13 @@ limitations under the License. 
package util import ( + "bytes" "flag" "fmt" "io" + "io/ioutil" "os" + "path" "strconv" "github.com/spf13/cobra" @@ -76,7 +79,7 @@ type Factory struct { // LabelsForObject returns the labels associated with the provided object LabelsForObject func(object runtime.Object) (map[string]string, error) // Returns a schema that can validate objects stored on disk. - Validator func(validate bool) (validation.Schema, error) + Validator func(validate bool, cacheDir string) (validation.Schema, error) // Returns the default namespace to use in cases where no // other namespace is specified and whether the namespace was // overriden. @@ -214,13 +217,25 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { } return kubectl.ReaperFor(mapping.Kind, client) }, - Validator: func(validate bool) (validation.Schema, error) { + Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion("") if err != nil { return nil, err } - return &clientSwaggerSchema{client, client.ExperimentalClient, api.Scheme}, nil + dir := cacheDir + if len(dir) > 0 { + version, err := client.ServerVersion() + if err != nil { + return nil, err + } + dir = path.Join(cacheDir, version.String()) + } + return &clientSwaggerSchema{ + c: client, + ec: client.ExperimentalClient, + cacheDir: dir, + }, nil } return validation.NullSchema{}, nil }, @@ -273,18 +288,49 @@ func getServicePorts(spec api.ServiceSpec) []string { } type clientSwaggerSchema struct { - c *client.Client - ec *client.ExperimentalClient - t runtime.ObjectTyper + c *client.Client + ec *client.ExperimentalClient + cacheDir string } -func getSchemaAndValidate(c *client.RESTClient, data []byte, group, version string) error { - schemaData, err := c.Get(). - AbsPath("/swaggerapi", group, version). - Do(). - Raw() - if err != nil { - return err +const schemaFileName = "schema.json" + +type schemaClient interface { + Get() *client.Request +} + +func getSchemaAndValidate(c schemaClient, data []byte, group, version, cacheDir string) (err error) { + var schemaData []byte + cacheFile := path.Join(cacheDir, group, version, schemaFileName) + + if len(cacheDir) != 0 { + if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) { + return err + } + } + if schemaData == nil { + schemaData, err = c.Get(). + AbsPath("/swaggerapi", group, version). + Do(). + Raw() + if err != nil { + return err + } + if len(cacheDir) != 0 { + if err = os.MkdirAll(path.Join(cacheDir, group, version), 0755); err != nil { + return err + } + tmpFile, err := ioutil.TempFile(cacheDir, "schema") + if err != nil { + return err + } + if _, err := io.Copy(tmpFile, bytes.NewBuffer(schemaData)); err != nil { + return err + } + if err := os.Link(tmpFile.Name(), cacheFile); err != nil && !os.IsExist(err) { + return err + } + } } schema, err := validation.NewSwaggerSchemaFromBytes(schemaData) if err != nil { @@ -305,9 +351,9 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { // If experimental fails, return error from stable api. // TODO: Figure out which group to try once multiple group support is merged // instead of trying everything. 
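The getSchemaAndValidate rewrite above is a read-through file cache: try cacheDir/group/version/schema.json, fall back to a GET of /swaggerapi/group/version, then publish the fetched bytes via a temp file plus os.Link so a concurrent kubectl invocation never observes a partially written schema (losing the link race leaves an identical file in place). The same shape in isolation, as a sketch rather than the committed code (the function name and fetch callback are illustrative):

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path"
)

// cachedFetch mirrors the caching shape of getSchemaAndValidate above;
// it is an assumption-labeled sketch, not code from the diff.
func cachedFetch(cacheFile string, fetch func() ([]byte, error)) ([]byte, error) {
	data, err := ioutil.ReadFile(cacheFile)
	if err == nil {
		return data, nil // cache hit
	}
	if !os.IsNotExist(err) {
		return nil, err // an unreadable cache is an error, not a miss
	}
	if data, err = fetch(); err != nil {
		return nil, err
	}
	if err := os.MkdirAll(path.Dir(cacheFile), 0755); err != nil {
		return nil, err
	}
	tmp, err := ioutil.TempFile(path.Dir(cacheFile), "schema")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(tmp, bytes.NewBuffer(data)); err != nil {
		return nil, err
	}
	tmp.Close()
	// Hard-linking the finished temp file is atomic; EEXIST means another
	// process already published the same content, which is fine.
	if err := os.Link(tmp.Name(), cacheFile); err != nil && !os.IsExist(err) {
		return nil, err
	}
	return data, nil
}

func main() {
	data, err := cachedFetch("/tmp/demo-cache/schema.json", func() ([]byte, error) {
		return []byte(`{"swaggerVersion":"1.2"}`), nil // stand-in for the HTTP fetch
	})
	if err != nil {
		panic(err)
	}
	os.Stdout.Write(data)
}
```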
- err = getSchemaAndValidate(c.c.RESTClient, data, "api", version) + err = getSchemaAndValidate(c.c.RESTClient, data, "api", version, c.cacheDir) if err != nil && c.ec != nil { - errExp := getSchemaAndValidate(c.ec.RESTClient, data, "experimental", version) + errExp := getSchemaAndValidate(c.ec.RESTClient, data, "experimental", version, c.cacheDir) if errExp == nil { return nil } diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index f8213103c28..c4b5c917994 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -17,10 +17,20 @@ limitations under the License. package util import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "os" + "path" "sort" + "strings" "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/validation" + client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/kubectl" @@ -166,3 +176,101 @@ func TestFlagUnderscoreRenaming(t *testing.T) { t.Fatalf("Expected flag name to be valid-flag, got %s", factory.flags.Lookup("valid_flag").Name) } } + +func loadSchemaForTest() (validation.Schema, error) { + pathToSwaggerSpec := "../../../../api/swagger-spec/" + testapi.Default.Version() + ".json" + data, err := ioutil.ReadFile(pathToSwaggerSpec) + if err != nil { + return nil, err + } + return validation.NewSwaggerSchemaFromBytes(data) +} + +func TestValidateCachesSchema(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Error loading schema: %v", err) + t.FailNow() + } + output, err := json.Marshal(schema) + if err != nil { + t.Errorf("Error serializing schema: %v", err) + t.FailNow() + } + requests := map[string]int{} + + c := &client.FakeRESTClient{ + Codec: testapi.Default.Codec(), + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case strings.HasPrefix(p, "/swaggerapi") && m == "GET": + requests[p] = requests[p] + 1 + return &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + dir := os.TempDir() + "/schemaCache" + os.RemoveAll(dir) + + obj := &api.Pod{} + data, err := testapi.Default.Codec().Encode(obj) + if err != nil { + t.Errorf("unexpected error: %v", err) + t.FailNow() + } + + // Initial request, should use HTTP and write + if err := getSchemaAndValidate(c, data, "foo", "bar", dir); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if _, err := os.Stat(path.Join(dir, "foo", "bar", schemaFileName)); err != nil { + t.Errorf("unexpected missing cache file: %v", err) + } + if requests["/swaggerapi/foo/bar"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"]) + } + + // Same version and group, should skip HTTP + if err := getSchemaAndValidate(c, data, "foo", "bar", dir); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if requests["/swaggerapi/foo/bar"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"]) + } + + // Different API group, should go to HTTP and write + if err := getSchemaAndValidate(c, data, "foo", "baz", dir); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if _, err := os.Stat(path.Join(dir, "foo", "baz", schemaFileName)); err !=
nil { + t.Errorf("unexpected missing cache file: %v", err) + } + if requests["/swaggerapi/foo/baz"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/baz"]) + } + + // Different version, should go to HTTP and write + if err := getSchemaAndValidate(c, data, "foo2", "bar", dir); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if _, err := os.Stat(path.Join(dir, "foo2", "bar", schemaFileName)); err != nil { + t.Errorf("unexpected missing cache file: %v", err) + } + if requests["/swaggerapi/foo2/bar"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo2/bar"]) + } + + // No cache dir, should go straight to HTTP and not write + if err := getSchemaAndValidate(c, data, "foo", "blah", ""); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if requests["/swaggerapi/foo/blah"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/blah"]) + } + if _, err := os.Stat(path.Join(dir, "foo", "blah", schemaFileName)); err == nil || !os.IsNotExist(err) { + t.Errorf("unexpected cache file error: %v", err) + } +} diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 4b39f616f3b..3a2b07a42ee 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -268,8 +268,9 @@ func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { return d } -func AddValidateFlag(cmd *cobra.Command) { +func AddValidateFlags(cmd *cobra.Command) { cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it") + cmd.Flags().String("schema-cache-dir", "/tmp/kubectl.schema", "If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'") } func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { diff --git a/pkg/kubectl/custom_column_printer.go b/pkg/kubectl/custom_column_printer.go index bbbb16e1444..e48003fac60 100644 --- a/pkg/kubectl/custom_column_printer.go +++ b/pkg/kubectl/custom_column_printer.go @@ -17,9 +17,12 @@ limitations under the License. package kubectl import ( + "bufio" + "bytes" "fmt" "io" "reflect" + "regexp" "strings" "text/tabwriter" @@ -35,6 +38,105 @@ const ( flags = 0 ) +var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") + +// MassageJSONPath attempts to be flexible with JSONPath expressions; it accepts: +// * metadata.name (no leading '.' or curly braces '{...}') +// * {metadata.name} (no leading '.') +// * .metadata.name (no curly braces '{...}') +// * {.metadata.name} (complete expression) +// And transforms them all into a valid jsonpath expression: +// {.metadata.name} +func massageJSONPath(pathExpression string) (string, error) { + if len(pathExpression) == 0 { + return pathExpression, nil + } + submatches := jsonRegexp.FindStringSubmatch(pathExpression) + if submatches == nil { + return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") + } + if len(submatches) != 3 { + return "", fmt.Errorf("unexpected submatch list: %v", submatches) + } + var fieldSpec string + if len(submatches[1]) != 0 { + fieldSpec = submatches[1] + } else { + fieldSpec = submatches[2] + } + return fmt.Sprintf("{.%s}", fieldSpec), nil +} + +// NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of
<header>:<json-path-expr> pairs. +// e.g. NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints: +// +// NAME API_VERSION +// foo bar +func NewCustomColumnsPrinterFromSpec(spec string) (*CustomColumnsPrinter, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("custom-columns format specified but no custom columns given") + } + parts := strings.Split(spec, ",") + columns := make([]Column, len(parts)) + for ix := range parts { + colSpec := strings.Split(parts[ix], ":") + if len(colSpec) != 2 { + return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected <header>
:", parts[ix]) + } + spec, err := massageJSONPath(colSpec[1]) + if err != nil { + return nil, err + } + columns[ix] = Column{Header: colSpec[0], FieldSpec: spec} + } + return &CustomColumnsPrinter{Columns: columns}, nil +} + +func splitOnWhitespace(line string) []string { + lineScanner := bufio.NewScanner(bytes.NewBufferString(line)) + lineScanner.Split(bufio.ScanWords) + result := []string{} + for lineScanner.Scan() { + result = append(result, lineScanner.Text()) + } + return result +} + +// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected +// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec +// For example the template below: +// NAME API_VERSION +// {metadata.name} {apiVersion} +func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader) (*CustomColumnsPrinter, error) { + scanner := bufio.NewScanner(templateReader) + if !scanner.Scan() { + return nil, fmt.Errorf("invalid template, missing header line. Expected format is one line of space separated headers, one line of space separated column specs.") + } + headers := splitOnWhitespace(scanner.Text()) + + if !scanner.Scan() { + return nil, fmt.Errorf("invalid template, missing spec line. Expected format is one line of space separated headers, one line of space separated column specs.") + } + specs := splitOnWhitespace(scanner.Text()) + + if len(headers) != len(specs) { + return nil, fmt.Errorf("number of headers (%d) and field specifications (%d) don't match", len(headers), len(specs)) + } + + columns := make([]Column, len(headers)) + for ix := range headers { + spec, err := massageJSONPath(specs[ix]) + if err != nil { + return nil, err + } + columns[ix] = Column{ + Header: headers[ix], + FieldSpec: spec, + } + } + return &CustomColumnsPrinter{Columns: columns}, nil +} + // Column represents a user specified column type Column struct { // The header to print above the column, general style is ALL_CAPS @@ -105,3 +207,7 @@ func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jso fmt.Fprintln(out, strings.Join(columns, "\t")) return nil } + +func (s *CustomColumnsPrinter) HandledResources() []string { + return []string{} +} diff --git a/pkg/kubectl/custom_column_printer_test.go b/pkg/kubectl/custom_column_printer_test.go index 62c3aea9dc6..ac9fc9699ea 100644 --- a/pkg/kubectl/custom_column_printer_test.go +++ b/pkg/kubectl/custom_column_printer_test.go @@ -18,12 +18,192 @@ package kubectl import ( "bytes" + "reflect" "testing" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/runtime" ) +func TestMassageJSONPath(t *testing.T) { + tests := []struct { + input string + expectedOutput string + expectErr bool + }{ + {input: "foo.bar", expectedOutput: "{.foo.bar}"}, + {input: "{foo.bar}", expectedOutput: "{.foo.bar}"}, + {input: ".foo.bar", expectedOutput: "{.foo.bar}"}, + {input: "{.foo.bar}", expectedOutput: "{.foo.bar}"}, + {input: "", expectedOutput: ""}, + {input: "{foo.bar", expectErr: true}, + {input: "foo.bar}", expectErr: true}, + {input: "{foo.bar}}", expectErr: true}, + {input: "{{foo.bar}", expectErr: true}, + } + for _, test := range tests { + output, err := massageJSONPath(test.input) + if err != nil && !test.expectErr { + t.Errorf("unexpected error: %v", err) + continue + } + if test.expectErr { + if err == nil { + t.Error("unexpected non-error") + } + continue + } + if output != test.expectedOutput { + t.Errorf("input: %s, expected: %s, saw: %s", 
test.input, test.expectedOutput, output) + } + } +} + +func TestNewColumnPrinterFromSpec(t *testing.T) { + tests := []struct { + spec string + expectedColumns []Column + expectErr bool + name string + }{ + { + spec: "", + expectErr: true, + name: "empty", + }, + { + spec: "invalid", + expectErr: true, + name: "invalid1", + }, + { + spec: "invalid=foobar", + expectErr: true, + name: "invalid2", + }, + { + spec: "invalid,foobar:blah", + expectErr: true, + name: "invalid3", + }, + { + spec: "NAME:metadata.name,API_VERSION:apiVersion", + name: "ok", + expectedColumns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + }, + } + for _, test := range tests { + printer, err := NewCustomColumnsPrinterFromSpec(test.spec) + if test.expectErr { + if err == nil { + t.Errorf("[%s] unexpected non-error", test.name) + } + continue + } + if !test.expectErr && err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + + if !reflect.DeepEqual(test.expectedColumns, printer.Columns) { + t.Errorf("[%s]\nexpected:\n%v\nsaw:\n%v\n", test.name, test.expectedColumns, printer.Columns) + } + + } +} + +const exampleTemplateOne = `NAME API_VERSION +{metadata.name} {apiVersion}` + +const exampleTemplateTwo = `NAME API_VERSION + {metadata.name} {apiVersion}` + +func TestNewColumnPrinterFromTemplate(t *testing.T) { + tests := []struct { + spec string + expectedColumns []Column + expectErr bool + name string + }{ + { + spec: "", + expectErr: true, + name: "empty", + }, + { + spec: "invalid", + expectErr: true, + name: "invalid1", + }, + { + spec: "invalid=foobar", + expectErr: true, + name: "invalid2", + }, + { + spec: "invalid,foobar:blah", + expectErr: true, + name: "invalid3", + }, + { + spec: exampleTemplateOne, + name: "ok", + expectedColumns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + }, + { + spec: exampleTemplateTwo, + name: "ok-2", + expectedColumns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + }, + } + for _, test := range tests { + reader := bytes.NewBufferString(test.spec) + printer, err := NewCustomColumnsPrinterFromTemplate(reader) + if test.expectErr { + if err == nil { + t.Errorf("[%s] unexpected non-error", test.name) + } + continue + } + if !test.expectErr && err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + + if !reflect.DeepEqual(test.expectedColumns, printer.Columns) { + t.Errorf("[%s]\nexpected:\n%v\nsaw:\n%v\n", test.name, test.expectedColumns, printer.Columns) + } + + } +} + func TestColumnPrint(t *testing.T) { tests := []struct { columns []Column diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 63a078fbf47..310c0acb230 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -31,6 +31,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/fields" + qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/sets" @@ -460,6 +461,9 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace) fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&pod.Spec)) fmt.Fprintf(out, "Node:\t%s\n", 
pod.Spec.NodeName+"/"+pod.Status.HostIP) + if pod.Status.StartTime != nil { + fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) + } fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(pod.Labels)) if pod.DeletionTimestamp != nil { fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z)) @@ -707,7 +711,17 @@ func describeContainers(pod *api.Pod, out io.Writer) { state := status.State fmt.Fprintf(out, " %v:\n", container.Name) + fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID) fmt.Fprintf(out, " Image:\t%s\n", container.Image) + fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID) + + resourceToQoS := qosutil.GetQoS(&container) + if len(resourceToQoS) > 0 { + fmt.Fprintf(out, " QoS Tier:\n") + } + for resource, qos := range resourceToQoS { + fmt.Fprintf(out, " %s:\t%s\n", resource, qos) + } if len(container.Resources.Limits) > 0 { fmt.Fprintf(out, " Limits:\n") @@ -716,6 +730,13 @@ func describeContainers(pod *api.Pod, out io.Writer) { fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) } + if len(container.Resources.Requests) > 0 { + fmt.Fprintf(out, " Requests:\n") + } + for name, quantity := range container.Resources.Requests { + fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) + } + describeStatus("State", state, out) if status.LastTerminationState.Terminated != nil { describeStatus("Last Termination State", status.LastTerminationState, out) @@ -1065,8 +1086,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin fmt.Fprintf(out, "Name:\t%s\n", node.Name) fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(node.Labels)) fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase) if len(node.Status.Conditions) > 0 { fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n") + fmt.Fprint(out, " ────\t──────\t─────────────────\t──────────────────\t──────\t───────\n") for _, c := range node.Status.Conditions { fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", c.Type, @@ -1089,18 +1112,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin } } - runningPods := filterNonRunningPods(pods) - reqs, err := getPodsTotalRequests(runningPods) - if err != nil { - return err - } - fmt.Fprintf(out, "Allocated resources (total requests):\n") - for reqResource, reqValue := range reqs { - fmt.Fprintf(out, " %s:\t%s\n", reqResource, reqValue.String()) - } - fmt.Fprintf(out, " pods:\t%d\n", len(runningPods)) - - fmt.Fprintf(out, "Version:\n") + fmt.Fprintf(out, "System Info:\n") + fmt.Fprintf(out, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID) + fmt.Fprintf(out, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID) + fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OsImage) fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) @@ -1113,34 +1128,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin if len(node.Spec.ExternalID) > 0 { fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID) } - fmt.Fprintf(out, "Pods:\t(%d in total)\n", len(pods)) - fmt.Fprint(out, " Namespace\tName\t\tCPU(milliCPU)\t\tMemory(bytes)\n") - totalMilliCPU := int64(0) - totalMemory := 
int64(0) - fractionPodCPU := float64(0) - fractionPodMemory := float64(0) - fractionTotalCPU := float64(0) - fractionTotalMemory := float64(0) - for _, pod := range pods { - podTotalMilliCPU := int64(0) - podTotalMemory := int64(0) - - for ix := range pod.Spec.Containers { - limits := pod.Spec.Containers[ix].Resources.Limits - podTotalMilliCPU += limits.Cpu().MilliValue() - podTotalMemory += limits.Memory().Value() - } - totalMilliCPU += podTotalMilliCPU - totalMemory += podTotalMemory - fractionPodCPU = float64(podTotalMilliCPU) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100 - fractionPodMemory = float64(podTotalMemory) / float64(node.Status.Capacity.Memory().Value()) * 100 - fmt.Fprintf(out, " %s\t%s\t\t%d (%d%% of total)\t\t%d (%d%% of total)\n", pod.Namespace, pod.Name, podTotalMilliCPU, int64(fractionPodCPU), podTotalMemory, int64(fractionPodMemory)) + if err := describeNodeResource(pods, node, out); err != nil { + return err } - fmt.Fprint(out, "TotalResourceLimits:\n") - fractionTotalCPU = float64(totalMilliCPU) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100 - fractionTotalMemory = float64(totalMemory) / float64(node.Status.Capacity.Memory().Value()) * 100 - fmt.Fprintf(out, " CPU(milliCPU):\t\t%d (%d%% of total)\n", totalMilliCPU, int64(fractionTotalCPU)) - fmt.Fprintf(out, " Memory(bytes):\t\t%d (%d%% of total)\n", totalMemory, int64(fractionTotalMemory)) + if events != nil { DescribeEvents(events, out) } @@ -1197,7 +1188,44 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str }) } -func filterNonRunningPods(pods []*api.Pod) []*api.Pod { +func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) error { + nonTerminatedPods := filterTerminatedPods(pods) + fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nonTerminatedPods)) + fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") + fmt.Fprint(out, " ─────────\t────\t\t────────────\t──────────\t───────────────\t─────────────\n") + for _, pod := range nonTerminatedPods { + req, limit, err := getSinglePodTotalRequestsAndLimits(pod) + if err != nil { + return err + } + cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] + fractionCpuReq := float64(cpuReq.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100 + fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100 + fractionMemoryReq := float64(memoryReq.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100 + fractionMemoryLimit := float64(memoryLimit.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100 + fmt.Fprintf(out, " %s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, + cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), + memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) + } + + fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100%, i.e., overcommitted. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") + fmt.Fprint(out, " ────────────\t──────────\t───────────────\t─────────────\n") + reqs, limits, err := getPodsTotalRequestsAndLimits(nonTerminatedPods) + if err != nil { + return err + } + cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] + fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100 + fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100 + fractionMemoryReqs := float64(memoryReqs.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100 + fractionMemoryLimits := float64(memoryLimits.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100 + fmt.Fprintf(out, " %s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", + cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits), + memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + return nil +} + +func filterTerminatedPods(pods []*api.Pod) []*api.Pod { if len(pods) == 0 { return pods } @@ -1211,36 +1239,50 @@ func filterNonRunningPods(pods []*api.Pod) []*api.Pod { return result } -func getPodsTotalRequests(pods []*api.Pod) (map[api.ResourceName]resource.Quantity, error) { - reqs := map[api.ResourceName]resource.Quantity{} +func getPodsTotalRequestsAndLimits(pods []*api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { + reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} for _, pod := range pods { - podReqs, err := getSinglePodTotalRequests(pod) + podReqs, podLimits, err := getSinglePodTotalRequestsAndLimits(pod) if err != nil { - return nil, err + return nil, nil, err } for podReqName, podReqValue := range podReqs { if value, ok := reqs[podReqName]; !ok { - reqs[podReqName] = podReqValue + reqs[podReqName] = *podReqValue.Copy() } else if err = value.Add(podReqValue); err != nil { - return nil, err + return nil, nil, err + } + } + for podLimitName, podLimitValue := range podLimits { + if value, ok := limits[podLimitName]; !ok { + limits[podLimitName] = *podLimitValue.Copy() + } else if err = value.Add(podLimitValue); err != nil { + return nil, nil, err } } } - return reqs, nil + return } -func getSinglePodTotalRequests(pod *api.Pod) (map[api.ResourceName]resource.Quantity, error) { - reqs := map[api.ResourceName]resource.Quantity{} +func getSinglePodTotalRequestsAndLimits(pod *api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { + reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} for _, container := range pod.Spec.Containers { for name, quantity := range container.Resources.Requests { if value, ok := reqs[name]; !ok { - reqs[name] = quantity - } else if err := value.Add(quantity); err != nil { - return nil, err + reqs[name] = *quantity.Copy() + } else if err = value.Add(quantity); err != nil { + return nil, nil, err + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else if err = value.Add(quantity); err != nil { + return nil, nil, err } } } - return reqs, nil + return } func 
DescribeEvents(el *api.EventList, w io.Writer) { @@ -1250,6 +1292,7 @@ func DescribeEvents(el *api.EventList, w io.Writer) { } sort.Sort(SortableEvents(el.Items)) fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tReason\tMessage\n") + fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t──────\t───────\n") for _, e := range el.Items { fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\n", translateTimestamp(e.FirstTimestamp), diff --git a/pkg/kubectl/describe_test.go b/pkg/kubectl/describe_test.go index db79895fa3f..131bb7f2e2d 100644 --- a/pkg/kubectl/describe_test.go +++ b/pkg/kubectl/describe_test.go @@ -339,8 +339,8 @@ func TestDefaultDescribers(t *testing.T) { func TestGetPodsTotalRequests(t *testing.T) { testCases := []struct { - pods []*api.Pod - expectedReqs map[api.ResourceName]resource.Quantity + pods []*api.Pod + expectedReqs, expectedLimits map[api.ResourceName]resource.Quantity }{ { pods: []*api.Pod{ @@ -402,7 +402,7 @@ func TestGetPodsTotalRequests(t *testing.T) { } for _, testCase := range testCases { - reqs, err := getPodsTotalRequests(testCase.pods) + reqs, _, err := getPodsTotalRequestsAndLimits(testCase.pods) if err != nil { t.Errorf("Unexpected error %v", err) } diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index c847d7e4ed8..e8c18d13385 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "io/ioutil" + "os" "reflect" "sort" "strings" @@ -35,8 +36,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" @@ -100,6 +101,19 @@ func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { if err != nil { return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) } + case "custom-columns": + var err error + if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument); err != nil { + return nil, false, err + } + case "custom-columns-file": + file, err := os.Open(formatArgument) + if err != nil { + return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) + } + if printer, err = NewCustomColumnsPrinterFromTemplate(file); err != nil { + return nil, false, err + } case "wide": fallthrough case "": @@ -505,6 +519,10 @@ func translateTimestamp(timestamp util.Time) string { } func printPod(pod *api.Pod, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + return printPodBase(pod, w, withNamespace, wide, showAll, true, columnLabels) +} + +func printPodBase(pod *api.Pod, w io.Writer, withNamespace bool, wide bool, showAll bool, showIfTerminating bool, columnLabels []string) error { name := pod.Name namespace := pod.Namespace @@ -514,7 +532,7 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool, wide bool, showAll reason := string(pod.Status.Phase) // if not printing all pods, skip terminated pods (default) - if !showAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) { + if !showIfTerminating && !showAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) { return nil } if pod.Status.Reason != "" { @@ -574,7 +592,7 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool, wide bool, showAll func printPodList(podList *api.PodList, w 
io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { for _, pod := range podList.Items { - if err := printPod(&pod, w, withNamespace, wide, showAll, columnLabels); err != nil { + if err := printPodBase(&pod, w, withNamespace, wide, showAll, false, columnLabels); err != nil { return err } } @@ -1118,7 +1136,7 @@ func printComponentStatusList(list *api.ComponentStatusList, w io.Writer, withNa return nil } -func printThirdPartyResource(rsrc *expapi.ThirdPartyResource, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { +func printThirdPartyResource(rsrc *experimental.ThirdPartyResource, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { versions := make([]string, len(rsrc.Versions)) for ix := range rsrc.Versions { version := &rsrc.Versions[ix] @@ -1131,7 +1149,7 @@ func printThirdPartyResource(rsrc *expapi.ThirdPartyResource, w io.Writer, withN return nil } -func printThirdPartyResourceList(list *expapi.ThirdPartyResourceList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { +func printThirdPartyResourceList(list *experimental.ThirdPartyResourceList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { for _, item := range list.Items { if err := printThirdPartyResource(&item, w, withNamespace, wide, showAll, columnLabels); err != nil { return err @@ -1141,7 +1159,7 @@ func printThirdPartyResourceList(list *expapi.ThirdPartyResourceList, w io.Write return nil } -func printDeployment(deployment *expapi.Deployment, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { +func printDeployment(deployment *experimental.Deployment, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { if withNamespace { if _, err := fmt.Fprintf(w, "%s\t", deployment.Namespace); err != nil { return err @@ -1157,7 +1175,7 @@ func printDeployment(deployment *expapi.Deployment, w io.Writer, withNamespace b return err } -func printDeploymentList(list *expapi.DeploymentList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { +func printDeploymentList(list *experimental.DeploymentList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { for _, item := range list.Items { if err := printDeployment(&item, w, withNamespace, wide, showAll, columnLabels); err != nil { return err @@ -1166,7 +1184,7 @@ func printDeploymentList(list *expapi.DeploymentList, w io.Writer, withNamespace return nil } -func printHorizontalPodAutoscaler(hpa *expapi.HorizontalPodAutoscaler, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { +func printHorizontalPodAutoscaler(hpa *experimental.HorizontalPodAutoscaler, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { namespace := hpa.Namespace name := hpa.Name reference := fmt.Sprintf("%s/%s/%s/%s", @@ -1203,7 +1221,7 @@ func printHorizontalPodAutoscaler(hpa *expapi.HorizontalPodAutoscaler, w io.Writ return err } -func printHorizontalPodAutoscalerList(list *expapi.HorizontalPodAutoscalerList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { +func printHorizontalPodAutoscalerList(list *experimental.HorizontalPodAutoscalerList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { for i := range list.Items { if err := 
printHorizontalPodAutoscaler(&list.Items[i], w, withNamespace, wide, showAll, columnLabels); err != nil {
 			return err
diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go
index bb76beb692d..72f24fc01b3 100644
--- a/pkg/kubectl/resource_printer_test.go
+++ b/pkg/kubectl/resource_printer_test.go
@@ -29,7 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/expapi"
+	"k8s.io/kubernetes/pkg/apis/experimental"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -1235,22 +1235,22 @@ func TestTranslateTimestamp(t *testing.T) {
 func TestPrintDeployment(t *testing.T) {
 	tests := []struct {
-		deployment expapi.Deployment
+		deployment experimental.Deployment
 		expect     string
 	}{
 		{
-			expapi.Deployment{
+			experimental.Deployment{
 				ObjectMeta: api.ObjectMeta{
 					Name:              "test1",
 					CreationTimestamp: util.Time{Time: time.Now().Add(1.9e9)},
 				},
-				Spec: expapi.DeploymentSpec{
+				Spec: experimental.DeploymentSpec{
 					Replicas: 5,
 					Template: &api.PodTemplateSpec{
 						Spec: api.PodSpec{Containers: make([]api.Container, 2)},
 					},
 				},
-				Status: expapi.DeploymentStatus{
+				Status: experimental.DeploymentStatus{
 					Replicas:        10,
 					UpdatedReplicas: 2,
 				},
diff --git a/pkg/kubectl/run.go b/pkg/kubectl/run.go
index 233cbbe9ba9..be8e843f39f 100644
--- a/pkg/kubectl/run.go
+++ b/pkg/kubectl/run.go
@@ -22,6 +22,7 @@ import (
 	"strings"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util"
 )
@@ -42,9 +43,51 @@ func (BasicReplicationController) ParamNames() []GeneratorParam {
 		{"command", false},
 		{"args", false},
 		{"env", false},
+		{"requests", false},
+		{"limits", false},
 	}
 }
+// populateResourceList takes strings of form <resourceName1>=<value1>,<resourceName2>=<value2>
+func populateResourceList(spec string) (api.ResourceList, error) {
+	// empty input gets a nil response to preserve generator test expected behaviors
+	if spec == "" {
+		return nil, nil
+	}
+
+	result := api.ResourceList{}
+	resourceStatements := strings.Split(spec, ",")
+	for _, resourceStatement := range resourceStatements {
+		parts := strings.Split(resourceStatement, "=")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("Invalid argument syntax %v, expected <resource>=<value>", resourceStatement)
+		}
+		resourceName := api.ResourceName(parts[0])
+		resourceQuantity, err := resource.ParseQuantity(parts[1])
+		if err != nil {
+			return nil, err
+		}
+		result[resourceName] = *resourceQuantity
+	}
+	return result, nil
+}
+
+// HandleResourceRequirements parses the limits and requests parameters if specified
+func HandleResourceRequirements(params map[string]string) (api.ResourceRequirements, error) {
+	result := api.ResourceRequirements{}
+	limits, err := populateResourceList(params["limits"])
+	if err != nil {
+		return result, err
+	}
+	result.Limits = limits
+	requests, err := populateResourceList(params["requests"])
+	if err != nil {
+		return result, err
+	}
+	result.Requests = requests
+	return result, nil
+}
+
 func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) {
 	stdin, err := GetBool(params, "stdin", false)
 	if err != nil {
@@ -56,13 +99,19 @@ func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) {
 		return nil, err
 	}
+	resourceRequirements, err := HandleResourceRequirements(params)
+	if err != nil {
+		return nil, err
+	}
+
 	spec := api.PodSpec{
 		Containers: []api.Container{
 			{
-				Name:  name,
-				Image: params["image"],
-				Stdin: stdin,
-				TTY:   tty,
+				Name:      name,
+				Image:
params["image"], + Stdin: stdin, + TTY: tty, + Resources: resourceRequirements, }, }, } @@ -223,6 +272,8 @@ func (BasicPod) ParamNames() []GeneratorParam { {"command", false}, {"args", false}, {"env", false}, + {"requests", false}, + {"limits", false}, } } @@ -288,6 +339,11 @@ func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, return nil, err } + resourceRequirements, err := HandleResourceRequirements(params) + if err != nil { + return nil, err + } + restartPolicy := api.RestartPolicy(params["restart"]) if len(restartPolicy) == 0 { restartPolicy = api.RestartPolicyAlways @@ -305,6 +361,7 @@ func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, ImagePullPolicy: api.PullIfNotPresent, Stdin: stdin, TTY: tty, + Resources: resourceRequirements, }, }, DNSPolicy: api.DNSClusterFirst, diff --git a/pkg/kubectl/run_test.go b/pkg/kubectl/run_test.go index 3b187eba801..e16f3edd329 100644 --- a/pkg/kubectl/run_test.go +++ b/pkg/kubectl/run_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" ) func TestGenerate(t *testing.T) { @@ -286,6 +287,92 @@ func TestGenerate(t *testing.T) { }, }, }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "hostport": "80", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu100m,memory=100Mi", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu=100m&memory=100Mi", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu=", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu=100m,memory=100Mi", + "limits": "cpu=400m,memory=200Mi", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"foo": "bar", "baz": "blah"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: api.ResourceList{ + api.ResourceCPU: resource.MustParse("400m"), + api.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, } generator := BasicReplicationController{} for _, test := range tests { diff --git a/pkg/kubectl/service.go b/pkg/kubectl/service.go index 927fa1e820a..172dc6721ca 100644 --- a/pkg/kubectl/service.go +++ b/pkg/kubectl/service.go @@ -55,6 +55,7 @@ func paramNames() []GeneratorParam { {"labels", false}, {"external-ip", false}, {"create-external-load-balancer", false}, + {"load-balancer-ip", false}, {"type", false}, {"protocol", false}, {"container-port", false}, // alias of target-port @@ -149,6 +150,9 @@ func 
generate(genericParams map[string]interface{}) (runtime.Object, error) { if len(params["type"]) != 0 { service.Spec.Type = api.ServiceType(params["type"]) } + if service.Spec.Type == api.ServiceTypeLoadBalancer { + service.Spec.LoadBalancerIP = params["load-balancer-ip"] + } if len(params["session-affinity"]) != 0 { switch api.ServiceAffinity(params["session-affinity"]) { case api.ServiceAffinityNone: diff --git a/pkg/kubelet/config/apiserver.go b/pkg/kubelet/config/apiserver.go index 8e295ea33ee..c441b3f0d55 100644 --- a/pkg/kubelet/config/apiserver.go +++ b/pkg/kubelet/config/apiserver.go @@ -19,8 +19,8 @@ package config import ( "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/kubelet" ) diff --git a/pkg/kubelet/config/apiserver_test.go b/pkg/kubelet/config/apiserver_test.go index 3ffdcc438a8..11b3f982876 100644 --- a/pkg/kubelet/config/apiserver_test.go +++ b/pkg/kubelet/config/apiserver_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/watch" diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 5d0e5bc4722..c174e46ac18 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -24,7 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types" diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index c3d39202f8f..fac24eca7ab 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -21,7 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/securitycontext" diff --git a/pkg/kubelet/container/image_puller.go b/pkg/kubelet/container/image_puller.go index 95c0ae04c54..b9268a0adcf 100644 --- a/pkg/kubelet/container/image_puller.go +++ b/pkg/kubelet/container/image_puller.go @@ -21,7 +21,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" ) // imagePuller pulls the image using Runtime.PullImage(). 
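The renames in the hunk below ("pulling" to "Pulling", and so on) follow a convention applied throughout this change: an event reason is a short, machine-matchable CamelCase token, and the human-readable detail stays in the message arguments. A minimal sketch of a consumer keying off these tokens (the helper is hypothetical; only the reason strings come from this diff):

package main

import "fmt"

// isImagePullReason reports whether an event reason was emitted by the
// image puller, using the CamelCase tokens introduced in this change.
func isImagePullReason(reason string) bool {
	switch reason {
	case "Pulling", "Pulled", "Failed":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(isImagePullReason("Pulled")) // true
	fmt.Println(isImagePullReason("pulled")) // false: reasons are CamelCase now
}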
@@ -64,11 +64,11 @@ func (puller *imagePuller) reportImagePull(ref *api.ObjectReference, event strin switch event { case "pulling": - puller.recorder.Eventf(ref, "pulling", "Pulling image %q", image) + puller.recorder.Eventf(ref, "Pulling", "Pulling image %q", image) case "pulled": - puller.recorder.Eventf(ref, "pulled", "Successfully pulled image %q", image) + puller.recorder.Eventf(ref, "Pulled", "Successfully pulled image %q", image) case "failed": - puller.recorder.Eventf(ref, "failed", "Failed to pull image %q: %v", image, pullError) + puller.recorder.Eventf(ref, "Failed", "Failed to pull image %q: %v", image, pullError) } } @@ -82,14 +82,14 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul present, err := puller.runtime.IsImagePresent(spec) if err != nil { if ref != nil { - puller.recorder.Eventf(ref, "failed", "Failed to inspect image %q: %v", container.Image, err) + puller.recorder.Eventf(ref, "Failed", "Failed to inspect image %q: %v", container.Image, err) } return fmt.Errorf("failed to inspect image %q: %v", container.Image, err) } if !shouldPullImage(container, present) { if present && ref != nil { - puller.recorder.Eventf(ref, "pulled", "Container image %q already present on machine", container.Image) + puller.recorder.Eventf(ref, "Pulled", "Container image %q already present on machine", container.Image) } return nil } diff --git a/pkg/kubelet/dockertools/docker.go b/pkg/kubelet/dockertools/docker.go index d96c201641e..e2632a4fa3e 100644 --- a/pkg/kubelet/dockertools/docker.go +++ b/pkg/kubelet/dockertools/docker.go @@ -130,7 +130,7 @@ func filterHTTPError(err error, image string) error { jerr.Code == http.StatusServiceUnavailable || jerr.Code == http.StatusGatewayTimeout) { glog.V(2).Infof("Pulling image %q failed: %v", image, err) - return fmt.Errorf("image pull failed for %s because the registry is temporarily unavailbe.", image) + return fmt.Errorf("image pull failed for %s because the registry is temporarily unavailable.", image) } else { return err } diff --git a/pkg/kubelet/dockertools/docker_test.go b/pkg/kubelet/dockertools/docker_test.go index 760b8f63a5c..867cae23cc3 100644 --- a/pkg/kubelet/dockertools/docker_test.go +++ b/pkg/kubelet/dockertools/docker_test.go @@ -29,7 +29,7 @@ import ( docker "github.com/fsouza/go-dockerclient" cadvisorApi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/credentialprovider" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network" @@ -255,7 +255,7 @@ func TestPullWithJSONError(t *testing.T) { "Bad gateway": { "ubuntu", &jsonmessage.JSONError{Code: 502, Message: "\n\n \n \n \n
Oops, there was an error!\nWe have been contacted of this error, feel free to check out status.docker.com\n to see if there is a bigger issue.
\n\n \n"}, - "because the registry is temporarily unavailbe", + "because the registry is temporarily unavailable", }, } for i, test := range tests { diff --git a/pkg/kubelet/dockertools/fake_manager.go b/pkg/kubelet/dockertools/fake_manager.go index 03bf8a6aec3..5a98c953599 100644 --- a/pkg/kubelet/dockertools/fake_manager.go +++ b/pkg/kubelet/dockertools/fake_manager.go @@ -18,7 +18,7 @@ package dockertools import ( cadvisorApi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network" "k8s.io/kubernetes/pkg/kubelet/prober" diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index 10c72fb81fc..ee609749102 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -37,11 +37,12 @@ import ( cadvisorApi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/latest" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/network" + "k8s.io/kubernetes/pkg/kubelet/network/hairpin" "k8s.io/kubernetes/pkg/kubelet/prober" "k8s.io/kubernetes/pkg/kubelet/qos" kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -275,13 +276,13 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID, tail string var ( // ErrNoContainersInPod is returned when there are no containers for a given pod - ErrNoContainersInPod = errors.New("no containers exist for this pod") + ErrNoContainersInPod = errors.New("NoContainersInPod") // ErrNoPodInfraContainerInPod is returned when there is no pod infra container for a given pod - ErrNoPodInfraContainerInPod = errors.New("No pod infra container exists for this pod") + ErrNoPodInfraContainerInPod = errors.New("NoPodInfraContainerInPod") // ErrContainerCannotRun is returned when a container is created, but cannot run properly - ErrContainerCannotRun = errors.New("Container cannot run") + ErrContainerCannotRun = errors.New("ContainerCannotRun") ) // Internal information kept for containers from inspection @@ -333,17 +334,21 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string, } } else if !inspectResult.State.FinishedAt.IsZero() { reason := "" + message := "" // Note: An application might handle OOMKilled gracefully. // In that case, the container is oom killed, but the exit // code could be 0. 
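	// The code below therefore reports a fixed reason token ("OOMKilled" or
	// "Error") and carries any runtime error text separately in the new
	// message field.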
if inspectResult.State.OOMKilled { - reason = "OOM Killed" + reason = "OOMKilled" } else { - reason = inspectResult.State.Error + reason = "Error" + message = inspectResult.State.Error } result.status.State.Terminated = &api.ContainerStateTerminated{ - ExitCode: inspectResult.State.ExitCode, - Reason: reason, + ExitCode: inspectResult.State.ExitCode, + Message: message, + Reason: reason, + StartedAt: util.NewTime(inspectResult.State.StartedAt), FinishedAt: util.NewTime(inspectResult.State.FinishedAt), ContainerID: DockerPrefix + dockerID, @@ -503,11 +508,13 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { _, err := dm.client.InspectImage(image) if err == nil { containerStatus.State.Waiting = &api.ContainerStateWaiting{ - Reason: fmt.Sprintf("Image: %s is ready, container is creating", image), + Message: fmt.Sprintf("Image: %s is ready, container is creating", image), + Reason: "ContainerCreating", } } else if err == docker.ErrNoSuchImage { containerStatus.State.Waiting = &api.ContainerStateWaiting{ - Reason: fmt.Sprintf("Image: %s is not ready on the node", image), + Message: fmt.Sprintf("Image: %s is not ready on the node", image), + Reason: "ImageNotReady", } } statuses[container.Name] = &containerStatus @@ -1728,6 +1735,16 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, podFullName) return err } + + // Setup the host interface (FIXME: move to networkPlugin when ready) + podInfraContainer, err := dm.client.InspectContainer(string(podInfraContainerID)) + if err != nil { + glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, podFullName) + return err + } + if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, "eth0"); err != nil { + glog.Warningf("Hairpin setup failed for pod %q: %v", podFullName, err) + } } // Start everything diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go index 800233b4710..c711e7b268f 100644 --- a/pkg/kubelet/dockertools/manager_test.go +++ b/pkg/kubelet/dockertools/manager_test.go @@ -34,7 +34,7 @@ import ( cadvisorApi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network" kubeprober "k8s.io/kubernetes/pkg/kubelet/prober" @@ -885,7 +885,7 @@ func TestSyncPodCreateNetAndContainer(t *testing.T) { runSyncPod(t, dm, fakeDocker, pod, nil) verifyCalls(t, fakeDocker, []string{ // Create pod infra container. - "create", "start", "inspect_container", + "create", "start", "inspect_container", "inspect_container", // Create container. "create", "start", "inspect_container", }) @@ -934,7 +934,7 @@ func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) { verifyCalls(t, fakeDocker, []string{ // Create pod infra container. - "create", "start", "inspect_container", + "create", "start", "inspect_container", "inspect_container", // Create container. "create", "start", "inspect_container", }) @@ -1027,7 +1027,7 @@ func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) { // Kill the container since pod infra container is not running. "stop", // Create pod infra container. - "create", "start", "inspect_container", + "create", "start", "inspect_container", "inspect_container", // Create container. 
"create", "start", "inspect_container", }) @@ -1301,21 +1301,21 @@ func TestSyncPodWithPullPolicy(t *testing.T) { fakeDocker.Lock() eventSet := []string{ - `pulling Pulling image "pod_infra_image"`, - `pulled Successfully pulled image "pod_infra_image"`, - `pulling Pulling image "pull_always_image"`, - `pulled Successfully pulled image "pull_always_image"`, - `pulling Pulling image "pull_if_not_present_image"`, - `pulled Successfully pulled image "pull_if_not_present_image"`, - `pulled Container image "existing_one" already present on machine`, - `pulled Container image "want:latest" already present on machine`, + `Pulling Pulling image "pod_infra_image"`, + `Pulled Successfully pulled image "pod_infra_image"`, + `Pulling Pulling image "pull_always_image"`, + `Pulled Successfully pulled image "pull_always_image"`, + `Pulling Pulling image "pull_if_not_present_image"`, + `Pulled Successfully pulled image "pull_if_not_present_image"`, + `Pulled Container image "existing_one" already present on machine`, + `Pulled Container image "want:latest" already present on machine`, } recorder := dm.recorder.(*record.FakeRecorder) var actualEvents []string for _, ev := range recorder.Events { - if strings.HasPrefix(ev, "pull") { + if strings.HasPrefix(ev, "Pull") { actualEvents = append(actualEvents, ev) } } @@ -2093,7 +2093,7 @@ func TestSyncPodWithTerminationLog(t *testing.T) { runSyncPod(t, dm, fakeDocker, pod, nil) verifyCalls(t, fakeDocker, []string{ // Create pod infra container. - "create", "start", "inspect_container", + "create", "start", "inspect_container", "inspect_container", // Create container. "create", "start", "inspect_container", }) @@ -2132,7 +2132,7 @@ func TestSyncPodWithHostNetwork(t *testing.T) { verifyCalls(t, fakeDocker, []string{ // Create pod infra container. - "create", "start", "inspect_container", + "create", "start", "inspect_container", "inspect_container", // Create container. 
"create", "start", "inspect_container", }) diff --git a/pkg/kubelet/image_manager.go b/pkg/kubelet/image_manager.go index e17c647328b..a198644f347 100644 --- a/pkg/kubelet/image_manager.go +++ b/pkg/kubelet/image_manager.go @@ -25,7 +25,7 @@ import ( docker "github.com/fsouza/go-dockerclient" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/util" diff --git a/pkg/kubelet/image_manager_test.go b/pkg/kubelet/image_manager_test.go index 1b96807d7a1..2cff1944c70 100644 --- a/pkg/kubelet/image_manager_test.go +++ b/pkg/kubelet/image_manager_test.go @@ -25,7 +25,7 @@ import ( cadvisorApiV2 "github.com/google/cadvisor/info/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/util/sets" diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index f93edf0e80e..569769c1e09 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -39,9 +39,9 @@ import ( apierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/fields" @@ -164,6 +164,7 @@ func NewMainKubelet( cgroupRoot string, containerRuntime string, rktPath string, + rktStage1Image string, mounter mount.Interface, dockerDaemonContainer string, systemContainer string, @@ -207,7 +208,11 @@ func NewMainKubelet( fieldSelector := fields.Set{client.ObjectNameField: nodeName}.AsSelector() listWatch := &cache.ListWatch{ ListFunc: func() (runtime.Object, error) { - return kubeClient.Nodes().List(labels.Everything(), fieldSelector) + obj, err := kubeClient.Nodes().Get(nodeName) + if err != nil { + return nil, err + } + return &api.NodeList{Items: []api.Node{*obj}}, nil }, WatchFunc: func(resourceVersion string) (watch.Interface, error) { return kubeClient.Nodes().Watch(labels.Everything(), fieldSelector, resourceVersion) @@ -331,6 +336,7 @@ func NewMainKubelet( case "rkt": conf := &rkt.Config{ Path: rktPath, + Stage1Image: rktStage1Image, InsecureSkipVerify: true, } rktRuntime, err := rkt.New( @@ -714,17 +720,7 @@ func (kl *Kubelet) GetNode() (*api.Node, error) { if kl.standaloneMode { return nil, errors.New("no node entry for kubelet in standalone mode") } - l, err := kl.nodeLister.List() - if err != nil { - return nil, errors.New("cannot list nodes") - } - nodeName := kl.nodeName - for _, n := range l.Items { - if n.Name == nodeName { - return &n, nil - } - } - return nil, fmt.Errorf("node %v not found", nodeName) + return kl.nodeLister.GetNodeInfo(kl.nodeName) } // Starts garbage collection threads. 
@@ -1337,7 +1333,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	}
 	if egress != nil || ingress != nil {
 		if pod.Spec.HostNetwork {
-			kl.recorder.Event(pod, "host network not supported", "Bandwidth shaping is not currently supported on the host network")
+			kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
 		} else if kl.shaper != nil {
 			status, found := kl.statusManager.GetPodStatus(pod.UID)
 			if !found {
@@ -1352,7 +1348,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
 			}
 		} else {
-			kl.recorder.Event(pod, "nil shaper", "Pod requests bandwidth shaping, but the shaper is undefined")
+			kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
 		}
 	}
@@ -2344,21 +2340,24 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 		newNodeReadyCondition = api.NodeCondition{
 			Type:              api.NodeReady,
 			Status:            api.ConditionTrue,
-			Reason:            "kubelet is posting ready status",
+			Reason:            "KubeletReady",
+			Message:           "kubelet is posting ready status",
 			LastHeartbeatTime: currentTime,
 		}
 	} else {
 		var reasons []string
+		var messages []string
 		if !containerRuntimeUp {
-			reasons = append(reasons, "container runtime is down")
+			messages = append(messages, "container runtime is down")
 		}
 		if !networkConfigured {
-			reasons = append(reasons, "network not configured correctly")
+			messages = append(messages, "network not configured correctly")
 		}
 		newNodeReadyCondition = api.NodeCondition{
 			Type:              api.NodeReady,
 			Status:            api.ConditionFalse,
-			Reason:            strings.Join(reasons, ","),
+			Reason:            "KubeletNotReady",
+			Message:           strings.Join(messages, ","),
 			LastHeartbeatTime: currentTime,
 		}
 	}
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 87fba50f571..17deade5395 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -18,7 +18,6 @@ package kubelet
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -39,7 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/capabilities"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
+	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/container"
@@ -966,7 +965,12 @@ type testNodeLister struct {
 }
 func (ls testNodeLister) GetNodeInfo(id string) (*api.Node, error) {
-	return nil, errors.New("not implemented")
+	for _, node := range ls.nodes {
+		if node.Name == id {
+			return &node, nil
+		}
+	}
+	return nil, fmt.Errorf("Node with name: %s does not exist", id)
 }
 func (ls testNodeLister) List() (api.NodeList, error) {
@@ -2380,7 +2384,8 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 				{
 					Type:               api.NodeReady,
 					Status:             api.ConditionTrue,
-					Reason:             fmt.Sprintf("kubelet is posting ready status"),
+					Reason:             "KubeletReady",
+					Message:            fmt.Sprintf("kubelet is posting ready status"),
 					LastHeartbeatTime:  util.Time{},
 					LastTransitionTime: util.Time{},
 				},
@@ -2448,7 +2453,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 				{
 					Type:               api.NodeReady,
 					Status:             api.ConditionTrue,
-					Reason:             fmt.Sprintf("kubelet is posting ready status"),
+					Reason:             "KubeletReady",
+					Message:            fmt.Sprintf("kubelet is posting ready status"),
 					LastHeartbeatTime:  util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 					LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 				},
@@
-2484,7 +2490,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) { { Type: api.NodeReady, Status: api.ConditionTrue, - Reason: fmt.Sprintf("kubelet is posting ready status"), + Reason: "KubeletReady", + Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: util.Time{}, // placeholder LastTransitionTime: util.Time{}, // placeholder }, @@ -2578,7 +2585,8 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) { { Type: api.NodeReady, Status: api.ConditionFalse, - Reason: fmt.Sprintf("container runtime is down"), + Reason: "KubeletNotReady", + Message: fmt.Sprintf("container runtime is down"), LastHeartbeatTime: util.Time{}, LastTransitionTime: util.Time{}, }, diff --git a/pkg/kubelet/network/hairpin/hairpin.go b/pkg/kubelet/network/hairpin/hairpin.go new file mode 100644 index 00000000000..508f04ad22f --- /dev/null +++ b/pkg/kubelet/network/hairpin/hairpin.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hairpin + +import ( + "fmt" + "io/ioutil" + "net" + "path" + "regexp" + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util/exec" +) + +const ( + sysfsNetPath = "/sys/devices/virtual/net" + hairpinModeRelativePath = "brport/hairpin_mode" + hairpinEnable = "1" +) + +var ( + ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)") +) + +func SetUpContainer(containerPid int, containerInterfaceName string) error { + e := exec.New() + return setUpContainerInternal(e, containerPid, containerInterfaceName) +} + +func setUpContainerInternal(e exec.Interface, containerPid int, containerInterfaceName string) error { + hostIfName, err := findPairInterfaceOfContainerInterface(e, containerPid, containerInterfaceName) + if err != nil { + glog.Infof("Unable to find pair interface, setting up all interfaces: %v", err) + return setUpAllInterfaces() + } + return setUpInterface(hostIfName) +} + +func findPairInterfaceOfContainerInterface(e exec.Interface, containerPid int, containerInterfaceName string) (string, error) { + nsenterPath, err := e.LookPath("nsenter") + if err != nil { + return "", err + } + ethtoolPath, err := e.LookPath("ethtool") + if err != nil { + return "", err + } + // Get container's interface index + output, err := e.Command(nsenterPath, "-t", fmt.Sprintf("%d", containerPid), "-n", "-F", "--", ethtoolPath, "--statistics", containerInterfaceName).CombinedOutput() + if err != nil { + return "", fmt.Errorf("Unable to query interface %s of container %d: %v", containerInterfaceName, containerPid, err) + } + // look for peer_ifindex + match := ethtoolOutputRegex.FindSubmatch(output) + if match == nil { + return "", fmt.Errorf("No peer_ifindex in interface statistics for %s of container %d", containerInterfaceName, containerPid) + } + peerIfIndex, err := strconv.Atoi(string(match[1])) + if err != nil { // seems impossible (\d+ not numeric) + return "", fmt.Errorf("peer_ifindex wasn't numeric: %s: %v", match[1], err) + } + iface, err := 
net.InterfaceByIndex(peerIfIndex) + if err != nil { + return "", err + } + return iface.Name, nil +} + +func setUpAllInterfaces() error { + interfaces, err := net.Interfaces() + if err != nil { + return err + } + for _, netIf := range interfaces { + setUpInterface(netIf.Name) // ignore errors + } + return nil +} + +func setUpInterface(ifName string) error { + glog.V(3).Infof("Enabling hairpin on interface %s", ifName) + hairpinModeFile := path.Join(sysfsNetPath, ifName, hairpinModeRelativePath) + return ioutil.WriteFile(hairpinModeFile, []byte(hairpinEnable), 0644) +} diff --git a/pkg/kubelet/network/hairpin/hairpin_test.go b/pkg/kubelet/network/hairpin/hairpin_test.go new file mode 100644 index 00000000000..1cc3ee3ae99 --- /dev/null +++ b/pkg/kubelet/network/hairpin/hairpin_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hairpin + +import ( + "errors" + "fmt" + "net" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/util/exec" +) + +func TestFindPairInterfaceOfContainerInterface(t *testing.T) { + // there should be at least "lo" on any system + interfaces, _ := net.Interfaces() + validOutput := fmt.Sprintf("garbage\n peer_ifindex: %d", interfaces[0].Index) + invalidOutput := fmt.Sprintf("garbage\n unknown: %d", interfaces[0].Index) + + tests := []struct { + output string + err error + expectedName string + expectErr bool + }{ + { + output: validOutput, + expectedName: interfaces[0].Name, + }, + { + output: invalidOutput, + expectErr: true, + }, + { + output: validOutput, + err: errors.New("error"), + expectErr: true, + }, + } + for _, test := range tests { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(test.output), test.err }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { + return exec.InitFakeCmd(&fcmd, cmd, args...) 
+ }, + }, + LookPathFunc: func(file string) (string, error) { + return fmt.Sprintf("/fake-bin/%s", file), nil + }, + } + name, err := findPairInterfaceOfContainerInterface(&fexec, 123, "eth0") + if test.expectErr { + if err == nil { + t.Errorf("unexpected non-error") + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + if name != test.expectedName { + t.Errorf("unexpected name: %s (expected: %s)", name, test.expectedName) + } + } +} + +func TestSetUpInterface(t *testing.T) { + err := setUpInterface("non-existent") + if err == nil { + t.Errorf("unexpected non-error") + } + hairpinModeFile := fmt.Sprintf("%s/%s/%s", sysfsNetPath, "non-existent", hairpinModeRelativePath) + if !strings.Contains(fmt.Sprintf("%v", err), hairpinModeFile) { + t.Errorf("should have tried to open %s", hairpinModeFile) + } +} diff --git a/pkg/kubelet/oom_watcher.go b/pkg/kubelet/oom_watcher.go index b70682b5e6b..d3aea9827d0 100644 --- a/pkg/kubelet/oom_watcher.go +++ b/pkg/kubelet/oom_watcher.go @@ -21,7 +21,7 @@ import ( "github.com/google/cadvisor/events" cadvisorApi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/util" ) diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index e508e32488f..85cd333e263 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -22,7 +22,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util" diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go index fd0bfaa16e9..aa0561bd5b4 100644 --- a/pkg/kubelet/pod_workers_test.go +++ b/pkg/kubelet/pod_workers_test.go @@ -26,7 +26,7 @@ import ( docker "github.com/fsouza/go-dockerclient" cadvisorApi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/network" diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index 3a621d71f4b..01f010095d9 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -25,7 +25,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/probe" execprobe "k8s.io/kubernetes/pkg/probe/exec" diff --git a/pkg/kubelet/qos/memory_policy.go b/pkg/kubelet/qos/memory_policy.go index ac542407a90..95e4830f2ac 100644 --- a/pkg/kubelet/qos/memory_policy.go +++ b/pkg/kubelet/qos/memory_policy.go @@ -37,9 +37,9 @@ func isMemoryBestEffort(container *api.Container) bool { func isMemoryGuaranteed(container *api.Container) bool { // A container is memory guaranteed if its memory request == memory limit. // If memory request == memory limit, the user is very confident of resource consumption. 
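	// (Cmp compares the canonical quantities exactly; Value() rounds to an
	// integer, so comparing through Value() could report two nearly-equal
	// quantities as equal.)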
- memoryRequestValue := container.Resources.Requests.Memory().Value() - memoryLimitValue := container.Resources.Limits.Memory().Value() - return memoryRequestValue == memoryLimitValue && memoryRequestValue != 0 + memoryRequest := container.Resources.Requests.Memory() + memoryLimit := container.Resources.Limits.Memory() + return (*memoryRequest).Cmp(*memoryLimit) == 0 && memoryRequest.Value() != 0 } // GetContainerOomAdjust returns the amount by which the OOM score of all processes in the diff --git a/pkg/kubelet/qos/util/qos.go b/pkg/kubelet/qos/util/qos.go new file mode 100644 index 00000000000..0ebb1c84fa1 --- /dev/null +++ b/pkg/kubelet/qos/util/qos.go @@ -0,0 +1,75 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/kubernetes/pkg/api" +) + +const ( + Guaranteed = "Guaranteed" + Burstable = "Burstable" + BestEffort = "Best-Effort" +) + +// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed. +func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool { + // A container resource is guaranteed if its request == limit. + // If request == limit, the user is very confident of resource consumption. + req, hasReq := container.Resources.Requests[resource] + limit, hasLimit := container.Resources.Limits[resource] + if !hasReq || !hasLimit { + return false + } + return req.Cmp(limit) == 0 && req.Value() != 0 +} + +// isResourceBestEffort returns true if the container's resource requirements are best-effort. +func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool { + // A container resource is best-effort if its request is unspecified or 0. + // If a request is specified, then the user expects some kind of resource guarantee. 
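+	// Per this check, a container that sets a limit but no request is still
+	// best-effort for that resource; a nonzero request without an exactly
+	// matching limit falls through to Burstable in GetQoS below.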
+ req, hasReq := container.Resources.Requests[resource] + return !hasReq || req.Value() == 0 +} + +// GetQos returns a mapping of resource name to QoS class of a container +func GetQoS(container *api.Container) map[api.ResourceName]string { + resourceToQoS := map[api.ResourceName]string{} + for resource := range allResources(container) { + switch { + case isResourceGuaranteed(container, resource): + resourceToQoS[resource] = Guaranteed + case isResourceBestEffort(container, resource): + resourceToQoS[resource] = BestEffort + default: + resourceToQoS[resource] = Burstable + } + } + return resourceToQoS +} + +// allResources returns a set of resources the container has +func allResources(container *api.Container) map[api.ResourceName]bool { + resources := map[api.ResourceName]bool{} + for resource := range container.Resources.Requests { + resources[resource] = true + } + for resource := range container.Resources.Limits { + resources[resource] = true + } + return resources +} diff --git a/pkg/kubelet/rkt/config.go b/pkg/kubelet/rkt/config.go index c59aa51ebee..91dd3d558f2 100644 --- a/pkg/kubelet/rkt/config.go +++ b/pkg/kubelet/rkt/config.go @@ -23,6 +23,8 @@ import "fmt" type Config struct { // The absolute path to the binary, or leave empty to find it in $PATH. Path string + // The image to use as stage1. + Stage1Image string // The debug flag for rkt. Debug bool // The rkt data directory. diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index 7c8a9249e55..265c27018b1 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -27,7 +27,6 @@ import ( "path" "strconv" "strings" - "syscall" "time" appcschema "github.com/appc/spec/schema" @@ -38,10 +37,11 @@ import ( docker "github.com/fsouza/go-dockerclient" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/credentialprovider" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober" + kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/probe" "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/types" @@ -467,7 +467,7 @@ func (r *runtime) makePodManifest(pod *api.Pod, pullSecrets []api.Secret) (*appc volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID) if !ok { - return nil, fmt.Errorf("cannot get the volumes for pod %q", kubecontainer.GetPodFullName(pod)) + return nil, fmt.Errorf("cannot get the volumes for pod %q", kubeletUtil.FormatPodName(pod)) } // Set global volumes. @@ -533,7 +533,7 @@ func serviceFilePath(serviceName string) string { // preparePod will: // // 1. Invoke 'rkt prepare' to prepare the pod, and get the rkt pod uuid. -// 2. Creates the unit file and save it under systemdUnitDir. +// 2. Create the unit file and save it under systemdUnitDir. // // On success, it will return a string that represents name of the unit file // and the runtime pod. @@ -566,6 +566,9 @@ func (r *runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k // Run 'rkt prepare' to get the rkt UUID. cmds := []string{"prepare", "--quiet", "--pod-manifest", manifestFile.Name()} + if r.config.Stage1Image != "" { + cmds = append(cmds, "--stage1-image", r.config.Stage1Image) + } output, err := r.runCommand(cmds...) if err != nil { return "", nil, err @@ -596,6 +599,8 @@ func (r *runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k // This makes the service show up for 'systemctl list-units' even if it exits successfully. 
 		newUnitOption("Service", "RemainAfterExit", "true"),
 		newUnitOption("Service", "ExecStart", runPrepared),
+		// This enables graceful stop.
+		newUnitOption("Service", "KillMode", "mixed"),
 	}
 	// Check if there's old rkt pod corresponding to the same pod, if so, update the restart count.
@@ -615,7 +620,7 @@ func (r *runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
 	}
 	units = append(units, newUnitOption(unitKubernetesSection, unitRestartCount, strconv.Itoa(restartCount)))
-	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, pod.Name)
+	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, kubeletUtil.FormatPodName(pod))
 	serviceFile, err := os.Create(serviceFilePath(serviceName))
 	if err != nil {
 		return "", nil, err
@@ -674,7 +679,7 @@ func (r *runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
 // RunPod first creates the unit file for a pod, and then
 // starts the unit over d-bus.
 func (r *runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
-	glog.V(4).Infof("Rkt starts to run pod: name %q.", pod.Name)
+	glog.V(4).Infof("Rkt starts to run pod: name %q.", kubeletUtil.FormatPodName(pod))
 	name, runtimePod, prepareErr := r.preparePod(pod, pullSecrets)
@@ -684,7 +689,7 @@ func (r *runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
 	for i, c := range pod.Spec.Containers {
 		ref, err := kubecontainer.GenerateContainerRef(pod, &c)
 		if err != nil {
-			glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, c.Name, err)
+			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", kubeletUtil.FormatPodName(pod), c.Name, err)
 			continue
 		}
 		if prepareErr != nil {
@@ -800,8 +805,11 @@ func (r *runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
 		r.containerRefManager.ClearRef(id)
 	}
-	// TODO(yifan): More graceful stop. Replace with StopUnit and wait for a timeout.
-	r.systemd.KillUnit(serviceName, int32(syscall.SIGKILL))
+	// Since all service files have 'KillMode=mixed', the processes in
+	// the unit's cgroup will receive a SIGKILL if the normal stop times out.
+	if _, err := r.systemd.StopUnit(serviceName, "replace"); err != nil {
+		return err
+	}
 	// Remove the systemd service file as well.
 	return os.Remove(serviceFilePath(serviceName))
 }
@@ -961,7 +969,7 @@ func (r *runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
 // SyncPod syncs the running pod to match the specified desired pod.
 func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
-	podFullName := kubecontainer.GetPodFullName(pod)
+	podFullName := kubeletUtil.FormatPodName(pod)
 	if len(runningPod.Containers) == 0 {
 		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
 		return r.RunPod(pod, pullSecrets)
@@ -1036,6 +1044,8 @@ func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
 //
 // In rkt runtime's implementation, per container log is get via 'journalctl -M [rkt-$UUID] -u [APP_NAME]'.
 // See https://github.com/coreos/rkt/blob/master/Documentation/commands.md#logging for more details.
+//
+// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
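For reference, the unit options assembled in preparePod above render to a service file along these lines; the ExecStart value is illustrative, since the real command is the runPrepared string built by preparePod:

[Service]
RemainAfterExit=true
ExecStart=/usr/bin/rkt run-prepared <pod-uuid>
# 'mixed' sends SIGTERM to the main process on stop, then SIGKILL to
# whatever remains in the unit's cgroup once the stop timeout expires,
# which is what lets KillPod use a plain StopUnit call.
KillMode=mixed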
func (r *runtime) GetContainerLogs(pod *api.Pod, containerID string, tail string, follow bool, stdout, stderr io.Writer) error { id, err := parseContainerID(containerID) if err != nil { @@ -1072,6 +1082,7 @@ func (r *runtime) GarbageCollect() error { // Note: In rkt, the container ID is in the form of "UUID:appName", where // appName is the container name. +// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail. func (r *runtime) RunInContainer(containerID string, cmd []string) ([]byte, error) { glog.V(4).Infof("Rkt running in container.") @@ -1092,6 +1103,7 @@ func (r *runtime) AttachContainer(containerID string, stdin io.Reader, stdout, s // Note: In rkt, the container ID is in the form of "UUID:appName", where UUID is // the rkt UUID, and appName is the container name. +// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail. func (r *runtime) ExecInContainer(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { glog.V(4).Infof("Rkt execing in container.") @@ -1150,7 +1162,7 @@ func (r *runtime) findRktID(pod *kubecontainer.Pod) (string, error) { f, err := os.Open(serviceFilePath(serviceName)) if err != nil { if os.IsNotExist(err) { - return "", fmt.Errorf("no service file %v for pod %q, UID %q", serviceName, pod.Name, pod.ID) + return "", fmt.Errorf("no service file %v for runtime pod %q, ID %q", serviceName, pod.Name, pod.ID) } return "", err } @@ -1179,6 +1191,7 @@ func (r *runtime) findRktID(pod *kubecontainer.Pod) (string, error) { // - should we support nsenter + socat in a container, running with elevated privs and --pid=host? // // TODO(yifan): Merge with the same function in dockertools. +// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail. 
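Several of the notes above depend on the "UUID:appName" container ID format. A minimal parser consistent with that format, as a sketch; splitContainerID is a hypothetical helper, and the real parseContainerID in rkt.go may differ in details:

import (
	"fmt"
	"strings"
)

// splitContainerID splits a rkt container ID of the form "UUID:appName"
// into the rkt pod UUID and the app (container) name.
func splitContainerID(id string) (uuid, appName string, err error) {
	parts := strings.SplitN(id, ":", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("invalid rkt container ID %q, expected \"UUID:appName\"", id)
	}
	return parts[0], parts[1], nil
}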
func (r *runtime) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { glog.V(4).Infof("Rkt port forwarding in container.") diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 6f8d21bed14..4d6435cd332 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -25,7 +25,7 @@ import ( docker "github.com/fsouza/go-dockerclient" cadvisorApi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockertools" @@ -147,6 +147,13 @@ func TestRunOnce(t *testing.T) { State: docker.State{Running: true, Pid: 42}, }, }, + { + label: "syncPod", + container: docker.Container{ + Config: &docker.Config{Image: "someimage"}, + State: docker.State{Running: true, Pid: 42}, + }, + }, }, t: t, } diff --git a/pkg/master/master.go b/pkg/master/master.go index e3344954838..a90a2f3ade4 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -37,13 +37,13 @@ import ( "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/rest" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/experimental" + explatest "k8s.io/kubernetes/pkg/apis/experimental/latest" "k8s.io/kubernetes/pkg/apiserver" "k8s.io/kubernetes/pkg/auth/authenticator" "k8s.io/kubernetes/pkg/auth/authorizer" "k8s.io/kubernetes/pkg/auth/handlers" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/expapi" - explatest "k8s.io/kubernetes/pkg/expapi/latest" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/healthz" "k8s.io/kubernetes/pkg/labels" @@ -55,6 +55,7 @@ import ( endpointsetcd "k8s.io/kubernetes/pkg/registry/endpoint/etcd" eventetcd "k8s.io/kubernetes/pkg/registry/event/etcd" expcontrolleretcd "k8s.io/kubernetes/pkg/registry/experimental/controller/etcd" + jobetcd "k8s.io/kubernetes/pkg/registry/job/etcd" limitrangeetcd "k8s.io/kubernetes/pkg/registry/limitrange/etcd" "k8s.io/kubernetes/pkg/registry/namespace" namespaceetcd "k8s.io/kubernetes/pkg/registry/namespace/etcd" @@ -571,7 +572,7 @@ func (m *Master) init(c *Config) { apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions) if m.exp { - expVersion := m.expapi(c) + expVersion := m.experimental(c) if err := expVersion.InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup experimental api: %v", err) } @@ -774,7 +775,7 @@ func (m *Master) api_v1() *apiserver.APIGroupVersion { return version } -func (m *Master) InstallThirdPartyAPI(rsrc *expapi.ThirdPartyResource) error { +func (m *Master) InstallThirdPartyAPI(rsrc *experimental.ThirdPartyResource) error { kind, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) if err != nil { return err @@ -820,13 +821,14 @@ func (m *Master) thirdpartyapi(group, kind, version string) *apiserver.APIGroupV } } -// expapi returns the resources and codec for the experimental api -func (m *Master) expapi(c *Config) *apiserver.APIGroupVersion { +// experimental returns the resources and codec for the experimental api +func (m *Master) experimental(c *Config) *apiserver.APIGroupVersion { controllerStorage := expcontrolleretcd.NewStorage(c.ExpDatabaseStorage) autoscalerStorage := horizontalpodautoscaleretcd.NewREST(c.ExpDatabaseStorage) thirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(c.ExpDatabaseStorage) daemonSetStorage := 
daemonetcd.NewREST(c.ExpDatabaseStorage)
 	deploymentStorage := deploymentetcd.NewREST(c.ExpDatabaseStorage)
+	jobStorage := jobetcd.NewREST(c.ExpDatabaseStorage)
 	storage := map[string]rest.Storage{
 		strings.ToLower("replicationControllers"): controllerStorage.ReplicationController,
@@ -835,6 +837,7 @@ func (m *Master) expapi(c *Config) *apiserver.APIGroupVersion {
 		strings.ToLower("thirdpartyresources"): thirdPartyResourceStorage,
 		strings.ToLower("daemonsets"): daemonSetStorage,
 		strings.ToLower("deployments"): deploymentStorage,
+		strings.ToLower("jobs"): jobStorage,
 	}
 	return &apiserver.APIGroupVersion{
@@ -1024,6 +1027,7 @@ func (m *Master) setupSecureProxy(user, privateKeyfile, publicKeyfile string) {
 }
 func (m *Master) generateSSHKey(user, privateKeyfile, publicKeyfile string) error {
+	// TODO: user is not used. Consider removing it as an input to the function.
 	private, public, err := util.GenerateKey(2048)
 	if err != nil {
 		return err
diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go
index 898d92385ab..b8a0cc78bf7 100644
--- a/pkg/master/master_test.go
+++ b/pkg/master/master_test.go
@@ -19,25 +19,41 @@ package master
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
+	"fmt"
 	"io/ioutil"
+	"net"
 	"net/http"
 	"net/http/httptest"
+	"os"
+	"path/filepath"
 	"reflect"
 	"testing"
+	"time"
+
+	"github.com/emicklei/go-restful"
+	"github.com/stretchr/testify/assert"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/latest"
-	"k8s.io/kubernetes/pkg/expapi"
-	explatest "k8s.io/kubernetes/pkg/expapi/latest"
+	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/apis/experimental"
+	explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
+	"k8s.io/kubernetes/pkg/apiserver"
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/fields"
+	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/registry/endpoint"
+	"k8s.io/kubernetes/pkg/registry/namespace"
 	"k8s.io/kubernetes/pkg/registry/registrytest"
 	etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
 	"k8s.io/kubernetes/pkg/tools"
 	"k8s.io/kubernetes/pkg/tools/etcdtest"
-
-	"github.com/emicklei/go-restful"
+	"k8s.io/kubernetes/pkg/util"
 )
-func TestGetServersToValidate(t *testing.T) {
+// setUp is a convenience function for setting up (most) tests.
+func setUp(t *testing.T) (Master, Config, *assert.Assertions) {
 	master := Master{}
 	config := Config{}
 	fakeClient := tools.NewFakeEtcdClient(t)
@@ -47,11 +63,64 @@ func TestGetServersToValidate(t *testing.T) {
 	master.nodeRegistry = registrytest.NewNodeRegistry([]string{"node1", "node2"}, api.NodeResources{})
+	return master, config, assert.New(t)
+}
+
+// TestNew verifies that the New function returns a Master
+// that uses the configuration properly.
+func TestNew(t *testing.T) {
+	_, config, assert := setUp(t)
+	config.KubeletClient = client.FakeKubeletClient{}
+	master := New(&config)
+
+	// Verify many of the variables match their config counterparts
+	assert.Equal(master.enableCoreControllers, config.EnableCoreControllers)
+	assert.Equal(master.enableLogsSupport, config.EnableLogsSupport)
+	assert.Equal(master.enableUISupport, config.EnableUISupport)
+	assert.Equal(master.enableSwaggerSupport, config.EnableSwaggerSupport)
+	assert.Equal(master.enableProfiling, config.EnableProfiling)
+	assert.Equal(master.apiPrefix, config.APIPrefix)
+	assert.Equal(master.expAPIPrefix, config.ExpAPIPrefix)
+	assert.Equal(master.corsAllowedOriginList, config.CorsAllowedOriginList)
+	assert.Equal(master.authenticator, config.Authenticator)
+	assert.Equal(master.authorizer, config.Authorizer)
+	assert.Equal(master.admissionControl, config.AdmissionControl)
+	assert.Equal(master.v1, !config.DisableV1)
+	assert.Equal(master.exp, config.EnableExp)
+	assert.Equal(master.requestContextMapper, config.RequestContextMapper)
+	assert.Equal(master.cacheTimeout, config.CacheTimeout)
+	assert.Equal(master.masterCount, config.MasterCount)
+	assert.Equal(master.externalHost, config.ExternalHost)
+	assert.Equal(master.clusterIP, config.PublicAddress)
+	assert.Equal(master.publicReadWritePort, config.ReadWritePort)
+	assert.Equal(master.serviceReadWriteIP, config.ServiceReadWriteIP)
+	assert.Equal(master.installSSHKey, config.InstallSSHKey)
+}
+
+// TestNewEtcdStorage verifies that NewEtcdStorage reacts properly to both
+// valid and invalid input
+func TestNewEtcdStorage(t *testing.T) {
+	assert := assert.New(t)
+	fakeClient := tools.NewFakeEtcdClient(t)
+	// Pass case
+	_, err := NewEtcdStorage(fakeClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix())
+	assert.NoError(err, "Unable to create etcdstorage: %s", err)
+
+	// Fail case
+	errorFunc := func(apiVersion string) (*meta.VersionInterfaces, error) { return nil, errors.New("ERROR") }
+	_, err = NewEtcdStorage(fakeClient, errorFunc, latest.Version, etcdtest.PathPrefix())
+	assert.Error(err, "NewEtcdStorage should have failed")
+}
+
+// TestGetServersToValidate verifies the unexported getServersToValidate function
+func TestGetServersToValidate(t *testing.T) {
+	master, config, assert := setUp(t)
 	servers := master.getServersToValidate(&config)
-	if len(servers) != 5 {
-		t.Errorf("unexpected server list: %#v", servers)
-	}
+	assert.Equal(5, len(servers), "unexpected server list: %#v", servers)
+
 	for _, server := range []string{"scheduler", "controller-manager", "etcd-0", "etcd-1", "etcd-2"} {
 		if _, ok := servers[server]; !ok {
 			t.Errorf("server list missing: %s", server)
@@ -59,7 +128,10 @@ func TestGetServersToValidate(t *testing.T) {
 	}
 }
+// TestFindExternalAddress verifies both pass and fail cases for the unexported
+// findExternalAddress function
 func TestFindExternalAddress(t *testing.T) {
+	assert := assert.New(t)
 	expectedIP := "172.0.0.1"
 	nodes := []*api.Node{new(api.Node), new(api.Node), new(api.Node)}
@@ -67,20 +139,287 @@ func TestFindExternalAddress(t *testing.T) {
 	nodes[1].Status.Addresses = []api.NodeAddress{{"LegacyHostIP", expectedIP}}
 	nodes[2].Status.Addresses = []api.NodeAddress{{"ExternalIP", expectedIP}, {"LegacyHostIP", "172.0.0.2"}}
+	// Pass Case
 	for _, node := range nodes {
 		ip, err := findExternalAddress(node)
-		if err != nil {
-			t.Errorf("error getting node external address: %s", err)
-		}
-
-		if ip != expectedIP {
-			t.Errorf("expected ip to be %s, but was %s", expectedIP, ip)
+		assert.NoError(err, "error getting node external address")
+		assert.Equal(expectedIP, ip, "expected ip to be %s, but was %s", expectedIP, ip)
 	}
+
+	// Fail case
+	_, err := findExternalAddress(new(api.Node))
+	assert.Error(err, "expected findExternalAddress to fail on a node with missing ip information")
+}
+
+// TestApi_v1 verifies that the unexported api_v1 function
+// uses the correct Version and Codec.
+func TestApi_v1(t *testing.T) {
+	master, _, assert := setUp(t)
+	version := master.api_v1()
+	assert.Equal("v1", version.Version, "Version was not v1: %s", version.Version)
+	assert.Equal(v1.Codec, version.Codec, "version.Codec was not for v1: %s", version.Codec)
+	for k, v := range master.storage {
+		assert.Contains(version.Storage, v, "Value %s not found (key: %s)", k, v)
+	}
+}
+
+// TestNewBootstrapController verifies master fields are properly copied into controller
+func TestNewBootstrapController(t *testing.T) {
+	// Tests a subset of inputs to ensure they are set properly in the controller
+	master, _, assert := setUp(t)
+	portRange := util.PortRange{Base: 10, Size: 10}
+
+	master.namespaceRegistry = namespace.NewRegistry(nil)
+	master.serviceRegistry = registrytest.NewServiceRegistry()
+	master.endpointRegistry = endpoint.NewRegistry(nil)
+
+	master.serviceNodePortRange = portRange
+	master.masterCount = 1
+	master.serviceReadWritePort = 1000
+	master.publicReadWritePort = 1010
+
+	controller := master.NewBootstrapController()
+
+	assert.Equal(controller.NamespaceRegistry, master.namespaceRegistry)
+	assert.Equal(controller.EndpointRegistry, master.endpointRegistry)
+	assert.Equal(controller.ServiceRegistry, master.serviceRegistry)
+	assert.Equal(controller.ServiceNodePortRange, portRange)
+	assert.Equal(controller.MasterCount, master.masterCount)
+	assert.Equal(controller.ServicePort, master.serviceReadWritePort)
+	assert.Equal(controller.PublicServicePort, master.publicReadWritePort)
+}
+
+// TestNewHandlerContainer verifies that NewHandlerContainer uses the
+// mux provided
+func TestNewHandlerContainer(t *testing.T) {
+	assert := assert.New(t)
+	mux := http.NewServeMux()
+	container := NewHandlerContainer(mux)
+	assert.Equal(mux, container.ServeMux, "ServeMuxes do not match")
+}
+
+// TestHandleWithAuth verifies HandleWithAuth adds the path
+// to the muxHelper.RegisteredPaths.
+func TestHandleWithAuth(t *testing.T) {
+	master, _, assert := setUp(t)
+	mh := apiserver.MuxHelper{Mux: http.NewServeMux()}
+	master.muxHelper = &mh
+	handler := func(w http.ResponseWriter, r *http.Request) { w.Write(nil) }
+	master.HandleWithAuth("/test", http.HandlerFunc(handler))
+
+	assert.Contains(master.muxHelper.RegisteredPaths, "/test", "Path not found in muxHelper")
+}
+
+// TestHandleFuncWithAuth verifies HandleFuncWithAuth adds the path
+// to the muxHelper.RegisteredPaths.
+func TestHandleFuncWithAuth(t *testing.T) {
+	master, _, assert := setUp(t)
+	mh := apiserver.MuxHelper{Mux: http.NewServeMux()}
+	master.muxHelper = &mh
+	handler := func(w http.ResponseWriter, r *http.Request) { w.Write(nil) }
+	master.HandleFuncWithAuth("/test", handler)
+
+	assert.Contains(master.muxHelper.RegisteredPaths, "/test", "Path not found in muxHelper")
+}
+
+// TestInstallSwaggerAPI verifies that the swagger api is added
+// at the proper endpoint.
+func TestInstallSwaggerAPI(t *testing.T) {
+	master, _, assert := setUp(t)
+	mux := http.NewServeMux()
+	master.handlerContainer = NewHandlerContainer(mux)
+
+	// Ensure swagger isn't installed without the call
+	ws := master.handlerContainer.RegisteredWebServices()
+	if !assert.Equal(len(ws), 0) {
+		for x := range ws {
+			assert.NotEqual("/swaggerapi", ws[x].RootPath(), "SwaggerAPI was installed without a call to InstallSwaggerAPI()")
+		}
+	}
-	_, err := findExternalAddress(new(api.Node))
-	if err == nil {
-		t.Errorf("expected findExternalAddress to fail on a node with missing ip information")
-	}
+
+	// Install swagger and test
+	master.InstallSwaggerAPI()
+	ws = master.handlerContainer.RegisteredWebServices()
+	if assert.NotEqual(0, len(ws), "SwaggerAPI not installed.") {
+		assert.Equal("/swaggerapi/", ws[0].RootPath(), "SwaggerAPI did not install to the proper path. %s != /swaggerapi", ws[0].RootPath())
+	}
+
+	// Empty externalHost verification
+	mux = http.NewServeMux()
+	master.handlerContainer = NewHandlerContainer(mux)
+	master.externalHost = ""
+	master.clusterIP = net.IPv4(10, 10, 10, 10)
+	master.publicReadWritePort = 1010
+	master.InstallSwaggerAPI()
+	ws = master.handlerContainer.RegisteredWebServices()
+	if assert.NotEqual(0, len(ws), "SwaggerAPI not installed.") {
+		assert.Equal("/swaggerapi/", ws[0].RootPath(), "SwaggerAPI did not install to the proper path. %s != /swaggerapi", ws[0].RootPath())
+	}
+}
+
+// TestDefaultAPIGroupVersion verifies that the unexported defaultAPIGroupVersion
+// creates the expected APIGroupVersion based on the master.
+func TestDefaultAPIGroupVersion(t *testing.T) {
+	master, _, assert := setUp(t)
+	master.dialer = func(network, addr string) (net.Conn, error) { return nil, nil }
+
+	apiGroup := master.defaultAPIGroupVersion()
+
+	assert.Equal(apiGroup.Root, master.apiPrefix)
+	assert.Equal(apiGroup.Admit, master.admissionControl)
+	assert.Equal(apiGroup.Context, master.requestContextMapper)
+	assert.Equal(apiGroup.MinRequestTimeout, master.minRequestTimeout)
+
+	// These functions should be different instances of the same function
+	groupDialerFunc := fmt.Sprintf("%+v", apiGroup.ProxyDialerFn)
+	masterDialerFunc := fmt.Sprintf("%+v", master.dialer)
+	assert.Equal(groupDialerFunc, masterDialerFunc)
+}
+
+// TestExpapi verifies that the unexported experimental function creates
+// an APIGroupVersion for the experimental API.
+func TestExpapi(t *testing.T) {
+	master, config, assert := setUp(t)
+
+	expAPIGroup := master.experimental(&config)
+	assert.Equal(expAPIGroup.Root, master.expAPIPrefix)
+	assert.Equal(expAPIGroup.Mapper, explatest.RESTMapper)
+	assert.Equal(expAPIGroup.Codec, explatest.Codec)
+	assert.Equal(expAPIGroup.Linker, explatest.SelfLinker)
+	assert.Equal(expAPIGroup.Version, explatest.Version)
+}
+
+// TestSecondsSinceSync verifies that proper results are returned
+// when checking the time between syncs
+func TestSecondsSinceSync(t *testing.T) {
+	master, _, assert := setUp(t)
+	master.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix()
+
+	// Nanosecond. No difference.
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)}
+	assert.Equal(int64(0), master.secondsSinceSync())
+
+	// Second
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)}
+	assert.Equal(int64(1), master.secondsSinceSync())
+
+	// Minute
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)}
+	assert.Equal(int64(60), master.secondsSinceSync())
+
+	// Hour
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)}
+	assert.Equal(int64(3600), master.secondsSinceSync())
+
+	// Day
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)}
+	assert.Equal(int64(86400), master.secondsSinceSync())
+
+	// Month
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)}
+	assert.Equal(int64(2678400), master.secondsSinceSync())
+
+	// Future Month. Should be -Month.
+	master.lastSync = time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix()
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC)}
+	assert.Equal(int64(-2678400), master.secondsSinceSync())
+}
+
+// TestGetNodeAddresses verifies that proper results are returned
+// when requesting node addresses.
+func TestGetNodeAddresses(t *testing.T) {
+	master, _, assert := setUp(t)
+
+	// Fail case (no addresses associated with nodes)
+	nodes, _ := master.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
+	addrs, err := master.getNodeAddresses()
+
+	assert.Error(err, "getNodeAddresses should have caused an error as there are no addresses.")
+	assert.Equal([]string(nil), addrs)
+
+	// Pass case with External type IP
+	nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
+	for index := range nodes.Items {
+		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeExternalIP, Address: "127.0.0.1"}}
+	}
+	addrs, err = master.getNodeAddresses()
+	assert.NoError(err, "getNodeAddresses should not have returned an error.")
+	assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)
+
+	// Pass case with LegacyHost type IP
+	nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
+	for index := range nodes.Items {
+		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.2"}}
+	}
+	addrs, err = master.getNodeAddresses()
+	assert.NoError(err, "getNodeAddresses fallback should not have returned an error.")
+	assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
+}
+
+// TestRefreshTunnels verifies that the function errors when no addresses
+// are associated with nodes
+func TestRefreshTunnels(t *testing.T) {
+	master, _, assert := setUp(t)
+
+	// Fail case (no addresses associated with nodes)
+	assert.Error(master.refreshTunnels("test", "/tmp/undefined"))
+
+	// TODO: pass case without needing actual connections?
+}
+
+// TestIsTunnelSyncHealthy verifies that the 600 second lag test
+// is honored.
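The cases above pin the semantics down well enough to infer the unexported implementation: the delta in whole seconds between the injected clock and the last tunnel sync, negative when lastSync is in the future. A sketch consistent with every expectation above (an inference from the tests, not the actual master.go source):

func (m *Master) secondsSinceSync() int64 {
	// FakeClock satisfies this via its Time field; the real master
	// presumably uses a wall clock behind the same interface.
	return m.clock.Now().Unix() - m.lastSync
}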
+func TestIsTunnelSyncHealthy(t *testing.T) {
+	master, _, assert := setUp(t)
+
+	// Pass case: 540 second lag
+	master.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix()
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 1, 9, 1, 1, time.UTC)}
+	err := master.IsTunnelSyncHealthy(nil)
+	assert.NoError(err, "IsTunnelSyncHealthy() should not have returned an error.")
+
+	// Fail case: 720 second lag
+	master.clock = &util.FakeClock{Time: time.Date(2015, time.January, 1, 1, 12, 1, 1, time.UTC)}
+	err = master.IsTunnelSyncHealthy(nil)
+	assert.Error(err, "IsTunnelSyncHealthy() should have returned an error.")
+}
+
+// generateTempFilePath creates a temporary file path
+func generateTempFilePath(prefix string) string {
+	tmpPath, _ := filepath.Abs(fmt.Sprintf("%s/%s-%d", os.TempDir(), prefix, time.Now().Unix()))
+	return tmpPath
+}
+
+// TestGenerateSSHKey verifies that SSH key generation generates keys
+// even when keys already exist.
+func TestGenerateSSHKey(t *testing.T) {
+	master, _, assert := setUp(t)
+
+	privateKey := generateTempFilePath("private")
+	publicKey := generateTempFilePath("public")
+
+	// Make sure we have no test keys lying around
+	os.Remove(privateKey)
+	os.Remove(publicKey)
+
+	// Pass case: Sunny day case
+	err := master.generateSSHKey("unused", privateKey, publicKey)
+	assert.NoError(err, "generateSSHKey should not have returned an error: %s", err)
+
+	// Pass case: PrivateKey exists test case
+	os.Remove(publicKey)
+	err = master.generateSSHKey("unused", privateKey, publicKey)
+	assert.NoError(err, "generateSSHKey should not have returned an error: %s", err)
+
+	// Pass case: PublicKey exists test case
+	os.Remove(privateKey)
+	err = master.generateSSHKey("unused", privateKey, publicKey)
+	assert.NoError(err, "generateSSHKey should not have returned an error: %s", err)
+
+	// Make sure we have no test keys lying around
+	os.Remove(privateKey)
+	os.Remove(publicKey)
+
+	// TODO: testing error cases where the file cannot be removed?
} var versionsToTest = []string{"v1", "v3"} @@ -100,13 +439,14 @@ type FooList struct { items []Foo `json:"items"` } -func initThirdParty(t *testing.T, version string) (*tools.FakeEtcdClient, *httptest.Server) { - master := &Master{} - api := &expapi.ThirdPartyResource{ +func initThirdParty(t *testing.T, version string) (*tools.FakeEtcdClient, *httptest.Server, *assert.Assertions) { + master, _, assert := setUp(t) + + api := &experimental.ThirdPartyResource{ ObjectMeta: api.ObjectMeta{ Name: "foo.company.com", }, - Versions: []expapi.APIVersion{ + Versions: []experimental.APIVersion{ { APIGroup: "group", Name: version, @@ -119,13 +459,12 @@ func initThirdParty(t *testing.T, version string) (*tools.FakeEtcdClient, *httpt fakeClient.Machines = []string{"http://machine1:4001", "http://machine2", "http://machine3:4003"} master.thirdPartyStorage = etcdstorage.NewEtcdStorage(fakeClient, explatest.Codec, etcdtest.PathPrefix()) - if err := master.InstallThirdPartyAPI(api); err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(master.InstallThirdPartyAPI(api)) { t.FailNow() } server := httptest.NewServer(master.handlerContainer.ServeMux) - return fakeClient, server + return fakeClient, server, assert } func TestInstallThirdPartyAPIList(t *testing.T) { @@ -135,31 +474,27 @@ func TestInstallThirdPartyAPIList(t *testing.T) { } func testInstallThirdPartyAPIListVersion(t *testing.T, version string) { - fakeClient, server := initThirdParty(t, version) + fakeClient, server, assert := initThirdParty(t, version) defer server.Close() fakeClient.ExpectNotFoundGet(etcdtest.PathPrefix() + "/ThirdPartyResourceData/company.com/foos/default") resp, err := http.Get(server.URL + "/thirdparty/company.com/" + version + "/namespaces/default/foos") - if err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { return } + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected status: %v", resp) - } + assert.Equal(http.StatusOK, resp.StatusCode) data, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Errorf("unexpected error: %v", err) - } + assert.NoError(err) list := FooList{} - if err := json.Unmarshal(data, &list); err != nil { - t.Errorf("unexpected error: %v", err) - } + err = json.Unmarshal(data, &list) + assert.NoError(err) + } func encodeToThirdParty(name string, obj interface{}) ([]byte, error) { @@ -167,7 +502,7 @@ func encodeToThirdParty(name string, obj interface{}) ([]byte, error) { if err != nil { return nil, err } - thirdPartyData := expapi.ThirdPartyResourceData{ + thirdPartyData := experimental.ThirdPartyResourceData{ ObjectMeta: api.ObjectMeta{Name: name}, Data: serial, } @@ -204,7 +539,7 @@ func TestInstallThirdPartyAPIGet(t *testing.T) { } func testInstallThirdPartyAPIGetVersion(t *testing.T, version string) { - fakeClient, server := initThirdParty(t, version) + fakeClient, server, assert := initThirdParty(t, version) defer server.Close() expectedObj := Foo{ @@ -218,28 +553,26 @@ func testInstallThirdPartyAPIGetVersion(t *testing.T, version string) { SomeField: "test field", OtherField: 10, } - if err := storeToEtcd(fakeClient, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj); err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(storeToEtcd(fakeClient, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj)) { t.FailNow() return } resp, err := http.Get(server.URL + "/thirdparty/company.com/" + version + "/namespaces/default/foos/test") - if err != nil 
{ - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { return } - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected status: %v", resp) - } + assert.Equal(http.StatusOK, resp.StatusCode) + item := Foo{} - if err := decodeResponse(resp, &item); err != nil { - t.Errorf("unexpected error: %v", err) + assert.NoError(decodeResponse(resp, &item)) + if !assert.False(reflect.DeepEqual(item, expectedObj)) { + t.Errorf("expected objects to not be equal:\n%v\nsaw:\n%v\n", expectedObj, item) } // Fill in data that the apiserver injects expectedObj.SelfLink = item.SelfLink - if !reflect.DeepEqual(item, expectedObj) { + if !assert.True(reflect.DeepEqual(item, expectedObj)) { t.Errorf("expected:\n%#v\nsaw:\n%#v\n", expectedObj, item) } } @@ -251,7 +584,7 @@ func TestInstallThirdPartyAPIPost(t *testing.T) { } func testInstallThirdPartyAPIPostForVersion(t *testing.T, version string) { - fakeClient, server := initThirdParty(t, version) + fakeClient, server, assert := initThirdParty(t, version) defer server.Close() inputObj := Foo{ @@ -266,25 +599,19 @@ func testInstallThirdPartyAPIPostForVersion(t *testing.T, version string) { OtherField: 10, } data, err := json.Marshal(inputObj) - if err != nil { - t.Errorf("unexpected error: %v") + if !assert.NoError(err) { return } resp, err := http.Post(server.URL+"/thirdparty/company.com/"+version+"/namespaces/default/foos", "application/json", bytes.NewBuffer(data)) - if err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { return } - if resp.StatusCode != http.StatusCreated { - t.Errorf("unexpected status: %v", resp) - } + assert.Equal(http.StatusCreated, resp.StatusCode) item := Foo{} - if err := decodeResponse(resp, &item); err != nil { - t.Errorf("unexpected error: %v", err) - } + assert.NoError(decodeResponse(resp, &item)) // fill in fields set by the apiserver expectedObj := inputObj @@ -292,29 +619,26 @@ func testInstallThirdPartyAPIPostForVersion(t *testing.T, version string) { expectedObj.Namespace = item.Namespace expectedObj.UID = item.UID expectedObj.CreationTimestamp = item.CreationTimestamp - if !reflect.DeepEqual(item, expectedObj) { + if !assert.True(reflect.DeepEqual(item, expectedObj)) { t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item) } etcdResp, err := fakeClient.Get(etcdtest.PathPrefix()+"/ThirdPartyResourceData/company.com/foos/default/test", false, false) - if err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { t.FailNow() } + obj, err := explatest.Codec.Decode([]byte(etcdResp.Node.Value)) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - thirdPartyObj, ok := obj.(*expapi.ThirdPartyResourceData) - if !ok { + assert.NoError(err) + + thirdPartyObj, ok := obj.(*experimental.ThirdPartyResourceData) + if !assert.True(ok) { t.Errorf("unexpected object: %v", obj) } item = Foo{} - if err := json.Unmarshal(thirdPartyObj.Data, &item); err != nil { - t.Errorf("unexpected error: %v", err) - } + assert.NoError(json.Unmarshal(thirdPartyObj.Data, &item)) - if !reflect.DeepEqual(item, inputObj) { + if !assert.True(reflect.DeepEqual(item, inputObj)) { t.Errorf("expected:\n%v\nsaw:\n%v\n", inputObj, item) } } @@ -326,7 +650,7 @@ func TestInstallThirdPartyAPIDelete(t *testing.T) { } func testInstallThirdPartyAPIDeleteVersion(t *testing.T, version string) { - fakeClient, server := initThirdParty(t, version) + fakeClient, server, assert := initThirdParty(t, version) defer server.Close() expectedObj := Foo{ @@ -340,55 +664,45 @@ func 
testInstallThirdPartyAPIDeleteVersion(t *testing.T, version string) { SomeField: "test field", OtherField: 10, } - if err := storeToEtcd(fakeClient, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj); err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(storeToEtcd(fakeClient, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj)) { t.FailNow() return } resp, err := http.Get(server.URL + "/thirdparty/company.com/" + version + "/namespaces/default/foos/test") - if err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { return } - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected status: %v", resp) - } + assert.Equal(http.StatusOK, resp.StatusCode) item := Foo{} - if err := decodeResponse(resp, &item); err != nil { - t.Errorf("unexpected error: %v", err) - } + assert.NoError(decodeResponse(resp, &item)) // Fill in fields set by the apiserver expectedObj.SelfLink = item.SelfLink expectedObj.Namespace = item.Namespace - if !reflect.DeepEqual(item, expectedObj) { + if !assert.True(reflect.DeepEqual(item, expectedObj)) { t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item) } resp, err = httpDelete(server.URL + "/thirdparty/company.com/" + version + "/namespaces/default/foos/test") - if err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { return } - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected status: %v", resp) - } + assert.Equal(http.StatusOK, resp.StatusCode) resp, err = http.Get(server.URL + "/thirdparty/company.com/" + version + "/namespaces/default/foos/test") - if err != nil { - t.Errorf("unexpected error: %v", err) + if !assert.NoError(err) { return } - if resp.StatusCode != http.StatusNotFound { - t.Errorf("unexpected status: %v", resp) - } + assert.Equal(http.StatusNotFound, resp.StatusCode) + expectDeletedKeys := []string{etcdtest.PathPrefix() + "/ThirdPartyResourceData/company.com/foos/default/test"} - if !reflect.DeepEqual(fakeClient.DeletedKeys, expectDeletedKeys) { + + if !assert.True(reflect.DeepEqual(fakeClient.DeletedKeys, expectDeletedKeys)) { t.Errorf("unexpected deleted keys: %v", fakeClient.DeletedKeys) } } diff --git a/pkg/probe/http/http_test.go b/pkg/probe/http/http_test.go index 987f3103f88..65cd0398282 100644 --- a/pkg/probe/http/http_test.go +++ b/pkg/probe/http/http_test.go @@ -29,6 +29,15 @@ import ( "k8s.io/kubernetes/pkg/probe" ) +func containsAny(s string, substrs []string) bool { + for _, substr := range substrs { + if strings.Contains(s, substr) { + return true + } + } + return false +} + func TestHTTPProbeChecker(t *testing.T) { handleReq := func(s int, body string) func(w http.ResponseWriter) { return func(w http.ResponseWriter) { @@ -41,12 +50,31 @@ func TestHTTPProbeChecker(t *testing.T) { testCases := []struct { handler func(w http.ResponseWriter) health probe.Result - body string + // go1.5: error message changed for timeout, need to support + // both old and new + accBodies []string }{ // The probe will be filled in below. This is primarily testing that an HTTP GET happens. 
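Both probe test files grow the same containsAny helper because the Go 1.5 net/http client reports timeouts differently from Go 1.4. A sketch of a unit test one could add alongside the helper to document its semantics (not part of this change):

func TestContainsAny(t *testing.T) {
	out := "request canceled (Client.Timeout exceeded while awaiting headers)"
	accepted := []string{
		"use of closed network connection", // go1.4 wording
		"request canceled (Client.Timeout exceeded while awaiting headers)", // go1.5 wording
	}
	// Matching any one accepted substring is enough.
	if !containsAny(out, accepted) {
		t.Errorf("expected %q to match one of %#v", out, accepted)
	}
}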
- {handleReq(http.StatusOK, "ok body"), probe.Success, "ok body"}, - {handleReq(-1, "fail body"), probe.Failure, "fail body"}, - {func(w http.ResponseWriter) { time.Sleep(3 * time.Second) }, probe.Failure, "use of closed network connection"}, + { + handleReq(http.StatusOK, "ok body"), + probe.Success, + []string{"ok body"}, + }, + { + handleReq(-1, "fail body"), + probe.Failure, + []string{"fail body"}, + }, + { + func(w http.ResponseWriter) { + time.Sleep(3 * time.Second) + }, + probe.Failure, + []string{ + "use of closed network connection", + "request canceled (Client.Timeout exceeded while awaiting headers)", + }, + }, } for _, test := range testCases { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -74,8 +102,8 @@ func TestHTTPProbeChecker(t *testing.T) { if health != test.health { t.Errorf("Expected %v, got %v", test.health, health) } - if !strings.Contains(output, test.body) { - t.Errorf("Expected %v, got %v", test.body, output) + if !containsAny(output, test.accBodies) { + t.Errorf("Expected one of %#v, got %v", test.accBodies, output) } } } diff --git a/pkg/probe/tcp/tcp_test.go b/pkg/probe/tcp/tcp_test.go index d30f088a78f..8b4c80e03c0 100644 --- a/pkg/probe/tcp/tcp_test.go +++ b/pkg/probe/tcp/tcp_test.go @@ -29,17 +29,28 @@ import ( "k8s.io/kubernetes/pkg/probe" ) +func containsAny(s string, substrs []string) bool { + for _, substr := range substrs { + if strings.Contains(s, substr) { + return true + } + } + return false +} + func TestTcpHealthChecker(t *testing.T) { prober := New() tests := []struct { expectedStatus probe.Result usePort bool expectError bool - output string + // Some errors are different depending on your system, make + // the test pass on all of them + accOutputs []string }{ // The probe will be filled in below. This is primarily testing that a connection is made. 
- {probe.Success, true, false, ""}, - {probe.Failure, false, false, "tcp: unknown port"}, + {probe.Success, true, false, []string{""}}, + {probe.Failure, false, false, []string{"unknown port", "Servname not supported for ai_socktype"}}, } server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -72,8 +83,8 @@ func TestTcpHealthChecker(t *testing.T) { if err == nil && test.expectError { t.Errorf("unexpected non-error.") } - if !strings.Contains(output, test.output) { - t.Errorf("expected %s, got %s", test.output, output) + if !containsAny(output, test.accOutputs) { + t.Errorf("expected one of %#v, got %s", test.accOutputs, output) } } } diff --git a/pkg/proxy/config/api.go b/pkg/proxy/config/api.go index 65c3a502e7a..cfd53c5f8b8 100644 --- a/pkg/proxy/config/api.go +++ b/pkg/proxy/config/api.go @@ -20,8 +20,8 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/fields" ) diff --git a/pkg/proxy/config/api_test.go b/pkg/proxy/config/api_test.go index 7080bb2484a..fa4fa122b2e 100644 --- a/pkg/proxy/config/api_test.go +++ b/pkg/proxy/config/api_test.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/watch" ) diff --git a/pkg/registry/daemonset/etcd/etcd.go b/pkg/registry/daemonset/etcd/etcd.go index f68b20345a3..d126968a2b1 100644 --- a/pkg/registry/daemonset/etcd/etcd.go +++ b/pkg/registry/daemonset/etcd/etcd.go @@ -18,7 +18,7 @@ package etcd import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/daemonset" @@ -39,10 +39,10 @@ var daemonPrefix = "/daemonsets" // NewREST returns a RESTStorage object that will work against DaemonSets. func NewREST(s storage.Interface) *REST { store := &etcdgeneric.Etcd{ - NewFunc: func() runtime.Object { return &expapi.DaemonSet{} }, + NewFunc: func() runtime.Object { return &experimental.DaemonSet{} }, // NewListFunc returns an object capable of storing results of an etcd list. 
- NewListFunc: func() runtime.Object { return &expapi.DaemonSetList{} }, + NewListFunc: func() runtime.Object { return &experimental.DaemonSetList{} }, // Produces a path that etcd understands, to the root of the resource // by combining the namespace in the context with the given prefix KeyRootFunc: func(ctx api.Context) string { @@ -55,7 +55,7 @@ func NewREST(s storage.Interface) *REST { }, // Retrieve the name field of a daemon set ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*expapi.DaemonSet).Name, nil + return obj.(*experimental.DaemonSet).Name, nil }, // Used to match objects based on labels/fields for list and watch PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { diff --git a/pkg/registry/daemonset/etcd/etcd_test.go b/pkg/registry/daemonset/etcd/etcd_test.go index 341ddd01830..5e3059d0a8a 100755 --- a/pkg/registry/daemonset/etcd/etcd_test.go +++ b/pkg/registry/daemonset/etcd/etcd_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" @@ -33,13 +33,13 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage), fakeClient } -func newValidDaemonSet() *expapi.DaemonSet { - return &expapi.DaemonSet{ +func newValidDaemonSet() *experimental.DaemonSet { + return &experimental.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: expapi.DaemonSetSpec{ + Spec: experimental.DaemonSetSpec{ Selector: map[string]string{"a": "b"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -72,8 +72,8 @@ func TestCreate(t *testing.T) { // valid ds, // invalid (invalid selector) - &expapi.DaemonSet{ - Spec: expapi.DaemonSetSpec{ + &experimental.DaemonSet{ + Spec: experimental.DaemonSetSpec{ Selector: map[string]string{}, Template: validDaemonSet.Spec.Template, }, @@ -89,28 +89,28 @@ func TestUpdate(t *testing.T) { newValidDaemonSet(), // updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.DaemonSet) + object := obj.(*experimental.DaemonSet) object.Spec.Template.Spec.NodeSelector = map[string]string{"c": "d"} return object }, // invalid updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.DaemonSet) + object := obj.(*experimental.DaemonSet) object.UID = "newUID" return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.DaemonSet) + object := obj.(*experimental.DaemonSet) object.Name = "" return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.DaemonSet) + object := obj.(*experimental.DaemonSet) object.Spec.Template.Spec.RestartPolicy = api.RestartPolicyOnFailure return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.DaemonSet) + object := obj.(*experimental.DaemonSet) object.Spec.Selector = map[string]string{} return object }, diff --git a/pkg/registry/daemonset/strategy.go b/pkg/registry/daemonset/strategy.go index 9861ea464c1..7369afe51ff 100644 --- a/pkg/registry/daemonset/strategy.go +++ b/pkg/registry/daemonset/strategy.go @@ -21,8 +21,8 @@ import ( "reflect" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/validation" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/validation" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" 
"k8s.io/kubernetes/pkg/registry/generic" @@ -46,16 +46,16 @@ func (daemonSetStrategy) NamespaceScoped() bool { // PrepareForCreate clears the status of a daemon set before creation. func (daemonSetStrategy) PrepareForCreate(obj runtime.Object) { - daemonSet := obj.(*expapi.DaemonSet) - daemonSet.Status = expapi.DaemonSetStatus{} + daemonSet := obj.(*experimental.DaemonSet) + daemonSet.Status = experimental.DaemonSetStatus{} daemonSet.Generation = 1 } // PrepareForUpdate clears fields that are not allowed to be set by end users on update. func (daemonSetStrategy) PrepareForUpdate(obj, old runtime.Object) { - newDaemonSet := obj.(*expapi.DaemonSet) - oldDaemonSet := old.(*expapi.DaemonSet) + newDaemonSet := obj.(*experimental.DaemonSet) + oldDaemonSet := old.(*experimental.DaemonSet) // Any changes to the spec increment the generation number, any changes to the // status should reflect the generation number of the corresponding object. We push @@ -75,7 +75,7 @@ func (daemonSetStrategy) PrepareForUpdate(obj, old runtime.Object) { // Validate validates a new daemon set. func (daemonSetStrategy) Validate(ctx api.Context, obj runtime.Object) fielderrors.ValidationErrorList { - daemonSet := obj.(*expapi.DaemonSet) + daemonSet := obj.(*experimental.DaemonSet) return validation.ValidateDaemonSet(daemonSet) } @@ -87,8 +87,8 @@ func (daemonSetStrategy) AllowCreateOnUpdate() bool { // ValidateUpdate is the default update validation for an end user. func (daemonSetStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { - validationErrorList := validation.ValidateDaemonSet(obj.(*expapi.DaemonSet)) - updateErrorList := validation.ValidateDaemonSetUpdate(old.(*expapi.DaemonSet), obj.(*expapi.DaemonSet)) + validationErrorList := validation.ValidateDaemonSet(obj.(*experimental.DaemonSet)) + updateErrorList := validation.ValidateDaemonSetUpdate(old.(*experimental.DaemonSet), obj.(*experimental.DaemonSet)) return append(validationErrorList, updateErrorList...) } @@ -98,7 +98,7 @@ func (daemonSetStrategy) AllowUnconditionalUpdate() bool { } // DaemonSetToSelectableFields returns a field set that represents the object. -func DaemonSetToSelectableFields(daemon *expapi.DaemonSet) fields.Set { +func DaemonSetToSelectableFields(daemon *experimental.DaemonSet) fields.Set { return fields.Set{ "metadata.name": daemon.Name, } @@ -112,7 +112,7 @@ func MatchDaemonSet(label labels.Selector, field fields.Selector) generic.Matche Label: label, Field: field, GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { - ds, ok := obj.(*expapi.DaemonSet) + ds, ok := obj.(*experimental.DaemonSet) if !ok { return nil, nil, fmt.Errorf("given object is not a ds.") } diff --git a/pkg/registry/deployment/etcd/etcd.go b/pkg/registry/deployment/etcd/etcd.go index 114579d8398..8eace5bcfc6 100644 --- a/pkg/registry/deployment/etcd/etcd.go +++ b/pkg/registry/deployment/etcd/etcd.go @@ -18,7 +18,7 @@ package etcd import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/deployment" @@ -36,9 +36,9 @@ type REST struct { func NewREST(s storage.Interface) *REST { prefix := "/deployments" store := &etcdgeneric.Etcd{ - NewFunc: func() runtime.Object { return &expapi.Deployment{} }, + NewFunc: func() runtime.Object { return &experimental.Deployment{} }, // NewListFunc returns an object capable of storing results of an etcd list. 
- NewListFunc: func() runtime.Object { return &expapi.DeploymentList{} }, + NewListFunc: func() runtime.Object { return &experimental.DeploymentList{} }, // Produces a path that etcd understands, to the root of the resource // by combining the namespace in the context with the given prefix. KeyRootFunc: func(ctx api.Context) string { @@ -51,7 +51,7 @@ func NewREST(s storage.Interface) *REST { }, // Retrieve the name field of a deployment. ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*expapi.Deployment).Name, nil + return obj.(*experimental.Deployment).Name, nil }, // Used to match objects based on labels/fields for list. PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { diff --git a/pkg/registry/deployment/etcd/etcd_test.go b/pkg/registry/deployment/etcd/etcd_test.go index 0878d10883d..e11d113a633 100755 --- a/pkg/registry/deployment/etcd/etcd_test.go +++ b/pkg/registry/deployment/etcd/etcd_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" @@ -33,13 +33,13 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage), fakeClient } -func validNewDeployment() *expapi.Deployment { - return &expapi.Deployment{ +func validNewDeployment() *experimental.Deployment { + return &experimental.Deployment{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: expapi.DeploymentSpec{ + Spec: experimental.DeploymentSpec{ Selector: map[string]string{"a": "b"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -73,8 +73,8 @@ func TestCreate(t *testing.T) { // valid deployment, // invalid (invalid selector) - &expapi.Deployment{ - Spec: expapi.DeploymentSpec{ + &experimental.Deployment{ + Spec: experimental.DeploymentSpec{ Selector: map[string]string{}, Template: validDeployment.Spec.Template, }, @@ -90,28 +90,28 @@ func TestUpdate(t *testing.T) { validNewDeployment(), // updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Deployment) + object := obj.(*experimental.Deployment) object.Spec.Template.Spec.NodeSelector = map[string]string{"c": "d"} return object }, // invalid updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Deployment) + object := obj.(*experimental.Deployment) object.UID = "newUID" return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Deployment) + object := obj.(*experimental.Deployment) object.Name = "" return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Deployment) + object := obj.(*experimental.Deployment) object.Spec.Template.Spec.RestartPolicy = api.RestartPolicyOnFailure return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Deployment) + object := obj.(*experimental.Deployment) object.Spec.Selector = map[string]string{} return object }, diff --git a/pkg/registry/deployment/strategy.go b/pkg/registry/deployment/strategy.go index d00db2f0f51..c865927f751 100644 --- a/pkg/registry/deployment/strategy.go +++ b/pkg/registry/deployment/strategy.go @@ -20,8 +20,8 @@ import ( "fmt" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/validation" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/validation" "k8s.io/kubernetes/pkg/fields" 
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic" @@ -50,7 +50,7 @@ func (deploymentStrategy) PrepareForCreate(obj runtime.Object) { // Validate validates a new deployment. func (deploymentStrategy) Validate(ctx api.Context, obj runtime.Object) errs.ValidationErrorList { - deployment := obj.(*expapi.Deployment) + deployment := obj.(*experimental.Deployment) return validation.ValidateDeployment(deployment) } @@ -65,7 +65,7 @@ func (deploymentStrategy) PrepareForUpdate(obj, old runtime.Object) { // ValidateUpdate is the default update validation for an end user. func (deploymentStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) errs.ValidationErrorList { - return validation.ValidateDeploymentUpdate(old.(*expapi.Deployment), obj.(*expapi.Deployment)) + return validation.ValidateDeploymentUpdate(old.(*experimental.Deployment), obj.(*experimental.Deployment)) } func (deploymentStrategy) AllowUnconditionalUpdate() bool { @@ -73,7 +73,7 @@ func (deploymentStrategy) AllowUnconditionalUpdate() bool { } // DeploymentToSelectableFields returns a field set that represents the object. -func DeploymentToSelectableFields(deployment *expapi.Deployment) fields.Set { +func DeploymentToSelectableFields(deployment *experimental.Deployment) fields.Set { return fields.Set{ "metadata.name": deployment.Name, } @@ -87,7 +87,7 @@ func MatchDeployment(label labels.Selector, field fields.Selector) generic.Match Label: label, Field: field, GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { - deployment, ok := obj.(*expapi.Deployment) + deployment, ok := obj.(*experimental.Deployment) if !ok { return nil, nil, fmt.Errorf("given object is not a deployment.") } diff --git a/pkg/registry/experimental/controller/etcd/etcd.go b/pkg/registry/experimental/controller/etcd/etcd.go index 9cd5e99114e..134f42f457f 100644 --- a/pkg/registry/experimental/controller/etcd/etcd.go +++ b/pkg/registry/experimental/controller/etcd/etcd.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/registry/controller" "k8s.io/kubernetes/pkg/registry/controller/etcd" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" ) // Container includes dummy storage for RC pods and experimental storage for Scale. 
@@ -55,7 +55,7 @@ var _ = rest.Patcher(&ScaleREST{}) // New creates a new Scale object func (r *ScaleREST) New() runtime.Object { - return &expapi.Scale{} + return &experimental.Scale{} } func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) { @@ -63,16 +63,16 @@ func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) { if err != nil { return nil, errors.NewNotFound("scale", name) } - return &expapi.Scale{ + return &experimental.Scale{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: rc.Namespace, CreationTimestamp: rc.CreationTimestamp, }, - Spec: expapi.ScaleSpec{ + Spec: experimental.ScaleSpec{ Replicas: rc.Spec.Replicas, }, - Status: expapi.ScaleStatus{ + Status: experimental.ScaleStatus{ Replicas: rc.Status.Replicas, Selector: rc.Spec.Selector, }, @@ -83,7 +83,7 @@ func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, if obj == nil { return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale")) } - scale, ok := obj.(*expapi.Scale) + scale, ok := obj.(*experimental.Scale) if !ok { return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj)) } @@ -96,16 +96,16 @@ func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, if err != nil { return nil, false, errors.NewConflict("scale", scale.Name, err) } - return &expapi.Scale{ + return &experimental.Scale{ ObjectMeta: api.ObjectMeta{ Name: rc.Name, Namespace: rc.Namespace, CreationTimestamp: rc.CreationTimestamp, }, - Spec: expapi.ScaleSpec{ + Spec: experimental.ScaleSpec{ Replicas: rc.Spec.Replicas, }, - Status: expapi.ScaleStatus{ + Status: experimental.ScaleStatus{ Replicas: rc.Status.Replicas, Selector: rc.Spec.Selector, }, @@ -116,5 +116,5 @@ func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, type RcREST struct{} func (r *RcREST) New() runtime.Object { - return &expapi.ReplicationControllerDummy{} + return &experimental.ReplicationControllerDummy{} } diff --git a/pkg/registry/experimental/controller/etcd/etcd_test.go b/pkg/registry/experimental/controller/etcd/etcd_test.go index 192da9dfba1..c5bed83b350 100644 --- a/pkg/registry/experimental/controller/etcd/etcd_test.go +++ b/pkg/registry/experimental/controller/etcd/etcd_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" @@ -66,12 +66,12 @@ var validController = api.ReplicationController{ Spec: validControllerSpec, } -var validScale = expapi.Scale{ +var validScale = experimental.Scale{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, - Spec: expapi.ScaleSpec{ + Spec: experimental.ScaleSpec{ Replicas: validReplicas, }, - Status: expapi.ScaleStatus{ + Status: experimental.ScaleStatus{ Replicas: 0, Selector: validPodTemplate.Template.Labels, }, @@ -88,7 +88,7 @@ func TestGet(t *testing.T) { expect := &validScale obj, err := storage.Get(ctx, "foo") - scale := obj.(*expapi.Scale) + scale := obj.(*experimental.Scale) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -106,9 +106,9 @@ func TestUpdate(t *testing.T) { t.Fatalf("unexpected error: %v", err) } replicas := 12 - update := expapi.Scale{ + update := experimental.Scale{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, - Spec: expapi.ScaleSpec{ + Spec: experimental.ScaleSpec{ Replicas: 
replicas, }, } diff --git a/pkg/registry/horizontalpodautoscaler/etcd/etcd.go b/pkg/registry/horizontalpodautoscaler/etcd/etcd.go index f3447c171e5..b620cfe9d89 100644 --- a/pkg/registry/horizontalpodautoscaler/etcd/etcd.go +++ b/pkg/registry/horizontalpodautoscaler/etcd/etcd.go @@ -18,7 +18,7 @@ package etcd import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic" @@ -36,9 +36,9 @@ type REST struct { func NewREST(s storage.Interface) *REST { prefix := "/horizontalpodautoscalers" store := &etcdgeneric.Etcd{ - NewFunc: func() runtime.Object { return &expapi.HorizontalPodAutoscaler{} }, + NewFunc: func() runtime.Object { return &experimental.HorizontalPodAutoscaler{} }, // NewListFunc returns an object capable of storing results of an etcd list. - NewListFunc: func() runtime.Object { return &expapi.HorizontalPodAutoscalerList{} }, + NewListFunc: func() runtime.Object { return &experimental.HorizontalPodAutoscalerList{} }, // Produces a path that etcd understands, to the root of the resource // by combining the namespace in the context with the given prefix KeyRootFunc: func(ctx api.Context) string { @@ -51,7 +51,7 @@ func NewREST(s storage.Interface) *REST { }, // Retrieve the name field of an autoscaler ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*expapi.HorizontalPodAutoscaler).Name, nil + return obj.(*experimental.HorizontalPodAutoscaler).Name, nil }, // Used to match objects based on labels/fields for list PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { diff --git a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go index a5ab4768565..91e531049c0 100644 --- a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go +++ b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go @@ -21,9 +21,9 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/expapi" - // Ensure that expapi/v1 package is initialized. - _ "k8s.io/kubernetes/pkg/expapi/v1" + "k8s.io/kubernetes/pkg/apis/experimental" + // Ensure that experimental/v1 package is initialized. 
+ _ "k8s.io/kubernetes/pkg/apis/experimental/v1" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" @@ -36,19 +36,19 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage), fakeClient } -func validNewHorizontalPodAutoscaler(name string) *expapi.HorizontalPodAutoscaler { - return &expapi.HorizontalPodAutoscaler{ +func validNewHorizontalPodAutoscaler(name string) *experimental.HorizontalPodAutoscaler { + return &experimental.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: api.NamespaceDefault, }, - Spec: expapi.HorizontalPodAutoscalerSpec{ - ScaleRef: &expapi.SubresourceReference{ + Spec: experimental.HorizontalPodAutoscalerSpec{ + ScaleRef: &experimental.SubresourceReference{ Subresource: "scale", }, MinCount: 1, MaxCount: 5, - Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, } } @@ -62,7 +62,7 @@ func TestCreate(t *testing.T) { // valid autoscaler, // invalid - &expapi.HorizontalPodAutoscaler{}, + &experimental.HorizontalPodAutoscaler{}, ) } @@ -74,7 +74,7 @@ func TestUpdate(t *testing.T) { validNewHorizontalPodAutoscaler("foo"), // updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.HorizontalPodAutoscaler) + object := obj.(*experimental.HorizontalPodAutoscaler) object.Spec.MaxCount = object.Spec.MaxCount + 1 return object }, diff --git a/pkg/registry/horizontalpodautoscaler/strategy.go b/pkg/registry/horizontalpodautoscaler/strategy.go index e0de495bcdf..88aa5c8f54c 100644 --- a/pkg/registry/horizontalpodautoscaler/strategy.go +++ b/pkg/registry/horizontalpodautoscaler/strategy.go @@ -20,8 +20,8 @@ import ( "fmt" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/validation" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/validation" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic" @@ -46,12 +46,12 @@ func (autoscalerStrategy) NamespaceScoped() bool { // PrepareForCreate clears fields that are not allowed to be set by end users on creation. func (autoscalerStrategy) PrepareForCreate(obj runtime.Object) { - _ = obj.(*expapi.HorizontalPodAutoscaler) + _ = obj.(*experimental.HorizontalPodAutoscaler) } // Validate validates a new autoscaler. func (autoscalerStrategy) Validate(ctx api.Context, obj runtime.Object) errs.ValidationErrorList { - autoscaler := obj.(*expapi.HorizontalPodAutoscaler) + autoscaler := obj.(*experimental.HorizontalPodAutoscaler) return validation.ValidateHorizontalPodAutoscaler(autoscaler) } @@ -62,19 +62,19 @@ func (autoscalerStrategy) AllowCreateOnUpdate() bool { // PrepareForUpdate clears fields that are not allowed to be set by end users on update. func (autoscalerStrategy) PrepareForUpdate(obj, old runtime.Object) { - _ = obj.(*expapi.HorizontalPodAutoscaler) + _ = obj.(*experimental.HorizontalPodAutoscaler) } // ValidateUpdate is the default update validation for an end user. 
func (autoscalerStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) errs.ValidationErrorList { - return validation.ValidateHorizontalPodAutoscalerUpdate(obj.(*expapi.HorizontalPodAutoscaler), old.(*expapi.HorizontalPodAutoscaler)) + return validation.ValidateHorizontalPodAutoscalerUpdate(obj.(*experimental.HorizontalPodAutoscaler), old.(*experimental.HorizontalPodAutoscaler)) } func (autoscalerStrategy) AllowUnconditionalUpdate() bool { return true } -func AutoscalerToSelectableFields(limitRange *expapi.HorizontalPodAutoscaler) fields.Set { +func AutoscalerToSelectableFields(limitRange *experimental.HorizontalPodAutoscaler) fields.Set { return fields.Set{} } @@ -83,7 +83,7 @@ func MatchAutoscaler(label labels.Selector, field fields.Selector) generic.Match Label: label, Field: field, GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { - hpa, ok := obj.(*expapi.HorizontalPodAutoscaler) + hpa, ok := obj.(*experimental.HorizontalPodAutoscaler) if !ok { return nil, nil, fmt.Errorf("given object is not a horizontal pod autoscaler.") } diff --git a/pkg/registry/job/doc.go b/pkg/registry/job/doc.go new file mode 100644 index 00000000000..a76a224cbff --- /dev/null +++ b/pkg/registry/job/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package job provides the Registry interface and its RESTStorage +// implementation for storing Job api objects. +package job diff --git a/pkg/registry/job/etcd/etcd.go b/pkg/registry/job/etcd/etcd.go new file mode 100644 index 00000000000..b6ff7bd7750 --- /dev/null +++ b/pkg/registry/job/etcd/etcd.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" + "k8s.io/kubernetes/pkg/registry/job" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" +) + +// REST implements a RESTStorage for jobs against etcd +type REST struct { + *etcdgeneric.Etcd +} + +// jobPrefix is the location for jobs in etcd; it is a var rather than a +// const so that tests can override it +var jobPrefix = "/jobs" + +// NewREST returns a RESTStorage object that will work against Jobs.
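NewREST below composes the generic etcd store with Job-specific hooks, and the Registry interface added later in this patch (pkg/registry/job/registry.go) wraps that same storage with typed methods. A minimal sketch of how the two pieces fit together; the helper name and wiring here are illustrative, not part of the patch:

import (
	"k8s.io/kubernetes/pkg/registry/job"
	jobetcd "k8s.io/kubernetes/pkg/registry/job/etcd"
	"k8s.io/kubernetes/pkg/storage"
)

// newJobRegistry is a hypothetical helper: the etcd-backed storage returned
// by NewREST satisfies rest.StandardStorage, which job.NewRegistry accepts,
// so a typed Registry can be built straight from a storage.Interface.
func newJobRegistry(s storage.Interface) job.Registry {
	return job.NewRegistry(jobetcd.NewREST(s))
}

The constructor itself follows.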
+func NewREST(s storage.Interface) *REST { + store := &etcdgeneric.Etcd{ + NewFunc: func() runtime.Object { return &experimental.Job{} }, + + // NewListFunc returns an object capable of storing results of an etcd list. + NewListFunc: func() runtime.Object { return &experimental.JobList{} }, + // Produces a path that etcd understands, to the root of the resource + // by combining the namespace in the context with the given prefix + KeyRootFunc: func(ctx api.Context) string { + return etcdgeneric.NamespaceKeyRootFunc(ctx, jobPrefix) + }, + // Produces a path that etcd understands, to the resource by combining + // the namespace in the context with the given prefix + KeyFunc: func(ctx api.Context, name string) (string, error) { + return etcdgeneric.NamespaceKeyFunc(ctx, jobPrefix, name) + }, + // Retrieve the name field of a job + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*experimental.Job).Name, nil + }, + // Used to match objects based on labels/fields for list and watch + PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { + return job.MatchJob(label, field) + }, + EndpointName: "jobs", + + // Used to validate job creation + CreateStrategy: job.Strategy, + + // Used to validate job updates + UpdateStrategy: job.Strategy, + + Storage: s, + } + + return &REST{store} +} diff --git a/pkg/registry/job/etcd/etcd_test.go b/pkg/registry/job/etcd/etcd_test.go new file mode 100644 index 00000000000..2f04c1fa6b3 --- /dev/null +++ b/pkg/registry/job/etcd/etcd_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/experimental" + // Ensure that experimental/v1 package is initialized. 
+ _ "k8s.io/kubernetes/pkg/apis/experimental/v1" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/registrytest" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/tools" +) + +func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") + return NewREST(etcdStorage), fakeClient +} + +func validNewJob() *experimental.Job { + completions := 1 + parallelism := 1 + return &experimental.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: experimental.JobSpec{ + Completions: &completions, + Parallelism: ¶llelism, + Selector: map[string]string{"a": "b"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "test", + Image: "test_image", + ImagePullPolicy: api.PullIfNotPresent, + }, + }, + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + }, + }, + }, + } +} + +func TestCreate(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + validJob := validNewJob() + validJob.ObjectMeta = api.ObjectMeta{} + test.TestCreate( + // valid + validJob, + // invalid (empty selector) + &experimental.Job{ + Spec: experimental.JobSpec{ + Completions: validJob.Spec.Completions, + Selector: map[string]string{}, + Template: validJob.Spec.Template, + }, + }, + ) +} + +func TestUpdate(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + completions := 2 + test.TestUpdate( + // valid + validNewJob(), + // updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*experimental.Job) + object.Spec.Completions = &completions + return object + }, + // invalid updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*experimental.Job) + object.Spec.Selector = map[string]string{} + return object + }, + ) +} + +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewJob()) +} + +func TestGet(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestGet(validNewJob()) +} + +func TestList(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestList(validNewJob()) +} + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewJob(), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"x": "y"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "xyz"}, + {"name": "foo"}, + }, + ) +} diff --git a/pkg/registry/job/registry.go b/pkg/registry/job/registry.go new file mode 100644 index 00000000000..de6e892157d --- /dev/null +++ b/pkg/registry/job/registry.go @@ -0,0 +1,99 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package job + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/watch" +) + +// Registry is an interface for things that know how to store Jobs. +type Registry interface { + // ListJobs obtains a list of Jobs having labels and fields which match selector. + ListJobs(ctx api.Context, label labels.Selector, field fields.Selector) (*experimental.JobList, error) + // WatchJobs watches for new/changed/deleted Jobs. + WatchJobs(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) + // GetJob gets a specific Job. + GetJob(ctx api.Context, name string) (*experimental.Job, error) + // CreateJob creates a Job based on a specification. + CreateJob(ctx api.Context, job *experimental.Job) (*experimental.Job, error) + // UpdateJob updates an existing Job. + UpdateJob(ctx api.Context, job *experimental.Job) (*experimental.Job, error) + // DeleteJob deletes an existing Job. + DeleteJob(ctx api.Context, name string) error +} + +// storage puts strong typing around storage calls +type storage struct { + rest.StandardStorage +} + +// NewRegistry returns a new Registry interface for the given Storage. Any mismatched +// types will panic. +func NewRegistry(s rest.StandardStorage) Registry { + return &storage{s} +} + +func (s *storage) ListJobs(ctx api.Context, label labels.Selector, field fields.Selector) (*experimental.JobList, error) { + if !field.Empty() { + return nil, fmt.Errorf("field selector not supported yet") + } + obj, err := s.List(ctx, label, field) + if err != nil { + return nil, err + } + return obj.(*experimental.JobList), err +} + +func (s *storage) WatchJobs(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { + return s.Watch(ctx, label, field, resourceVersion) +} + +func (s *storage) GetJob(ctx api.Context, name string) (*experimental.Job, error) { + obj, err := s.Get(ctx, name) + if err != nil { + return nil, err + } + return obj.(*experimental.Job), nil +} + +func (s *storage) CreateJob(ctx api.Context, job *experimental.Job) (*experimental.Job, error) { + obj, err := s.Create(ctx, job) + if err != nil { + return nil, err + } + return obj.(*experimental.Job), nil +} + +func (s *storage) UpdateJob(ctx api.Context, job *experimental.Job) (*experimental.Job, error) { + obj, _, err := s.Update(ctx, job) + if err != nil { + return nil, err + } + return obj.(*experimental.Job), nil +} + +func (s *storage) DeleteJob(ctx api.Context, name string) error { + _, err := s.Delete(ctx, name, nil) + return err +} diff --git a/pkg/registry/job/strategy.go b/pkg/registry/job/strategy.go new file mode 100644 index 00000000000..4394d57744f --- /dev/null +++ b/pkg/registry/job/strategy.go @@ -0,0 +1,105 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package job + +import ( + "fmt" + "strconv" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/validation" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/fielderrors" +) + +// jobStrategy implements verification logic for Jobs. +type jobStrategy struct { + runtime.ObjectTyper + api.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating Job objects. +var Strategy = jobStrategy{api.Scheme, api.SimpleNameGenerator} + +// NamespaceScoped returns true because all jobs need to be within a namespace. +func (jobStrategy) NamespaceScoped() bool { + return true +} + +// PrepareForCreate clears the status of a job before creation. +func (jobStrategy) PrepareForCreate(obj runtime.Object) { + job := obj.(*experimental.Job) + job.Status = experimental.JobStatus{} +} + +// PrepareForUpdate clears fields that are not allowed to be set by end users on update. +func (jobStrategy) PrepareForUpdate(obj, old runtime.Object) { + newJob := obj.(*experimental.Job) + oldJob := old.(*experimental.Job) + newJob.Status = oldJob.Status +} + +// Validate validates a new job. +func (jobStrategy) Validate(ctx api.Context, obj runtime.Object) fielderrors.ValidationErrorList { + job := obj.(*experimental.Job) + return validation.ValidateJob(job) +} + +func (jobStrategy) AllowUnconditionalUpdate() bool { + return true +} + +// AllowCreateOnUpdate is false for jobs; this means a POST is needed to create one. +func (jobStrategy) AllowCreateOnUpdate() bool { + return false +} + +// ValidateUpdate is the default update validation for an end user. +func (jobStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { + validationErrorList := validation.ValidateJob(obj.(*experimental.Job)) + updateErrorList := validation.ValidateJobUpdate(old.(*experimental.Job), obj.(*experimental.Job)) + return append(validationErrorList, updateErrorList...) +} + +// JobToSelectableFields returns a field set that represents the object for matching purposes. +func JobToSelectableFields(job *experimental.Job) fields.Set { + return fields.Set{ + "metadata.name": job.Name, + "status.successful": strconv.Itoa(job.Status.Successful), + } +} + +// MatchJob is the filter used by the generic etcd backend to route +// watch events from etcd to clients of the apiserver only interested in specific +// labels/fields.
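In practice, the matcher below is what lets the apiserver narrow a list or watch to selected jobs. A caller-side sketch with made-up label and field values; only MatchJob and JobToSelectableFields come from this patch:

// completedBakeJobs selects jobs labeled app=bake whose "status.successful"
// selectable field (exposed by JobToSelectableFields above) equals "1".
func completedBakeJobs() generic.Matcher {
	label := labels.SelectorFromSet(labels.Set{"app": "bake"})
	field := fields.SelectorFromSet(fields.Set{"status.successful": "1"})
	return job.MatchJob(label, field)
}

The function itself follows.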
+func MatchJob(label labels.Selector, field fields.Selector) generic.Matcher { + return &generic.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + job, ok := obj.(*experimental.Job) + if !ok { + return nil, nil, fmt.Errorf("given object is not a job.") + } + return labels.Set(job.ObjectMeta.Labels), JobToSelectableFields(job), nil + }, + } +} diff --git a/pkg/registry/thirdpartyresource/etcd/etcd.go b/pkg/registry/thirdpartyresource/etcd/etcd.go index d72e7816a82..087a32938e8 100644 --- a/pkg/registry/thirdpartyresource/etcd/etcd.go +++ b/pkg/registry/thirdpartyresource/etcd/etcd.go @@ -18,7 +18,7 @@ package etcd import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic" @@ -38,8 +38,8 @@ func NewREST(s storage.Interface) *REST { prefix := "/thirdpartyresources" store := &etcdgeneric.Etcd{ - NewFunc: func() runtime.Object { return &expapi.ThirdPartyResource{} }, - NewListFunc: func() runtime.Object { return &expapi.ThirdPartyResourceList{} }, + NewFunc: func() runtime.Object { return &experimental.ThirdPartyResource{} }, + NewListFunc: func() runtime.Object { return &experimental.ThirdPartyResourceList{} }, KeyRootFunc: func(ctx api.Context) string { return etcdgeneric.NamespaceKeyRootFunc(ctx, prefix) }, @@ -47,7 +47,7 @@ func NewREST(s storage.Interface) *REST { return etcdgeneric.NamespaceKeyFunc(ctx, prefix, id) }, ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*expapi.ThirdPartyResource).Name, nil + return obj.(*experimental.ThirdPartyResource).Name, nil }, PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { return thirdpartyresource.Matcher(label, field) diff --git a/pkg/registry/thirdpartyresource/etcd/etcd_test.go b/pkg/registry/thirdpartyresource/etcd/etcd_test.go index 14e31c5f402..cb2ee1d72d3 100644 --- a/pkg/registry/thirdpartyresource/etcd/etcd_test.go +++ b/pkg/registry/thirdpartyresource/etcd/etcd_test.go @@ -20,9 +20,9 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" - // Ensure that expapi/v1 package is initialized. - _ "k8s.io/kubernetes/pkg/expapi/v1" + "k8s.io/kubernetes/pkg/apis/experimental" + // Ensure that experimental/v1 package is initialized.
+ _ "k8s.io/kubernetes/pkg/apis/experimental/v1" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" @@ -35,13 +35,13 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage), fakeClient } -func validNewThirdPartyResource(name string) *expapi.ThirdPartyResource { - return &expapi.ThirdPartyResource{ +func validNewThirdPartyResource(name string) *experimental.ThirdPartyResource { + return &experimental.ThirdPartyResource{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: api.NamespaceDefault, }, - Versions: []expapi.APIVersion{ + Versions: []experimental.APIVersion{ { Name: "stable/v1", }, @@ -58,7 +58,7 @@ func TestCreate(t *testing.T) { // valid rsrc, // invalid - &expapi.ThirdPartyResource{}, + &experimental.ThirdPartyResource{}, ) } @@ -70,7 +70,7 @@ func TestUpdate(t *testing.T) { validNewThirdPartyResource("foo"), // updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.ThirdPartyResource) + object := obj.(*experimental.ThirdPartyResource) object.Description = "new description" return object }, diff --git a/pkg/registry/thirdpartyresource/strategy.go b/pkg/registry/thirdpartyresource/strategy.go index cbdea16a107..733a1ea56de 100644 --- a/pkg/registry/thirdpartyresource/strategy.go +++ b/pkg/registry/thirdpartyresource/strategy.go @@ -21,8 +21,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/validation" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/validation" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic" @@ -52,7 +52,7 @@ func (strategy) PrepareForCreate(obj runtime.Object) { } func (strategy) Validate(ctx api.Context, obj runtime.Object) fielderrors.ValidationErrorList { - return validation.ValidateThirdPartyResource(obj.(*expapi.ThirdPartyResource)) + return validation.ValidateThirdPartyResource(obj.(*experimental.ThirdPartyResource)) } func (strategy) AllowCreateOnUpdate() bool { @@ -63,7 +63,7 @@ func (strategy) PrepareForUpdate(obj, old runtime.Object) { } func (strategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { - return validation.ValidateThirdPartyResourceUpdate(old.(*expapi.ThirdPartyResource), obj.(*expapi.ThirdPartyResource)) + return validation.ValidateThirdPartyResourceUpdate(old.(*experimental.ThirdPartyResource), obj.(*experimental.ThirdPartyResource)) } func (strategy) AllowUnconditionalUpdate() bool { @@ -73,7 +73,7 @@ func (strategy) AllowUnconditionalUpdate() bool { // Matcher returns a generic matcher for a given label and field selector. 
func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { - sa, ok := obj.(*expapi.ThirdPartyResource) + sa, ok := obj.(*experimental.ThirdPartyResource) if !ok { return false, fmt.Errorf("not a ThirdPartyResource") } @@ -83,6 +83,6 @@ func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { } // SelectableFields returns a label set that can be used for filter selection -func SelectableFields(obj *expapi.ThirdPartyResource) labels.Set { +func SelectableFields(obj *experimental.ThirdPartyResource) labels.Set { return labels.Set{} } diff --git a/pkg/registry/thirdpartyresourcedata/codec.go b/pkg/registry/thirdpartyresourcedata/codec.go index 5ecf9aaa963..18c668cde0d 100644 --- a/pkg/registry/thirdpartyresourcedata/codec.go +++ b/pkg/registry/thirdpartyresourcedata/codec.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/latest" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/latest" "k8s.io/kubernetes/pkg/runtime" ) @@ -86,7 +86,7 @@ func NewCodec(codec runtime.Codec, kind string) runtime.Codec { return &thirdPartyResourceDataCodec{codec, kind} } -func (t *thirdPartyResourceDataCodec) populate(objIn *expapi.ThirdPartyResourceData, data []byte) error { +func (t *thirdPartyResourceDataCodec) populate(objIn *experimental.ThirdPartyResourceData, data []byte) error { var obj interface{} if err := json.Unmarshal(data, &obj); err != nil { fmt.Printf("Invalid JSON:\n%s\n", string(data)) @@ -99,7 +99,7 @@ func (t *thirdPartyResourceDataCodec) populate(objIn *expapi.ThirdPartyResourceD return t.populateFromObject(objIn, mapObj, data) } -func (t *thirdPartyResourceDataCodec) populateFromObject(objIn *expapi.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { +func (t *thirdPartyResourceDataCodec) populateFromObject(objIn *experimental.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { typeMeta := api.TypeMeta{} if err := json.Unmarshal(data, &typeMeta); err != nil { return err @@ -127,7 +127,7 @@ func (t *thirdPartyResourceDataCodec) populateFromObject(objIn *expapi.ThirdPart } func (t *thirdPartyResourceDataCodec) Decode(data []byte) (runtime.Object, error) { - result := &expapi.ThirdPartyResourceData{} + result := &experimental.ThirdPartyResourceData{} if err := t.populate(result, data); err != nil { return nil, err } @@ -148,7 +148,7 @@ func (t *thirdPartyResourceDataCodec) DecodeToVersion(data []byte, version strin } func (t *thirdPartyResourceDataCodec) DecodeInto(data []byte, obj runtime.Object) error { - thirdParty, ok := obj.(*expapi.ThirdPartyResourceData) + thirdParty, ok := obj.(*experimental.ThirdPartyResourceData) if !ok { return fmt.Errorf("unexpected object: %#v", obj) } @@ -156,7 +156,7 @@ func (t *thirdPartyResourceDataCodec) DecodeInto(data []byte, obj runtime.Object } func (t *thirdPartyResourceDataCodec) DecodeIntoWithSpecifiedVersionKind(data []byte, obj runtime.Object, version, kind string) error { - thirdParty, ok := obj.(*expapi.ThirdPartyResourceData) + thirdParty, ok := obj.(*experimental.ThirdPartyResourceData) if !ok { return fmt.Errorf("unexpected object: %#v", obj) } @@ -207,7 +207,7 @@ const template = `{ "items": [ %s ] }` -func encodeToJSON(obj *expapi.ThirdPartyResourceData) ([]byte, error) { +func encodeToJSON(obj *experimental.ThirdPartyResourceData) ([]byte, error) { var 
objOut interface{} if err := json.Unmarshal(obj.Data, &objOut); err != nil { return nil, err @@ -222,9 +222,9 @@ func encodeToJSON(obj *expapi.ThirdPartyResourceData) ([]byte, error) { func (t *thirdPartyResourceDataCodec) Encode(obj runtime.Object) (data []byte, err error) { switch obj := obj.(type) { - case *expapi.ThirdPartyResourceData: + case *experimental.ThirdPartyResourceData: return encodeToJSON(obj) - case *expapi.ThirdPartyResourceDataList: + case *experimental.ThirdPartyResourceDataList: // TODO: There must be a better way to do this... buff := &bytes.Buffer{} dataStrings := make([]string, len(obj.Items)) @@ -259,9 +259,9 @@ func (t *thirdPartyResourceDataCreator) New(version, kind string) (out runtime.O } switch kind { case "ThirdPartyResourceData": - return &expapi.ThirdPartyResourceData{}, nil + return &experimental.ThirdPartyResourceData{}, nil case "ThirdPartyResourceDataList": - return &expapi.ThirdPartyResourceDataList{}, nil + return &experimental.ThirdPartyResourceDataList{}, nil default: return t.delegate.New(latest.Version, kind) } diff --git a/pkg/registry/thirdpartyresourcedata/codec_test.go b/pkg/registry/thirdpartyresourcedata/codec_test.go index 72fc1e5b709..61cd10d8de4 100644 --- a/pkg/registry/thirdpartyresourcedata/codec_test.go +++ b/pkg/registry/thirdpartyresourcedata/codec_test.go @@ -23,7 +23,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/util" ) @@ -101,7 +101,7 @@ func TestCodec(t *testing.T) { } continue } - rsrcObj, ok := obj.(*expapi.ThirdPartyResourceData) + rsrcObj, ok := obj.(*experimental.ThirdPartyResourceData) if !ok { t.Errorf("[%s] unexpected object: %v", test.name, obj) continue diff --git a/pkg/registry/thirdpartyresourcedata/etcd/etcd.go b/pkg/registry/thirdpartyresourcedata/etcd/etcd.go index 497f730ec5c..c6f1ecf8a13 100644 --- a/pkg/registry/thirdpartyresourcedata/etcd/etcd.go +++ b/pkg/registry/thirdpartyresourcedata/etcd/etcd.go @@ -20,7 +20,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic" @@ -40,8 +40,8 @@ func NewREST(s storage.Interface, group, kind string) *REST { prefix := "/ThirdPartyResourceData/" + group + "/" + strings.ToLower(kind) + "s" store := &etcdgeneric.Etcd{ - NewFunc: func() runtime.Object { return &expapi.ThirdPartyResourceData{} }, - NewListFunc: func() runtime.Object { return &expapi.ThirdPartyResourceDataList{} }, + NewFunc: func() runtime.Object { return &experimental.ThirdPartyResourceData{} }, + NewListFunc: func() runtime.Object { return &experimental.ThirdPartyResourceDataList{} }, KeyRootFunc: func(ctx api.Context) string { return etcdgeneric.NamespaceKeyRootFunc(ctx, prefix) }, @@ -49,7 +49,7 @@ func NewREST(s storage.Interface, group, kind string) *REST { return etcdgeneric.NamespaceKeyFunc(ctx, prefix, id) }, ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*expapi.ThirdPartyResourceData).Name, nil + return obj.(*experimental.ThirdPartyResourceData).Name, nil }, PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { return thirdpartyresourcedata.Matcher(label, field) diff --git a/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go b/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go index ea0f7505ca4..975ce09d2aa 100644 --- 
a/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go +++ b/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go @@ -20,9 +20,9 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" - // Ensure that expapi/v1 package is initialized. - _ "k8s.io/kubernetes/pkg/expapi/v1" + "k8s.io/kubernetes/pkg/apis/experimental" + // Ensure that experimental/v1 package is initialized. + _ "k8s.io/kubernetes/pkg/apis/experimental/v1" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" @@ -35,8 +35,8 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage, "foo", "bar"), fakeClient } -func validNewThirdPartyResourceData(name string) *expapi.ThirdPartyResourceData { - return &expapi.ThirdPartyResourceData{ +func validNewThirdPartyResourceData(name string) *experimental.ThirdPartyResourceData { + return &experimental.ThirdPartyResourceData{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: api.NamespaceDefault, @@ -54,7 +54,7 @@ func TestCreate(t *testing.T) { // valid rsrc, // invalid - &expapi.ThirdPartyResourceData{}, + &experimental.ThirdPartyResourceData{}, ) } @@ -66,7 +66,7 @@ func TestUpdate(t *testing.T) { validNewThirdPartyResourceData("foo"), // updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.ThirdPartyResourceData) + object := obj.(*experimental.ThirdPartyResourceData) object.Data = []byte("new description") return object }, diff --git a/pkg/registry/thirdpartyresourcedata/registry.go b/pkg/registry/thirdpartyresourcedata/registry.go index b1be42354bb..a1ae201453e 100644 --- a/pkg/registry/thirdpartyresourcedata/registry.go +++ b/pkg/registry/thirdpartyresourcedata/registry.go @@ -19,7 +19,7 @@ package thirdpartyresourcedata import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -28,15 +28,15 @@ import ( // Registry is an interface implemented by things that know how to store ThirdPartyResourceData objects. type Registry interface { // ListThirdPartyResourceData obtains a list of ThirdPartyResourceData having labels which match selector. - ListThirdPartyResourceData(ctx api.Context, selector labels.Selector) (*expapi.ThirdPartyResourceDataList, error) + ListThirdPartyResourceData(ctx api.Context, selector labels.Selector) (*experimental.ThirdPartyResourceDataList, error) // Watch for new/changed/deleted ThirdPartyResourceData WatchThirdPartyResourceData(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) // Get a specific ThirdPartyResourceData - GetThirdPartyResourceData(ctx api.Context, name string) (*expapi.ThirdPartyResourceData, error) + GetThirdPartyResourceData(ctx api.Context, name string) (*experimental.ThirdPartyResourceData, error) // Create a ThirdPartyResourceData based on a specification. 
- CreateThirdPartyResourceData(ctx api.Context, resource *expapi.ThirdPartyResourceData) (*expapi.ThirdPartyResourceData, error) + CreateThirdPartyResourceData(ctx api.Context, resource *experimental.ThirdPartyResourceData) (*experimental.ThirdPartyResourceData, error) // Update an existing ThirdPartyResourceData - UpdateThirdPartyResourceData(ctx api.Context, resource *expapi.ThirdPartyResourceData) (*expapi.ThirdPartyResourceData, error) + UpdateThirdPartyResourceData(ctx api.Context, resource *experimental.ThirdPartyResourceData) (*experimental.ThirdPartyResourceData, error) // Delete an existing ThirdPartyResourceData DeleteThirdPartyResourceData(ctx api.Context, name string) error } @@ -52,34 +52,34 @@ func NewRegistry(s rest.StandardStorage) Registry { return &storage{s} } -func (s *storage) ListThirdPartyResourceData(ctx api.Context, label labels.Selector) (*expapi.ThirdPartyResourceDataList, error) { +func (s *storage) ListThirdPartyResourceData(ctx api.Context, label labels.Selector) (*experimental.ThirdPartyResourceDataList, error) { obj, err := s.List(ctx, label, fields.Everything()) if err != nil { return nil, err } - return obj.(*expapi.ThirdPartyResourceDataList), nil + return obj.(*experimental.ThirdPartyResourceDataList), nil } func (s *storage) WatchThirdPartyResourceData(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { return s.Watch(ctx, label, field, resourceVersion) } -func (s *storage) GetThirdPartyResourceData(ctx api.Context, name string) (*expapi.ThirdPartyResourceData, error) { +func (s *storage) GetThirdPartyResourceData(ctx api.Context, name string) (*experimental.ThirdPartyResourceData, error) { obj, err := s.Get(ctx, name) if err != nil { return nil, err } - return obj.(*expapi.ThirdPartyResourceData), nil + return obj.(*experimental.ThirdPartyResourceData), nil } -func (s *storage) CreateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *expapi.ThirdPartyResourceData) (*expapi.ThirdPartyResourceData, error) { +func (s *storage) CreateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *experimental.ThirdPartyResourceData) (*experimental.ThirdPartyResourceData, error) { obj, err := s.Create(ctx, ThirdPartyResourceData) - return obj.(*expapi.ThirdPartyResourceData), err + if err != nil { + return nil, err + } + return obj.(*experimental.ThirdPartyResourceData), nil } -func (s *storage) UpdateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *expapi.ThirdPartyResourceData) (*expapi.ThirdPartyResourceData, error) { +func (s *storage) UpdateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *experimental.ThirdPartyResourceData) (*experimental.ThirdPartyResourceData, error) { obj, _, err := s.Update(ctx, ThirdPartyResourceData) - return obj.(*expapi.ThirdPartyResourceData), err + if err != nil { + return nil, err + } + return obj.(*experimental.ThirdPartyResourceData), nil } func (s *storage) DeleteThirdPartyResourceData(ctx api.Context, name string) error { diff --git a/pkg/registry/thirdpartyresourcedata/strategy.go b/pkg/registry/thirdpartyresourcedata/strategy.go index fc97d47a3fb..d1e09fb6c49 100644 --- a/pkg/registry/thirdpartyresourcedata/strategy.go +++ b/pkg/registry/thirdpartyresourcedata/strategy.go @@ -21,8 +21,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/validation" + "k8s.io/kubernetes/pkg/apis/experimental" + "k8s.io/kubernetes/pkg/apis/experimental/validation" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/generic" @@ -52,7 +52,7 @@ func (strategy) PrepareForCreate(obj runtime.Object) { } func (strategy) Validate(ctx api.Context, obj runtime.Object) fielderrors.ValidationErrorList { - return validation.ValidateThirdPartyResourceData(obj.(*expapi.ThirdPartyResourceData)) + return validation.ValidateThirdPartyResourceData(obj.(*experimental.ThirdPartyResourceData)) } func (strategy) AllowCreateOnUpdate() bool { @@ -63,7 +63,7 @@ func (strategy) PrepareForUpdate(obj, old runtime.Object) { } func (strategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { - return validation.ValidateThirdPartyResourceDataUpdate(old.(*expapi.ThirdPartyResourceData), obj.(*expapi.ThirdPartyResourceData)) + return validation.ValidateThirdPartyResourceDataUpdate(old.(*experimental.ThirdPartyResourceData), obj.(*experimental.ThirdPartyResourceData)) } func (strategy) AllowUnconditionalUpdate() bool { @@ -73,7 +73,7 @@ func (strategy) AllowUnconditionalUpdate() bool { // Matcher returns a generic matcher for a given label and field selector. func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { - sa, ok := obj.(*expapi.ThirdPartyResourceData) + sa, ok := obj.(*experimental.ThirdPartyResourceData) if !ok { return false, fmt.Errorf("not a ThirdPartyResourceData") } @@ -83,6 +83,6 @@ func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { } // SelectableFields returns a label set that can be used for filter selection -func SelectableFields(obj *expapi.ThirdPartyResourceData) labels.Set { +func SelectableFields(obj *experimental.ThirdPartyResourceData) labels.Set { return labels.Set{} } diff --git a/pkg/registry/thirdpartyresourcedata/util.go b/pkg/registry/thirdpartyresourcedata/util.go index e31c54cd54f..db62f6282ab 100644 --- a/pkg/registry/thirdpartyresourcedata/util.go +++ b/pkg/registry/thirdpartyresourcedata/util.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" ) func convertToCamelCase(input string) string { @@ -40,7 +40,7 @@ func convertToCamelCase(input string) string { return result } -func ExtractApiGroupAndKind(rsrc *expapi.ThirdPartyResource) (kind string, group string, err error) { +func ExtractApiGroupAndKind(rsrc *experimental.ThirdPartyResource) (kind string, group string, err error) { parts := strings.Split(rsrc.Name, ".") if len(parts) < 3 { return "", "", fmt.Errorf("unexpectedly short resource name: %s, expected at least ..", rsrc.Name) diff --git a/pkg/registry/thirdpartyresourcedata/util_test.go b/pkg/registry/thirdpartyresourcedata/util_test.go index 43db4464c94..977e717997b 100644 --- a/pkg/registry/thirdpartyresourcedata/util_test.go +++ b/pkg/registry/thirdpartyresourcedata/util_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/apis/experimental" ) func TestExtractAPIGroupAndKind(t *testing.T) { @@ -47,7 +47,7 @@ func TestExtractAPIGroupAndKind(t *testing.T) { } for _, test := range tests { - kind, group, err := ExtractApiGroupAndKind(&expapi.ThirdPartyResource{ObjectMeta: api.ObjectMeta{Name: test.input}}) + kind, group, err := ExtractApiGroupAndKind(&experimental.ThirdPartyResource{ObjectMeta: api.ObjectMeta{Name: test.input}}) if err != nil && !test.expectErr { t.Errorf("unexpected error: %v", err) continue diff --git a/pkg/storage/cacher.go 
b/pkg/storage/cacher.go index ee76b029a1f..730571d7dba 100644 --- a/pkg/storage/cacher.go +++ b/pkg/storage/cacher.go @@ -24,7 +24,7 @@ import ( "sync" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" diff --git a/pkg/storage/watch_cache.go b/pkg/storage/watch_cache.go index d41a2b7960b..52f04eeddd8 100644 --- a/pkg/storage/watch_cache.go +++ b/pkg/storage/watch_cache.go @@ -23,7 +23,7 @@ import ( "sync" "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/watch" ) diff --git a/pkg/storage/watch_cache_test.go b/pkg/storage/watch_cache_test.go index 6ab467e8ba1..48442b745fa 100644 --- a/pkg/storage/watch_cache_test.go +++ b/pkg/storage/watch_cache_test.go @@ -21,7 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/sets" diff --git a/pkg/util/exec/exec.go b/pkg/util/exec/exec.go index fbe15accc9e..7a2eda26b25 100644 --- a/pkg/util/exec/exec.go +++ b/pkg/util/exec/exec.go @@ -27,6 +27,9 @@ type Interface interface { // Command returns a Cmd instance which can be used to run a single command. // This follows the pattern of package os/exec. Command(cmd string, args ...string) Cmd + + // LookPath wraps os/exec.LookPath + LookPath(file string) (string, error) } // Cmd is an interface that presents an API that is very similar to Cmd from os/exec. @@ -62,6 +65,11 @@ func (executor *executor) Command(cmd string, args ...string) Cmd { return (*cmdWrapper)(osexec.Command(cmd, args...)) } +// LookPath is part of the Interface interface +func (executor *executor) LookPath(file string) (string, error) { + return osexec.LookPath(file) +} + // Wraps exec.Cmd so we can capture errors. type cmdWrapper osexec.Cmd diff --git a/pkg/util/exec/exec_test.go b/pkg/util/exec/exec_test.go index 6f88f48e2e2..49a432b36da 100644 --- a/pkg/util/exec/exec_test.go +++ b/pkg/util/exec/exec_test.go @@ -17,6 +17,7 @@ limitations under the License. package exec import ( + osexec "os/exec" "testing" ) @@ -81,3 +82,13 @@ func TestExecutorWithArgs(t *testing.T) { t.Errorf("unexpected output: %q", string(out)) } } + +func TestLookPath(t *testing.T) { + ex := New() + + shExpected, _ := osexec.LookPath("sh") + sh, _ := ex.LookPath("sh") + if sh != shExpected { + t.Errorf("unexpected result for LookPath: got %s, expected %s", sh, shExpected) + } +} diff --git a/pkg/util/exec/fake_exec.go b/pkg/util/exec/fake_exec.go index 2b5590e053e..e69ed55d809 100644 --- a/pkg/util/exec/fake_exec.go +++ b/pkg/util/exec/fake_exec.go @@ -24,6 +24,7 @@ import ( type FakeExec struct { CommandScript []FakeCommandAction CommandCalls int + LookPathFunc func(string) (string, error) } type FakeCommandAction func(cmd string, args ...string) Cmd @@ -37,6 +38,10 @@ func (fake *FakeExec) Command(cmd string, args ...string) Cmd { return fake.CommandScript[i](cmd, args...) } +func (fake *FakeExec) LookPath(file string) (string, error) { + return fake.LookPathFunc(file) +} + // A simple scripted Cmd type. 
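The new LookPathFunc field plays the same role for LookPath that CommandScript plays for Command: a test injects an answer instead of consulting the real PATH. A consumer-side sketch; the command name, path, and error text are illustrative only:

func TestToolDetection(t *testing.T) {
	fake := &exec.FakeExec{
		LookPathFunc: func(file string) (string, error) {
			// Pretend only iptables is installed on this fake PATH.
			if file == "iptables" {
				return "/sbin/iptables", nil
			}
			return "", fmt.Errorf("no %s on fake PATH", file)
		},
	}
	if path, err := fake.LookPath("iptables"); err != nil || path != "/sbin/iptables" {
		t.Errorf("unexpected LookPath result: %q, %v", path, err)
	}
}

The scripted FakeCmd type continues below.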
type FakeCmd struct { Argv []string diff --git a/pkg/util/jsonpath/jsonpath.go b/pkg/util/jsonpath/jsonpath.go index 817af6307e1..7d4b7620995 100644 --- a/pkg/util/jsonpath/jsonpath.go +++ b/pkg/util/jsonpath/jsonpath.go @@ -217,7 +217,7 @@ func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect. value, isNil := template.Indirect(value) if isNil || (value.Kind() != reflect.Array && value.Kind() != reflect.Slice) { - return input, fmt.Errorf("%v is not array or slice", value) + return input, fmt.Errorf("%v is not array or slice", value.Type()) } params := node.Params if !params[0].Known { diff --git a/pkg/util/jsonpath/jsonpath_test.go b/pkg/util/jsonpath/jsonpath_test.go index 9922a8fee21..6a79d9fdfc6 100644 --- a/pkg/util/jsonpath/jsonpath_test.go +++ b/pkg/util/jsonpath/jsonpath_test.go @@ -162,7 +162,7 @@ func TestStructInput(t *testing.T) { failStoreTests := []jsonpathTest{ {"invalid identfier", "{hello}", storeData, "unrecongnized identifier hello"}, {"nonexistent field", "{.hello}", storeData, "hello is not found"}, - {"invalid array", "{.Labels[0]}", storeData, " is not array or slice"}, + {"invalid array", "{.Labels[0]}", storeData, "map[string]int is not array or slice"}, {"invalid filter operator", "{.Book[?(@.Price<>10)]}", storeData, "unrecognized filter operator <>"}, {"redundent end", "{range .Labels.*}{@}{end}{end}", storeData, "not in range, nothing to end"}, } diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go index 25cd056e88a..4da9f6da097 100644 --- a/pkg/util/rand/rand.go +++ b/pkg/util/rand/rand.go @@ -54,3 +54,11 @@ func Seed(seed int64) { rng.rand = rand.New(rand.NewSource(seed)) } + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Perm(n) +} diff --git a/pkg/util/rand/rand_test.go b/pkg/util/rand/rand_test.go index 0c76885674e..5ad109a3fa2 100644 --- a/pkg/util/rand/rand_test.go +++ b/pkg/util/rand/rand_test.go @@ -17,6 +17,7 @@ limitations under the License. package rand import ( + "math/rand" "strings" "testing" ) @@ -35,3 +36,17 @@ func TestString(t *testing.T) { } } } + +func TestPerm(t *testing.T) { + Seed(5) + rand.Seed(5) + for i := 1; i < 20; i++ { + actual := Perm(i) + expected := rand.Perm(i) + for j := 0; j < i; j++ { + if actual[j] != expected[j] { + t.Errorf("Perm call result is unexpected") + } + } + } +} diff --git a/pkg/util/slice/slice.go b/pkg/util/slice/slice.go index 281c2021c00..a9ee3ea62ea 100644 --- a/pkg/util/slice/slice.go +++ b/pkg/util/slice/slice.go @@ -18,7 +18,7 @@ limitations under the License. package slice import ( - "math/rand" + utilrand "k8s.io/kubernetes/pkg/util/rand" "sort" ) @@ -41,7 +41,7 @@ func SortStrings(s []string) []string { // order. It returns a new slice. func ShuffleStrings(s []string) []string { shuffled := make([]string, len(s)) - perm := rand.Perm(len(s)) + perm := utilrand.Perm(len(s)) for i, j := range perm { shuffled[j] = s[i] } diff --git a/pkg/util/strategicpatch/patch.go b/pkg/util/strategicpatch/patch.go index 2bb6494d901..34f6a70713c 100644 --- a/pkg/util/strategicpatch/patch.go +++ b/pkg/util/strategicpatch/patch.go @@ -31,28 +31,357 @@ import ( // lists should be merged or replaced. // // For more information, see the PATCH section of docs/api-conventions.md. 
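The rewrite below splits the old apply-only entry point into a create/apply pair. A round-trip sketch of the two public functions introduced in this hunk; api.Pod stands in for any struct carrying the strategic-merge struct tags, and the helper name is made up:

// roundTrip diffs two JSON documents against the Pod schema, then applies
// the generated patch; the result should reproduce the modified document.
func roundTrip(original, modified []byte) ([]byte, error) {
	patch, err := strategicpatch.CreateStrategicMergePatch(original, modified, api.Pod{})
	if err != nil {
		return nil, err
	}
	return strategicpatch.StrategicMergePatch(original, patch, api.Pod{})
}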
-func StrategicMergePatchData(original, patch []byte, dataStruct interface{}) ([]byte, error) { - var o map[string]interface{} - err := json.Unmarshal(original, &o) +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. + +const specialKey = "$patch" +const specialValue = "delete" + +var errBadJSONDoc = fmt.Errorf("Invalid JSON document") +var errNoListOfLists = fmt.Errorf("Lists of lists are not supported") + +// CreateStrategicMergePatch creates a patch that can be passed to StrategicMergePatch. +// The original and modified documents must be passed to the method as json encoded content. +// It will return a mergeable json document with differences from original to modified, or an error +// if either of the two documents is invalid. +func CreateStrategicMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) { + originalMap := map[string]interface{}{} + err := json.Unmarshal(original, &originalMap) + if err != nil { + return nil, errBadJSONDoc + } + + modifiedMap := map[string]interface{}{} + err = json.Unmarshal(modified, &modifiedMap) + if err != nil { + return nil, errBadJSONDoc + } + + t, err := getTagStructType(dataStruct) if err != nil { return nil, err } - var p map[string]interface{} - err = json.Unmarshal(patch, &p) + patchMap, err := diffMaps(originalMap, modifiedMap, t, false, false) if err != nil { return nil, err } - t := reflect.TypeOf(dataStruct) + return json.Marshal(patchMap) +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. +func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreAdditions, ignoreChangesAndDeletions bool) (map[string]interface{}, error) { + patch := map[string]interface{}{} if t.Kind() == reflect.Ptr { t = t.Elem() } - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("strategic merge patch needs a struct, %s received instead", t.Kind().String()) + + for key, modifiedValue := range modified { + originalValue, ok := original[key] + // value was added + if !ok { + if !ignoreAdditions { + patch[key] = modifiedValue + } + + continue + } + + if key == specialKey { + originalString, ok := originalValue.(string) + if !ok { + return nil, fmt.Errorf("invalid value for special key: %s", specialKey) + } + + modifiedString, ok := modifiedValue.(string) + if !ok { + return nil, fmt.Errorf("invalid value for special key: %s", specialKey) + } + + if modifiedString != originalString { + patch[key] = modifiedValue + } + + continue + } + + if !ignoreChangesAndDeletions { + // If types have changed, replace completely + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + patch[key] = modifiedValue + continue + } + } + + // Types are the same, compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + fieldType, _, _, err := forkedjson.LookupPatchMetadata(t, key) + if err != nil { + return nil, err + } + + patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreAdditions, ignoreChangesAndDeletions) + if err != nil { + return nil, err + } + + if len(patchValue) > 0 { + patch[key] = patchValue + } + + continue + case []interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == "merge" { + patchValue, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreAdditions, ignoreChangesAndDeletions) + if err != nil { + return nil, err + } + + if len(patchValue) > 0 { + patch[key] = patchValue + } + + continue + } + } + + if !ignoreChangesAndDeletions { + if !reflect.DeepEqual(originalValue, modifiedValue) { + patch[key] = modifiedValue + } + } } - result, err := mergeMap(o, p, t) + if !ignoreChangesAndDeletions { + // Now add all deleted values as nil + for key := range original { + _, found := modified[key] + if !found { + patch[key] = nil + } + } + } + + return patch, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists with merge semantics. +func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreAdditions, ignoreChangesAndDeletions bool) ([]interface{}, error) { + if len(original) == 0 { + if len(modified) == 0 || ignoreAdditions { + return nil, nil + } + + return modified, nil + } + + elementType, err := sliceElementType(original, modified) + if err != nil { + return nil, err + } + + var patch []interface{} + + // Lists of maps are diffed by merge key; all other element types are diffed as scalars. + if elementType.Kind() == reflect.Map { + patch, err = diffListsOfMaps(original, modified, t, mergeKey, ignoreAdditions, ignoreChangesAndDeletions) + } else { + patch, err = diffListsOfScalars(original, modified, ignoreAdditions) + } + + if err != nil { + return nil, err + } + + return patch, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists of scalars with merge semantics. +func diffListsOfScalars(original, modified []interface{}, ignoreAdditions bool) ([]interface{}, error) { + if len(modified) == 0 { + // There is no need to check the length of original because there is no way to create + // a patch that deletes a scalar from a list of scalars with merge semantics. + return nil, nil + } + + patch := []interface{}{} + + originalScalars := uniqifyAndSortScalars(original) + modifiedScalars := uniqifyAndSortScalars(modified) + originalIndex, modifiedIndex := 0, 0 + +loopB: + for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { + for ; originalIndex < len(originalScalars); originalIndex++ { + originalString := fmt.Sprintf("%v", originalScalars[originalIndex]) + modifiedString := fmt.Sprintf("%v", modifiedScalars[modifiedIndex]) + if originalString >= modifiedString { + if originalString != modifiedString { + if !ignoreAdditions { + patch = append(patch, modifiedScalars[modifiedIndex]) + } + } + + continue loopB + } + // There is no else clause because there is no way to create a patch that deletes + // a scalar from a list of scalars with merge semantics. + } + + break + } + + if !ignoreAdditions { + // Add any remaining items found only in modified + for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { + patch = append(patch, modifiedScalars[modifiedIndex]) + } + } + + return patch, nil +} + +var errNoMergeKeyFmt = "map: %v does not contain declared merge key: %s" +var errBadArgTypeFmt = "expected a %s, but received a %T" + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists of maps with merge semantics.
+func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreAdditions, ignoreChangesAndDeletions bool) ([]interface{}, error) {
+	patch := make([]interface{}, 0)
+
+	originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false)
+	if err != nil {
+		return nil, err
+	}
+
+	modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false)
+	if err != nil {
+		return nil, err
+	}
+
+	originalIndex, modifiedIndex := 0, 0
+
+loopB:
+	for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ {
+		modifiedMap, ok := modifiedSorted[modifiedIndex].(map[string]interface{})
+		if !ok {
+			return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", modifiedSorted[modifiedIndex])
+		}
+
+		modifiedValue, ok := modifiedMap[mergeKey]
+		if !ok {
+			return nil, fmt.Errorf(errNoMergeKeyFmt, modifiedMap, mergeKey)
+		}
+
+		for ; originalIndex < len(originalSorted); originalIndex++ {
+			originalMap, ok := originalSorted[originalIndex].(map[string]interface{})
+			if !ok {
+				return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", originalSorted[originalIndex])
+			}
+
+			originalValue, ok := originalMap[mergeKey]
+			if !ok {
+				return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey)
+			}
+
+			// Assume that the merge key values are comparable strings
+			originalString := fmt.Sprintf("%v", originalValue)
+			modifiedString := fmt.Sprintf("%v", modifiedValue)
+			if originalString >= modifiedString {
+				if originalString == modifiedString {
+					patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreAdditions, ignoreChangesAndDeletions)
+					if err != nil {
+						return nil, err
+					}
+
+					originalIndex++
+					if len(patchValue) > 0 {
+						patchValue[mergeKey] = modifiedValue
+						patch = append(patch, patchValue)
+					}
+				} else if !ignoreAdditions {
+					patch = append(patch, modifiedMap)
+				}
+
+				continue loopB
+			}
+
+			if !ignoreChangesAndDeletions {
+				patch = append(patch, map[string]interface{}{mergeKey: originalValue, specialKey: specialValue})
+			}
+		}
+
+		break
+	}
+
+	if !ignoreChangesAndDeletions {
+		// Delete any remaining items found only in original
+		for ; originalIndex < len(originalSorted); originalIndex++ {
+			originalMap, ok := originalSorted[originalIndex].(map[string]interface{})
+			if !ok {
+				return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", originalSorted[originalIndex])
+			}
+
+			originalValue, ok := originalMap[mergeKey]
+			if !ok {
+				return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey)
+			}
+
+			patch = append(patch, map[string]interface{}{mergeKey: originalValue, specialKey: specialValue})
+		}
+	}
+
+	if !ignoreAdditions {
+		// Add any remaining items found only in modified
+		for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ {
+			patch = append(patch, modifiedSorted[modifiedIndex])
+		}
+	}
+
+	return patch, nil
+}
+
+// StrategicMergePatchData applies a patch using strategic merge patch semantics.
+// Deprecated: use the synonym StrategicMergePatch instead, which follows the
+// naming convention of evanphx/json-patch.
+func StrategicMergePatchData(original, patch []byte, dataStruct interface{}) ([]byte, error) {
+	return StrategicMergePatch(original, patch, dataStruct)
+}
+
+// StrategicMergePatch applies a strategic merge patch. The patch and the original document
+// must be JSON encoded content. A patch can be created from an original and a modified document
+// by calling CreateStrategicMergePatch.
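+//
+// For example (an illustrative sketch), applying the patch
+//	{"value": null, "other": "a"}
+// to the original
+//	{"name": "1", "value": "1"}
+// yields
+//	{"name": "1", "other": "a"}
+// because a null patch value deletes the key and a key absent from the original is added.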
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) {
+	originalMap := map[string]interface{}{}
+	err := json.Unmarshal(original, &originalMap)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	patchMap := map[string]interface{}{}
+	err = json.Unmarshal(patch, &patchMap)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	t, err := getTagStructType(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	result, err := mergeMap(originalMap, patchMap, t)
 	if err != nil {
 		return nil, err
 	}
@@ -60,7 +389,20 @@ func StrategicMergePatchData(original, patch []byte, dataStruct interface{}) ([]
 	return json.Marshal(result)
 }
 
-const specialKey = "$patch"
+func getTagStructType(dataStruct interface{}) (reflect.Type, error) {
+	t := reflect.TypeOf(dataStruct)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	if t.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("expected a struct, but received a %s", t.Kind().String())
+	}
+
+	return t, nil
+}
+
+var errBadPatchTypeFmt = "unknown patch type: %s in map: %v"
 
 // Merge fields from a patch map into the original map. Note: This may modify
 // both the original map and the patch because getting a deep copy of a map in
@@ -74,7 +416,8 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
 		delete(patch, specialKey)
 		return patch, nil
 	}
-	return nil, fmt.Errorf("unknown patch type found: %s", v)
+
+	return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch)
 }
 
 // nil is an accepted value for original to simplify logic in other places.
@@ -92,29 +435,32 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
 			if _, ok := original[k]; ok {
 				delete(original, k)
 			}
+
 			continue
 		}
+
 		_, ok := original[k]
 		if !ok {
 			// If it's not in the original document, just take the patch value.
 			original[k] = patchV
 			continue
 		}
+
 		// If the data type is a pointer, resolve the element.
 		if t.Kind() == reflect.Ptr {
 			t = t.Elem()
 		}
 
 		// If they're both maps or lists, recurse into the value.
-		// First find the fieldPatchStrategy and fieldPatchMergeKey.
-		fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
-		if err != nil {
-			return nil, err
-		}
-
 		originalType := reflect.TypeOf(original[k])
 		patchType := reflect.TypeOf(patchV)
 		if originalType == patchType {
+			// First find the fieldPatchStrategy and fieldPatchMergeKey.
+			fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
+			if err != nil {
+				return nil, err
+			}
+
 			if originalType.Kind() == reflect.Map && fieldPatchStrategy != "replace" {
 				typedOriginal := original[k].(map[string]interface{})
 				typedPatch := patchV.(map[string]interface{})
@@ -123,8 +469,10 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
 				if err != nil {
 					return nil, err
 				}
+
 				continue
 			}
+
 			if originalType.Kind() == reflect.Slice && fieldPatchStrategy == "merge" {
 				elemType := fieldType.Elem()
 				typedOriginal := original[k].([]interface{})
@@ -134,6 +482,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[strin
 				if err != nil {
 					return nil, err
 				}
+
 				continue
 			}
 		}
@@ -154,15 +503,13 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 	if len(original) == 0 && len(patch) == 0 {
 		return original, nil
 	}
+
 	// All the values must be of the same type, but not a list.
 	t, err := sliceElementType(original, patch)
 	if err != nil {
-		return nil, fmt.Errorf("types of list elements need to be the same, type: %s: %v",
-			elemType.Kind().String(), err)
-	}
-	if t.Kind() == reflect.Slice {
-		return nil, fmt.Errorf("not supporting merging lists of lists yet")
+		return nil, err
 	}
+
 	// If the elements are not maps, merge the slices of scalars.
 	if t.Kind() != reflect.Map {
 		// Maybe in the future add a "concat" mode that doesn't
@@ -182,10 +529,14 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 		typedV := v.(map[string]interface{})
 		patchType, ok := typedV[specialKey]
 		if ok {
-			if patchType == "delete" {
+			if patchType == specialValue {
 				mergeValue, ok := typedV[mergeKey]
 				if ok {
-					_, originalKey, found := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+					_, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+					if err != nil {
+						return nil, err
+					}
+
 					if found {
 						// Delete the element at originalKey.
 						original = append(original[:originalKey], original[originalKey+1:]...)
@@ -199,7 +550,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 			} else if patchType == "merge" {
 				return nil, fmt.Errorf("merging lists cannot yet be specified in the patch")
 			} else {
-				return nil, fmt.Errorf("unknown patch type found: %s", patchType)
+				return nil, fmt.Errorf(errBadPatchTypeFmt, patchType, typedV)
 			}
 		} else {
 			patchWithoutSpecialElements = append(patchWithoutSpecialElements, v)
@@ -218,11 +569,16 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 		typedV := v.(map[string]interface{})
 		mergeValue, ok := typedV[mergeKey]
 		if !ok {
-			return nil, fmt.Errorf("all list elements need the merge key %s", mergeKey)
+			return nil, fmt.Errorf(errNoMergeKeyFmt, typedV, mergeKey)
 		}
+
 		// If we find a value with this merge key value in original, merge the
 		// maps. Otherwise append onto original.
-		originalMap, originalKey, found := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+		originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+		if err != nil {
+			return nil, err
+		}
+
 		if found {
 			var mergedMaps interface{}
 			var err error
@@ -231,6 +587,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 			if err != nil {
 				return nil, err
 			}
+
 			original[originalKey] = mergedMaps
 		} else {
 			original = append(original, v)
@@ -240,16 +597,21 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s
 	return original, nil
 }
 
-// This panics if any element of the slice is not a map.
-func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool) {
+// Returns the first map in the slice whose value for key equals value, along with
+// its index and whether it was found; returns an error instead of panicking if an
+// element of the slice is not a map.
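+//
+// For example (an illustrative sketch), searching
+//	[{"name": "a"}, {"name": "b"}]
+// for key "name" and value "b" returns the second map, index 1, found == true,
+// and a nil error.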
+func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) {
 	for k, v := range m {
-		typedV := v.(map[string]interface{})
+		typedV, ok := v.(map[string]interface{})
+		if !ok {
+			return nil, 0, false, fmt.Errorf("element %v of the slice is not a map", k)
+		}
+
 		valueToMatch, ok := typedV[key]
 		if ok && valueToMatch == value {
-			return typedV, k, true
+			return typedV, k, true, nil
 		}
 	}
-	return nil, 0, false
+
+	return nil, 0, false, nil
 }
 
 // This function takes a JSON map and sorts all the lists that should be merged
@@ -274,43 +636,46 @@ func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error
 func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) {
 	newS := map[string]interface{}{}
 	for k, v := range s {
-		fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
-		if err != nil {
-			return nil, err
-		}
-
-		// If v is a map or a merge slice, recurse.
-		if typedV, ok := v.(map[string]interface{}); ok {
-			var err error
-			v, err = sortMergeListsByNameMap(typedV, fieldType)
+		if k != specialKey {
+			fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k)
 			if err != nil {
 				return nil, err
 			}
-		} else if typedV, ok := v.([]interface{}); ok {
-			if fieldPatchStrategy == "merge" {
+
+			// If v is a map or a merge slice, recurse.
+			if typedV, ok := v.(map[string]interface{}); ok {
 				var err error
-				v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey)
+				v, err = sortMergeListsByNameMap(typedV, fieldType)
 				if err != nil {
 					return nil, err
 				}
+			} else if typedV, ok := v.([]interface{}); ok {
+				if fieldPatchStrategy == "merge" {
+					var err error
+					v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true)
+					if err != nil {
+						return nil, err
+					}
+				}
 			}
 		}
+
 		newS[k] = v
 	}
+
 	return newS, nil
 }
 
-func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string) ([]interface{}, error) {
+func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) {
 	if len(s) == 0 {
 		return s, nil
 	}
+
 	// We don't support lists of lists yet.
 	t, err := sliceElementType(s)
 	if err != nil {
 		return nil, err
 	}
-	if t.Kind() == reflect.Slice {
-		return nil, fmt.Errorf("not supporting lists of lists yet")
-	}
 
 	// If the elements are not maps...
 	if t.Kind() != reflect.Map {
@@ -319,15 +684,20 @@ func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey
 	}
 
 	// Elements are maps - if one of the keys of the map is a map or a
-	// list, we need to recurse into it.
+	// list, we may need to recurse into it.
 	newS := []interface{}{}
 	for _, elem := range s {
-		typedElem := elem.(map[string]interface{})
-		newElem, err := sortMergeListsByNameMap(typedElem, elemType)
-		if err != nil {
-			return nil, err
+		if recurse {
+			typedElem := elem.(map[string]interface{})
+			newElem, err := sortMergeListsByNameMap(typedElem, elemType)
+			if err != nil {
+				return nil, err
+			}
+
+			newS = append(newS, newElem)
+		} else {
+			newS = append(newS, elem)
 		}
-		newS = append(newS, newElem)
 	}
 
 	// Sort the maps.
@@ -336,19 +706,32 @@ func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey } func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { - mapM := []map[string]interface{}{} - for _, v := range m { - mapM = append(mapM, v.(map[string]interface{})) - } + mapM := mapSliceFromSlice(m) ss := SortableSliceOfMaps{mapM, fieldName} sort.Sort(ss) - newM := []interface{}{} - for _, v := range ss.s { - newM = append(newM, v) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) } + return newM } +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + type SortableSliceOfMaps struct { s []map[string]interface{} k string // key to sort on @@ -391,6 +774,7 @@ func uniqifyScalars(s []interface{}) []interface{} { } } } + return s } @@ -415,7 +799,7 @@ func (ss SortableSliceOfScalars) Swap(i, j int) { } // Returns the type of the elements of N slice(s). If the type is different, -// returns an error. +// another slice or undefined, returns an error. func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { var prevType reflect.Type for _, s := range slices { @@ -424,16 +808,22 @@ func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { currentType := reflect.TypeOf(v) if prevType == nil { prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, errNoListOfLists + } } else { if prevType != currentType { - return nil, fmt.Errorf("at least two types found: %s and %s", prevType, currentType) + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) } prevType = currentType } } } + if prevType == nil { - return nil, fmt.Errorf("no elements in any given slices") + return nil, fmt.Errorf("no elements in any of the given slices") } + return prevType, nil } diff --git a/pkg/util/strategicpatch/patch_test.go b/pkg/util/strategicpatch/patch_test.go index 3aeea84b2ec..08411114d04 100644 --- a/pkg/util/strategicpatch/patch_test.go +++ b/pkg/util/strategicpatch/patch_test.go @@ -26,19 +26,26 @@ import ( "github.com/ghodss/yaml" ) -type TestCases struct { - StrategicMergePatchCases []StrategicMergePatchCase - SortMergeListTestCases []SortMergeListCase +type StrategicMergePatchTestCases struct { + TestCases []StrategicMergePatchTestCase } -type StrategicMergePatchCase struct { +type SortMergeListTestCases struct { + TestCases []SortMergeListTestCase +} + +type StrategicMergePatchTestCaseData struct { + Original map[string]interface{} + Patch map[string]interface{} + Modified map[string]interface{} +} + +type StrategicMergePatchTestCase struct { Description string - Patch map[string]interface{} - Original map[string]interface{} - Result map[string]interface{} + StrategicMergePatchTestCaseData } -type SortMergeListCase struct { +type SortMergeListTestCase struct { Description string Original map[string]interface{} Sorted map[string]interface{} @@ -47,6 +54,7 @@ type SortMergeListCase struct { type MergeItem struct { Name string Value string + Other string MergingList []MergeItem `patchStrategy:"merge" patchMergeKey:"name"` NonMergingList []MergeItem MergingIntList []int `patchStrategy:"merge"` @@ -55,220 +63,10 @@ type MergeItem struct { SimpleMap map[string]string } 
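+// Note: the patchStrategy and patchMergeKey struct tags on MergeItem drive the
+// semantics exercised by the test cases below: a list field tagged
+// patchStrategy:"merge" is merged element by element, with patchMergeKey naming
+// the field that identifies an element, while untagged lists are replaced
+// wholesale.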
-var testCaseData = []byte(` -strategicMergePatchCases: - - description: add new field - original: - name: 1 - patch: - value: 1 - result: - name: 1 - value: 1 - - description: remove field and add new field - original: - name: 1 - patch: - name: null - value: 1 - result: - value: 1 - - description: merge arrays of scalars - original: - mergingIntList: - - 1 - - 2 - patch: - mergingIntList: - - 2 - - 3 - result: - mergingIntList: - - 1 - - 2 - - 3 - - description: replace arrays of scalars - original: - nonMergingIntList: - - 1 - - 2 - patch: - nonMergingIntList: - - 2 - - 3 - result: - nonMergingIntList: - - 2 - - 3 - - description: update param of list that should be merged but had element added serverside - original: - mergingList: - - name: 1 - value: 1 - - name: 2 - value: 2 - patch: - mergingList: - - name: 1 - value: a - result: - mergingList: - - name: 1 - value: a - - name: 2 - value: 2 - - description: delete field when field is nested in a map - original: - simpleMap: - key1: 1 - key2: 1 - patch: - simpleMap: - key2: null - result: - simpleMap: - key1: 1 - - description: update nested list when nested list should not be merged - original: - mergingList: - - name: 1 - nonMergingList: - - name: 1 - - name: 2 - value: 2 - - name: 2 - patch: - mergingList: - - name: 1 - nonMergingList: - - name: 1 - value: 1 - result: - mergingList: - - name: 1 - nonMergingList: - - name: 1 - value: 1 - - name: 2 - - description: update nested list when nested list should be merged - original: - mergingList: - - name: 1 - mergingList: - - name: 1 - - name: 2 - value: 2 - - name: 2 - patch: - mergingList: - - name: 1 - mergingList: - - name: 1 - value: 1 - result: - mergingList: - - name: 1 - mergingList: - - name: 1 - value: 1 - - name: 2 - value: 2 - - name: 2 - - description: update map when map should be replaced - original: - name: 1 - value: 1 - patch: - value: 1 - $patch: replace - result: - value: 1 - - description: merge empty merge lists - original: - mergingList: [] - patch: - mergingList: [] - result: - mergingList: [] - - description: delete others in a map - original: - name: 1 - value: 1 - patch: - $patch: replace - result: {} - - description: delete item from a merge list - original: - mergingList: - - name: 1 - - name: 2 - patch: - mergingList: - - $patch: delete - name: 1 - result: - mergingList: - - name: 2 - - description: add and delete item from a merge list - original: - merginglist: - - name: 1 - - name: 2 - patch: - merginglist: - - name: 3 - - $patch: delete - name: 1 - result: - merginglist: - - name: 2 - - name: 3 - - description: delete all items from a merge list - original: - mergingList: - - name: 1 - - name: 2 - patch: - mergingList: - - $patch: replace - result: - mergingList: [] - - description: add new field inside pointers - original: - mergeItemPtr: - - name: 1 - patch: - mergeItemPtr: - - name: 2 - result: - mergeItemPtr: - - name: 1 - - name: 2 - - description: update nested pointers - original: - mergeItemPtr: - - name: 1 - mergeItemPtr: - - name: 1 - - name: 2 - value: 2 - - name: 2 - patch: - mergeItemPtr: - - name: 1 - mergeItemPtr: - - name: 1 - value: 1 - result: - mergeItemPtr: - - name: 1 - mergeItemPtr: - - name: 1 - value: 1 - - name: 2 - value: 2 - - name: 2 -sortMergeListTestCases: +// These are test cases for SortMergeList, used to assert that it (recursively) +// sorts both merging and non merging lists correctly. 
+var sortMergeListTestCaseData = []byte(` +testCases: - description: sort one list of maps original: mergingList: @@ -327,7 +125,7 @@ sortMergeListTestCases: - name: 1 - name: 2 - name: 3 - - description: merging list should NOT sort when nested in a non merging list + - description: merging list should NOT sort when nested in non merging list original: nonMergingList: - name: 2 @@ -350,7 +148,7 @@ sortMergeListTestCases: mergingList: - name: 2 - name: 1 - - description: sort a very nested list of maps + - description: sort very nested list of maps fieldTypes: original: mergingList: @@ -410,7 +208,7 @@ sortMergeListTestCases: - 1 - 2 - 3 - - description: sort one pointer of maps + - description: sort merging list by pointer original: mergeItemPtr: - name: 1 @@ -423,81 +221,546 @@ sortMergeListTestCases: - name: 3 `) -func TestStrategicMergePatch(t *testing.T) { - tc := TestCases{} - err := yaml.Unmarshal(testCaseData, &tc) - if err != nil { - t.Errorf("can't unmarshal test cases: %v", err) - return - } - - var e MergeItem - for _, c := range tc.StrategicMergePatchCases { - result, err := StrategicMergePatchData(toJSON(c.Original), toJSON(c.Patch), e) - if err != nil { - t.Errorf("error patching: %v:\noriginal:\n%s\npatch:\n%s", - err, toYAML(c.Original), toYAML(c.Patch)) - } - - // Sort the lists that have merged maps, since order is not significant. - result, err = sortMergeListsByName(result, e) - if err != nil { - t.Errorf("error sorting result object: %v", err) - } - cResult, err := sortMergeListsByName(toJSON(c.Result), e) - if err != nil { - t.Errorf("error sorting result object: %v", err) - } - - if !reflect.DeepEqual(result, cResult) { - t.Errorf("patching failed: %s\noriginal:\n%s\npatch:\n%s\nexpected result:\n%s\ngot result:\n%s", - c.Description, toYAML(c.Original), toYAML(c.Patch), jsonToYAML(cResult), jsonToYAML(result)) - } - } -} - func TestSortMergeLists(t *testing.T) { - tc := TestCases{} - err := yaml.Unmarshal(testCaseData, &tc) + tc := SortMergeListTestCases{} + err := yaml.Unmarshal(sortMergeListTestCaseData, &tc) if err != nil { t.Errorf("can't unmarshal test cases: %v", err) return } var e MergeItem - for _, c := range tc.SortMergeListTestCases { - sorted, err := sortMergeListsByName(toJSON(c.Original), e) + for _, c := range tc.TestCases { + sorted, err := sortMergeListsByName(toJSONOrFail(c.Original, t), e) if err != nil { t.Errorf("sort arrays returned error: %v", err) } - if !reflect.DeepEqual(sorted, toJSON(c.Sorted)) { - t.Errorf("sorting failed: %s\ntried to sort:\n%s\nexpected:\n%s\ngot:\n%s", - c.Description, toYAML(c.Original), toYAML(c.Sorted), jsonToYAML(sorted)) + if !reflect.DeepEqual(sorted, toJSONOrFail(c.Sorted, t)) { + t.Errorf("sorting failed: %v\ntried to sort:\n%vexpected:\n%vgot:\n%v", + c.Description, toYAMLOrError(c.Original), toYAMLOrError(c.Sorted), jsonToYAMLOrError(sorted)) } } } -func toYAML(v interface{}) string { - y, err := yaml.Marshal(v) +// These are test cases for StrategicMergePatch that cannot be generated using +// CreateStrategicMergePatch because it doesn't use the replace directive, generate +// duplicate integers for a merging list patch, or generate empty merging lists. 
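+//
+// For example (illustrative), a patch carrying the replace directive, such as
+//	{"other": "a", "$patch": "replace"}
+// discards every key of the original and keeps only "other"; since
+// CreateStrategicMergePatch never emits "$patch": "replace", such a case must
+// be written by hand.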
+var customStrategicMergePatchTestCaseData = []byte(` +testCases: + - description: unique scalars when merging lists + original: + mergingIntList: + - 1 + - 2 + patch: + mergingIntList: + - 2 + - 3 + modified: + mergingIntList: + - 1 + - 2 + - 3 + - description: delete all items from merging list + original: + mergingList: + - name: 1 + - name: 2 + patch: + mergingList: + - $patch: replace + modified: + mergingList: [] + - description: merge empty merging lists + original: + mergingList: [] + patch: + mergingList: [] + modified: + mergingList: [] + - description: delete all keys from map + original: + name: 1 + value: 1 + patch: + $patch: replace + modified: {} + - description: add key and delete all keys from map + original: + name: 1 + value: 1 + patch: + other: a + $patch: replace + modified: + other: a +`) + +func TestCustomStrategicMergePatch(t *testing.T) { + tc := StrategicMergePatchTestCases{} + err := yaml.Unmarshal(customStrategicMergePatchTestCaseData, &tc) if err != nil { - panic(fmt.Sprintf("yaml marshal failed: %v", err)) + t.Errorf("can't unmarshal test cases: %v", err) + return } + + for _, c := range tc.TestCases { + cOriginal, cPatch, cModified := testCaseToJSONOrFail(t, c) + testPatchApplication(t, cOriginal, cPatch, cModified, c.Description) + } +} + +func testCaseToJSONOrFail(t *testing.T, c StrategicMergePatchTestCase) ([]byte, []byte, []byte) { + var e MergeItem + cOriginal := toJSONOrFail(c.Original, t) + cPatch, err := sortMergeListsByName(toJSONOrFail(c.Patch, t), e) + if err != nil { + t.Errorf("error:%v sorting patch object:\n%v", err, c.Patch) + } + + cModified, err := sortMergeListsByName(toJSONOrFail(c.Modified, t), e) + if err != nil { + t.Errorf("error: %v sorting modified object:\n%v", err, c.Modified) + } + + return cOriginal, cPatch, cModified +} + +func testPatchApplication(t *testing.T, cOriginal, cPatch, cModified []byte, description string) { + var e MergeItem + modified, err := StrategicMergePatch(cOriginal, cPatch, e) + if err != nil { + t.Errorf("error applying patch: %v:\noriginal:\n%vpatch:\n%v", + err, jsonToYAMLOrError(cOriginal), jsonToYAMLOrError(cPatch)) + } + + // Sort the lists that have merged maps, since order is not significant. + modified, err = sortMergeListsByName(modified, e) + if err != nil { + t.Errorf("error: %v sorting modified object:\n%v", err, modified) + } + + if !reflect.DeepEqual(modified, cModified) { + t.Errorf("patch application failed: %v\noriginal:\n%vpatch:\n%vexpected modified:\n%vgot modified:\n%v", + description, jsonToYAMLOrError(cOriginal), jsonToYAMLOrError(cPatch), + jsonToYAMLOrError(cModified), jsonToYAMLOrError(modified)) + } +} + +// These are test cases for CreateStrategicMergePatch, used to assert that it +// generates the correct patch for a given outcome. They are also test cases for +// StrategicMergePatch, used to assert that applying a patch yields the correct +// outcome. 
+var createStrategicMergePatchTestCaseData = []byte(` +testCases: + - description: add field to map + original: + name: 1 + patch: + value: 1 + modified: + name: 1 + value: 1 + - description: add field and delete field from map + original: + name: 1 + patch: + name: null + value: 1 + modified: + value: 1 + - description: delete field from nested map + original: + simpleMap: + key1: 1 + key2: 1 + patch: + simpleMap: + key2: null + modified: + simpleMap: + key1: 1 + - description: delete all fields from map + original: + name: 1 + value: 1 + patch: + name: null + value: null + modified: {} + - description: add field and delete all fields from map + original: + name: 1 + value: 1 + patch: + other: a + name: null + value: null + modified: + other: a + - description: replace list of scalars + original: + nonMergingIntList: + - 1 + - 2 + patch: + nonMergingIntList: + - 2 + - 3 + modified: + nonMergingIntList: + - 2 + - 3 + - description: merge lists of scalars + original: + mergingIntList: + - 1 + - 2 + patch: + mergingIntList: + - 3 + modified: + mergingIntList: + - 1 + - 2 + - 3 + - description: merge lists of maps + original: + mergingList: + - name: 1 + - name: 2 + value: 2 + patch: + mergingList: + - name: 3 + value: 3 + modified: + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 3 + value: 3 + - description: add field to map in merging list + original: + mergingList: + - name: 1 + - name: 2 + value: 2 + patch: + mergingList: + - name: 1 + value: 1 + modified: + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - description: add duplicate field to map in merging list + original: + mergingList: + - name: 1 + - name: 2 + value: 2 + patch: + mergingList: + - name: 1 + value: 1 + modified: + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - description: replace map field value in merging list + original: + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + patch: + mergingList: + - name: 1 + value: a + modified: + mergingList: + - name: 1 + value: a + - name: 2 + value: 2 + - description: delete map from merging list + original: + mergingList: + - name: 1 + - name: 2 + patch: + mergingList: + - name: 1 + $patch: delete + modified: + mergingList: + - name: 2 + - description: delete missing map from merging list + original: + mergingList: + - name: 1 + - name: 2 + patch: + mergingList: + - name: 1 + $patch: delete + modified: + mergingList: + - name: 2 + - description: add map and delete map from merging list + original: + merginglist: + - name: 1 + - name: 2 + patch: + merginglist: + - name: 1 + $patch: delete + - name: 3 + modified: + merginglist: + - name: 2 + - name: 3 + - description: delete all maps from merging list + original: + mergingList: + - name: 1 + - name: 2 + patch: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + modified: + mergingList: [] + - description: delete all maps from partially empty merging list + original: + mergingList: + - name: 1 + - name: 2 + patch: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + modified: + mergingList: [] + - description: delete all maps from empty merging list + original: + mergingList: + - name: 1 + - name: 2 + patch: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + modified: + mergingList: [] + - description: delete field from map in merging list + original: + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + patch: + mergingList: + - name: 1 + value: null + modified: + mergingList: + - name: 1 + - name: 2 + 
value: 2 + - description: replace non merging list nested in merging list + original: + mergingList: + - name: 1 + nonMergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 + patch: + mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 + modified: + mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 + - name: 2 + - description: add field to map in merging list nested in merging list + original: + mergingList: + - name: 1 + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 + patch: + mergingList: + - name: 1 + mergingList: + - name: 1 + value: 1 + modified: + mergingList: + - name: 1 + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 + - description: merge empty merging lists + original: + mergingList: [] + patch: + {} + modified: + mergingList: [] + current: + mergingList: [] + result: + mergingList: [] + - description: add map to merging list by pointer + original: + mergeItemPtr: + - name: 1 + patch: + mergeItemPtr: + - name: 2 + modified: + mergeItemPtr: + - name: 1 + - name: 2 + - description: add field to map in merging list by pointer + original: + mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + - name: 2 + value: 2 + - name: 2 + patch: + mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + value: 1 + modified: + mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 +`) + +func TestStrategicMergePatch(t *testing.T) { + tc := StrategicMergePatchTestCases{} + err := yaml.Unmarshal(createStrategicMergePatchTestCaseData, &tc) + if err != nil { + t.Errorf("can't unmarshal test cases: %v", err) + return + } + + var e MergeItem + for _, c := range tc.TestCases { + cOriginal, cPatch, cModified := testCaseToJSONOrFail(t, c) + + // Test patch generation + patch, err := CreateStrategicMergePatch(cOriginal, cModified, e) + if err != nil { + t.Errorf("error generating patch: %s:\n%v", err, toYAMLOrError(c.StrategicMergePatchTestCaseData)) + } + + // Sort the lists that have merged maps, since order is not significant. 
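+		// (Otherwise reflect.DeepEqual below could reject patches that are
+		// semantically equal but list their merged maps in a different order.)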
+ patch, err = sortMergeListsByName(patch, e) + if err != nil { + t.Errorf("error: %s sorting patch object:\n%v", err, patch) + } + + if !reflect.DeepEqual(patch, cPatch) { + t.Errorf("patch generation failed:\n%vgot patch:\n%v", toYAMLOrError(c.StrategicMergePatchTestCaseData), jsonToYAMLOrError(patch)) + } + + // Test patch application + testPatchApplication(t, cOriginal, cPatch, cModified, c.Description) + } +} + +func toYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toJSONOrFail(v interface{}, t *testing.T) []byte { + theJSON, err := toJSON(v) + if err != nil { + t.Error(err) + } + + return theJSON +} + +func jsonToYAMLOrError(j []byte) string { + y, err := jsonToYAML(j) + if err != nil { + return err.Error() + } + return string(y) } -func toJSON(v interface{}) []byte { - j, err := json.Marshal(v) +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) if err != nil { - panic(fmt.Sprintf("json marshal failed: %s", spew.Sdump(v))) + return "", fmt.Errorf("yaml marshal failed: %v\n%v", err, spew.Sdump(v)) } - return j + + return string(y), nil } -func jsonToYAML(j []byte) []byte { +func toJSON(v interface{}) ([]byte, error) { + j, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("json marshal failed: %v\n%v", err, spew.Sdump(v)) + } + + return j, nil +} + +func jsonToYAML(j []byte) ([]byte, error) { y, err := yaml.JSONToYAML(j) if err != nil { - panic(fmt.Sprintf("json to yaml failed: %v", err)) + return nil, fmt.Errorf("json to yaml failed: %v\n%v", err, j) } - return y + + return y, nil } diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 449c9e9d940..33c4a351557 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -17,8 +17,8 @@ limitations under the License. package glusterfs import ( - "math/rand" "os" + "path" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" @@ -221,12 +221,23 @@ func (b *glusterfsBuilder) setUpAtInternal(dir string) error { options = append(options, "ro") } - l := len(b.hosts.Subsets) + p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName) + if err := os.MkdirAll(p, 0750); err != nil { + return err + } + log := path.Join(p, "glusterfs.log") + options = append(options, "log-file="+log) + + addr := make(map[string]struct{}) + for _, s := range b.hosts.Subsets { + for _, a := range s.Addresses { + addr[a.IP] = struct{}{} + } + } + // Avoid mount storm, pick a host randomly. - start := rand.Int() % l // Iterate all hosts until mount succeeds. 
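+	// Go randomizes map iteration order, so ranging over addr below starts
+	// from a different host on each kubelet, which spreads the mount load
+	// across the endpoints.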
- for i := start; i < start+l; i++ { - hostIP := b.hosts.Subsets[i%l].Addresses[0].IP + for hostIP := range addr { errs = b.mounter.Mount(hostIP+":"+b.path, dir, "glusterfs", options) if errs == nil { return nil diff --git a/pkg/volume/util.go b/pkg/volume/util.go index c8336496af8..9dba580b898 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -21,8 +21,8 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" diff --git a/pkg/watch/mux.go b/pkg/watch/mux.go index 5d1f71768de..ccae32264fb 100644 --- a/pkg/watch/mux.go +++ b/pkg/watch/mux.go @@ -41,8 +41,9 @@ const incomingQueueLength = 25 type Broadcaster struct { lock sync.Mutex - watchers map[int64]*broadcasterWatcher - nextWatcher int64 + watchers map[int64]*broadcasterWatcher + nextWatcher int64 + distributing sync.WaitGroup incoming chan Event @@ -67,6 +68,7 @@ func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *B watchQueueLength: queueLength, fullChannelBehavior: fullChannelBehavior, } + m.distributing.Add(1) go m.loop() return m } @@ -146,9 +148,14 @@ func (m *Broadcaster) Action(action EventType, obj runtime.Object) { } // Shutdown disconnects all watchers (but any queued events will still be distributed). -// You must not call Action after calling Shutdown. +// You must not call Action or Watch* after calling Shutdown. This call blocks +// until all events have been distributed through the outbound channels. Note +// that since they can be buffered, this means that the watchers might not +// have received the data yet as it can remain sitting in the buffered +// channel. func (m *Broadcaster) Shutdown() { close(m.incoming) + m.distributing.Wait() } // loop receives from m.incoming and distributes to all watchers. @@ -163,6 +170,7 @@ func (m *Broadcaster) loop() { m.distribute(event) } m.closeAll() + m.distributing.Done() } // distribute sends event to all watchers. Blocking. diff --git a/pkg/watch/mux_test.go b/pkg/watch/mux_test.go index fd31910060c..d3e48279cc6 100644 --- a/pkg/watch/mux_test.go +++ b/pkg/watch/mux_test.go @@ -124,9 +124,8 @@ func TestBroadcasterDropIfChannelFull(t *testing.T) { event2 := Event{Added, &myType{"bar", "hello world 2"}} // Add a couple watchers - const testWatchers = 2 - watches := make([]Interface, testWatchers) - for i := 0; i < testWatchers; i++ { + watches := make([]Interface, 2) + for i := range watches { watches[i] = m.Watch() } @@ -139,8 +138,8 @@ func TestBroadcasterDropIfChannelFull(t *testing.T) { // Pull events from the queue. wg := sync.WaitGroup{} - wg.Add(testWatchers) - for i := 0; i < testWatchers; i++ { + wg.Add(len(watches)) + for i := range watches { // Verify that each watcher only gets the first event because its watch // queue of length one was full from the first one. 
go func(watcher int, w Interface) { @@ -148,14 +147,12 @@ func TestBroadcasterDropIfChannelFull(t *testing.T) { e1, ok := <-w.ResultChan() if !ok { t.Errorf("Watcher %v failed to retrieve first event.", watcher) - return } if e, a := event1, e1; !reflect.DeepEqual(e, a) { t.Errorf("Watcher %v: Expected (%v, %#v), got (%v, %#v)", watcher, e.Type, e.Object, a.Type, a.Object) - } else { - t.Logf("Got (%v, %#v)", e1.Type, e1.Object) } + t.Logf("Got (%v, %#v)", e1.Type, e1.Object) e2, ok := <-w.ResultChan() if ok { t.Errorf("Watcher %v received second event (%v, %#v) even though it shouldn't have.", diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 3936ff4cfd2..4670def0350 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -27,10 +27,10 @@ import ( "strconv" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/healthz" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/util" diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index ba98913d254..012d08da8ad 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/client/cache" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -240,7 +240,7 @@ func maxConstraint(limitType api.LimitType, resourceName api.ResourceName, enfor func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { req, reqExists := request[resourceName] lim, limExists := limit[resourceName] - observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced) + observedReqValue, observedLimValue, _ := requestLimitEnforcedValues(req, lim, enforced) if !reqExists || (observedReqValue == int64(0)) { return fmt.Errorf("%s max limit to request ratio per %s is %s, but no request is specified or request is 0.", resourceName, limitType, enforced.String()) @@ -249,10 +249,16 @@ func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.Resou return fmt.Errorf("%s max limit to request ratio per %s is %s, but no limit is specified or limit is 0.", resourceName, limitType, enforced.String()) } - observedValue := observedLimValue / observedReqValue + observedRatio := float64(observedLimValue) / float64(observedReqValue) + displayObservedRatio := observedRatio + maxLimitRequestRatio := float64(enforced.Value()) + if enforced.Value() <= resource.MaxMilliValue { + observedRatio = observedRatio * 1000 + maxLimitRequestRatio = float64(enforced.MilliValue()) + } - if observedValue > enforcedValue { - return fmt.Errorf("%s max limit to request ratio per %s is %s, but provided ratio is %d.", resourceName, limitType, enforced.String(), observedValue) + if observedRatio > maxLimitRequestRatio { + return fmt.Errorf("%s max limit to request ratio per %s is %s, but provided 
ratio is %f.", resourceName, limitType, enforced.String(), displayObservedRatio) } return nil diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index 2635c4bfddb..42aefb1818f 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/client/unversioned/cache" + "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/unversioned/testclient" ) @@ -46,7 +46,7 @@ func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequ } // createLimitRange creates a limit range with the specified data -func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRequest api.ResourceList) api.LimitRange { +func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRequest, maxLimitRequestRatio api.ResourceList) api.LimitRange { return api.LimitRange{ ObjectMeta: api.ObjectMeta{ Name: "abc", @@ -55,11 +55,12 @@ func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRe Spec: api.LimitRangeSpec{ Limits: []api.LimitRangeItem{ { - Type: limitType, - Min: min, - Max: max, - Default: defaultLimit, - DefaultRequest: defaultRequest, + Type: limitType, + Min: min, + Max: max, + Default: defaultLimit, + DefaultRequest: defaultRequest, + MaxLimitRequestRatio: maxLimitRequestRatio, }, }, }, @@ -187,67 +188,75 @@ func TestPodLimitFunc(t *testing.T) { successCases := []testCase{ { pod: validPod("ctr-min-cpu-request", 1, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-min-cpu-request-limit", 1, getResourceRequirements(getResourceList("100m", ""), getResourceList("200m", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-min-memory-request", 1, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-min-memory-request-limit", 1, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-max-cpu-request-limit", 1, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))), - limitRange: 
createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-max-cpu-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-max-mem-request-limit", 1, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))), - limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, + { + pod: validPod("ctr-max-cpu-ratio", 1, getResourceRequirements(getResourceList("500m", ""), getResourceList("750m", ""))), + limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, getResourceList("1.5", "")), }, { pod: validPod("ctr-max-mem-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))), - limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-min-cpu-request", 2, getResourceRequirements(getResourceList("75m", ""), getResourceList("", ""))), - limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-min-cpu-request-limit", 2, getResourceRequirements(getResourceList("75m", ""), getResourceList("200m", ""))), - limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-min-memory-request", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", ""))), - limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-min-memory-request-limit", 2, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))), - limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, 
api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-max-cpu-request-limit", 2, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))), - limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-max-cpu-limit", 2, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))), - limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-max-mem-request-limit", 2, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))), - limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("pod-max-mem-limit", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))), - limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, + { + pod: validPod("pod-max-mem-ratio", 3, getResourceRequirements(getResourceList("", "300Mi"), getResourceList("", "450Mi"))), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "2Gi"), api.ResourceList{}, api.ResourceList{}, getResourceList("", "1.5")), }, } for i := range successCases { @@ -261,83 +270,91 @@ func TestPodLimitFunc(t *testing.T) { errorCases := []testCase{ { pod: validPod("ctr-min-cpu-request", 1, getResourceRequirements(getResourceList("40m", ""), getResourceList("", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-min-cpu-request-limit", 1, getResourceRequirements(getResourceList("40m", ""), getResourceList("200m", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-min-cpu-no-request-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), - limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("50m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), }, { pod: validPod("ctr-min-memory-request", 1, 
getResourceRequirements(getResourceList("", "40Mi"), getResourceList("", "")))),
-			limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-min-memory-request-limit", 1, getResourceRequirements(getResourceList("", "40Mi"), getResourceList("", "100Mi"))),
-			limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-min-memory-no-request-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
-			limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, getResourceList("", "50Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-max-cpu-request-limit", 1, getResourceRequirements(getResourceList("500m", ""), getResourceList("2500m", ""))),
-			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-max-cpu-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("2500m", ""))),
-			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-max-cpu-no-request-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
-			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+		},
+		{
+			pod: validPod("ctr-max-cpu-ratio", 1, getResourceRequirements(getResourceList("1250m", ""), getResourceList("2500m", ""))),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, getResourceList("1", "")),
 		},
 		{
 			pod: validPod("ctr-max-mem-request-limit", 1, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "2Gi"))),
-			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-max-mem-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "2Gi"))),
-			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("ctr-max-mem-no-request-limit", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
-			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypeContainer, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-min-cpu-request", 1, getResourceRequirements(getResourceList("75m", ""), getResourceList("", ""))),
-			limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-min-cpu-request-limit", 1, getResourceRequirements(getResourceList("75m", ""), getResourceList("200m", ""))),
-			limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, getResourceList("100m", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-min-memory-request", 1, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", ""))),
-			limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-min-memory-request-limit", 1, getResourceRequirements(getResourceList("", "60Mi"), getResourceList("", "100Mi"))),
-			limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, getResourceList("", "100Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-max-cpu-request-limit", 3, getResourceRequirements(getResourceList("500m", ""), getResourceList("1", ""))),
-			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-max-cpu-limit", 3, getResourceRequirements(getResourceList("", ""), getResourceList("1", ""))),
-			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-max-mem-request-limit", 3, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))),
-			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
 		},
 		{
 			pod: validPod("pod-max-mem-limit", 3, getResourceRequirements(getResourceList("", ""), getResourceList("", "500Mi"))),
-			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}),
+			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "1Gi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
+		},
+		{
+			pod: validPod("pod-max-mem-ratio", 3, getResourceRequirements(getResourceList("", "250Mi"), getResourceList("", "500Mi"))),
+			limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getResourceList("", "2Gi"), api.ResourceList{}, api.ResourceList{}, getResourceList("", "1.5")),
 		},
 	}
 	for i := range errorCases {
diff --git a/plugin/pkg/admission/namespace/autoprovision/admission.go b/plugin/pkg/admission/namespace/autoprovision/admission.go
index 7ddb236bc06..5224c1c6624 100644
--- a/plugin/pkg/admission/namespace/autoprovision/admission.go
+++ b/plugin/pkg/admission/namespace/autoprovision/admission.go
@@ -23,8 +23,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
diff --git a/plugin/pkg/admission/namespace/autoprovision/admission_test.go b/plugin/pkg/admission/namespace/autoprovision/admission_test.go
index a5a53395646..17830f41c81 100644
--- a/plugin/pkg/admission/namespace/autoprovision/admission_test.go
+++ b/plugin/pkg/admission/namespace/autoprovision/admission_test.go
@@ -22,7 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/runtime"
 )
diff --git a/plugin/pkg/admission/namespace/exists/admission.go b/plugin/pkg/admission/namespace/exists/admission.go
index f906d8398e1..a405d4daced 100644
--- a/plugin/pkg/admission/namespace/exists/admission.go
+++ b/plugin/pkg/admission/namespace/exists/admission.go
@@ -24,8 +24,8 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
diff --git a/plugin/pkg/admission/namespace/lifecycle/admission.go b/plugin/pkg/admission/namespace/lifecycle/admission.go
index ac644cf1a9a..80210f0cbf6 100644
--- a/plugin/pkg/admission/namespace/lifecycle/admission.go
+++ b/plugin/pkg/admission/namespace/lifecycle/admission.go
@@ -25,8 +25,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
diff --git a/plugin/pkg/admission/namespace/lifecycle/admission_test.go b/plugin/pkg/admission/namespace/lifecycle/admission_test.go
index 4f04d940e58..f714c54732f 100644
--- a/plugin/pkg/admission/namespace/lifecycle/admission_test.go
+++ b/plugin/pkg/admission/namespace/lifecycle/admission_test.go
@@ -22,7 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/watch"
diff --git a/plugin/pkg/admission/resourcequota/admission.go b/plugin/pkg/admission/resourcequota/admission.go
index aba2d235f45..98a88ac9dfd 100644
--- a/plugin/pkg/admission/resourcequota/admission.go
+++ b/plugin/pkg/admission/resourcequota/admission.go
@@ -25,8 +25,8 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/controller/resourcequota"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
diff --git a/plugin/pkg/admission/resourcequota/admission_test.go b/plugin/pkg/admission/resourcequota/admission_test.go
index 83c0efe05b1..9628159bb47 100644
--- a/plugin/pkg/admission/resourcequota/admission_test.go
+++ b/plugin/pkg/admission/resourcequota/admission_test.go
@@ -23,7 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/controller/resourcequota"
 )
diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go
index 8590b33c328..b06b6e62fb9 100644
--- a/plugin/pkg/admission/serviceaccount/admission.go
+++ b/plugin/pkg/admission/serviceaccount/admission.go
@@ -25,8 +25,8 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/kubelet"
 	"k8s.io/kubernetes/pkg/labels"
diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go
index 2b7b79293eb..67fbd005c06 100644
--- a/plugin/pkg/scheduler/factory/factory.go
+++ b/plugin/pkg/scheduler/factory/factory.go
@@ -25,8 +25,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/util"
diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go
index e18250224e6..1f27a07c302 100644
--- a/plugin/pkg/scheduler/factory/factory_test.go
+++ b/plugin/pkg/scheduler/factory/factory_test.go
@@ -26,8 +26,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/latest"
 	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
diff --git a/plugin/pkg/scheduler/modeler.go b/plugin/pkg/scheduler/modeler.go
index 5a2061fcb5c..3d91396c610 100644
--- a/plugin/pkg/scheduler/modeler.go
+++ b/plugin/pkg/scheduler/modeler.go
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/labels"
 
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
diff --git a/plugin/pkg/scheduler/modeler_test.go b/plugin/pkg/scheduler/modeler_test.go
index f2e77c1adf0..ab9ff937681 100644
--- a/plugin/pkg/scheduler/modeler_test.go
+++ b/plugin/pkg/scheduler/modeler_test.go
@@ -20,7 +20,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/labels"
 )
 
diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go
index 48ed1ad88ef..26d6be414df 100644
--- a/plugin/pkg/scheduler/scheduler.go
+++ b/plugin/pkg/scheduler/scheduler.go
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
+	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/metrics"
diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go
index 3e2f7f13016..dbac64faa9f 100644
--- a/plugin/pkg/scheduler/scheduler_test.go
+++ b/plugin/pkg/scheduler/scheduler_test.go
@@ -25,8 +25,8 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
+	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go
index e60503a32ff..46a3c5fe9fa 100644
--- a/test/e2e/autoscaling_utils.go
+++ b/test/e2e/autoscaling_utils.go
@@ -30,7 +30,7 @@
 const (
 	consumptionTimeInSeconds = 30
 	sleepTime = 30 * time.Second
-	requestSizeInMilicores = 100
+	requestSizeInMillicores = 100
 	port = 80
 	targetPort = 8080
 	timeoutRC = 120 * time.Second
@@ -80,11 +80,11 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
 	for {
 		select {
 		case millicores := <-rc.channel:
-			count = millicores / requestSizeInMilicores
-			rest = millicores - count*requestSizeInMilicores
+			count = millicores / requestSizeInMillicores
+			rest = millicores - count*requestSizeInMillicores
 		case <-time.After(sleepTime):
 			if count > 0 {
-				rc.sendConsumeCPUrequests(count, requestSizeInMilicores, consumptionTimeInSeconds)
+				rc.sendConsumeCPUrequests(count, requestSizeInMillicores, consumptionTimeInSeconds)
 			}
 			if rest > 0 {
 				go rc.sendOneConsumeCPUrequest(rest, consumptionTimeInSeconds)
diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go
index 4d3e971b8cf..3067b52a8ae 100644
--- a/test/e2e/daemon_restart.go
+++ b/test/e2e/daemon_restart.go
@@ -22,8 +22,8 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	controllerFramework "k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go
new file mode 100644
index 00000000000..05f412768ce
--- /dev/null
+++ b/test/e2e/daemon_set.go
@@ -0,0 +1,221 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/experimental"
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/fields"
+	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/util/wait"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Daemon set", func() {
+	f := &Framework{BaseName: "daemonsets"}
+
+	BeforeEach(func() {
+		f.beforeEach()
+		err := clearNodeLabels(f.Client)
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		err := clearNodeLabels(f.Client)
+		Expect(err).NotTo(HaveOccurred())
+		f.afterEach()
+	})
+
+	It("should launch a daemon pod on every node of the cluster", func() {
+		testDaemonSets(f)
+	})
+})
+
+func clearNodeLabels(c *client.Client) error {
+	nodeClient := c.Nodes()
+	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
+	if err != nil {
+		return err
+	}
+	for _, node := range nodeList.Items {
+		if len(node.Labels) != 0 {
+			node.Labels = map[string]string{}
+			newNode, err := nodeClient.Update(&node)
+			if err != nil {
+				return err
+			} else if len(newNode.Labels) != 0 {
+				return fmt.Errorf("could not clear labels on node %s", node.Name)
+			}
+		}
+	}
+	return nil
+}
+
+func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
+	return func() (bool, error) {
+		podList, err := f.Client.Pods(f.Namespace.Name).List(labels.Set(selector).AsSelector(), fields.Everything())
+		if err != nil {
+			return false, nil
+		}
+		pods := podList.Items
+
+		nodesToPodCount := make(map[string]int)
+		for _, pod := range pods {
+			nodesToPodCount[pod.Spec.NodeName] += 1
+		}
+
+		// Ensure that exactly 1 pod is running on all nodes in nodeNames.
+		for _, nodeName := range nodeNames {
+			if nodesToPodCount[nodeName] != 1 {
+				return false, nil
+			}
+		}
+
+		// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
+		// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
+		// other nodes.
+		return len(nodesToPodCount) == len(nodeNames), nil
+	}
+}
+
+func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) {
+	return func() (bool, error) {
+		nodeList, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
+		if err != nil {
+			return false, nil
+		}
+		nodeNames := make([]string, 0)
+		for _, node := range nodeList.Items {
+			nodeNames = append(nodeNames, node.Name)
+		}
+		return checkDaemonPodOnNodes(f, selector, nodeNames)()
+	}
+}
+
+func checkRunningOnNoNodes(f *Framework, selector map[string]string) func() (bool, error) {
+	return checkDaemonPodOnNodes(f, selector, make([]string, 0))
+}
+
+func testDaemonSets(f *Framework) {
+	ns := f.Namespace.Name
+	c := f.Client
+	simpleDSName := "simple-daemon-set"
+	image := "gcr.io/google_containers/serve_hostname:1.1"
+	label := map[string]string{"name": simpleDSName}
+	retryTimeout := 1 * time.Minute
+	retryInterval := 5 * time.Second
+
+	Logf("Creating simple daemon set %s", simpleDSName)
+	_, err := c.DaemonSets(ns).Create(&experimental.DaemonSet{
+		ObjectMeta: api.ObjectMeta{
+			Name: simpleDSName,
+		},
+		Spec: experimental.DaemonSetSpec{
+			Template: &api.PodTemplateSpec{
+				ObjectMeta: api.ObjectMeta{
+					Labels: label,
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name: simpleDSName,
+							Image: image,
+							Ports: []api.ContainerPort{{ContainerPort: 9376}},
+						},
+					},
+				},
+			},
+		},
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Check that daemon pods launch on every node of the cluster.")
+	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
+
+	By("Stop a daemon pod, check that the daemon pod is revived.")
+	podClient := c.Pods(ns)
+
+	podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
+	Expect(err).NotTo(HaveOccurred())
+	Expect(len(podList.Items)).To(BeNumerically(">", 0))
+	pod := podList.Items[0]
+	err = podClient.Delete(pod.Name, nil)
+	Expect(err).NotTo(HaveOccurred())
+	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
+
+	complexDSName := "complex-daemon-set"
+	complexLabel := map[string]string{"name": complexDSName}
+	nodeSelector := map[string]string{"color": "blue"}
+	Logf("Creating daemon with a node selector %s", complexDSName)
+	_, err = c.DaemonSets(ns).Create(&experimental.DaemonSet{
+		ObjectMeta: api.ObjectMeta{
+			Name: complexDSName,
+		},
+		Spec: experimental.DaemonSetSpec{
+			Selector: complexLabel,
+			Template: &api.PodTemplateSpec{
+				ObjectMeta: api.ObjectMeta{
+					Labels: complexLabel,
+				},
+				Spec: api.PodSpec{
+					NodeSelector: nodeSelector,
+					Containers: []api.Container{
+						{
+							Name: complexDSName,
+							Image: image,
+							Ports: []api.ContainerPort{{ContainerPort: 9376}},
+						},
+					},
+				},
+			},
+		},
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Initially, daemon pods should not be running on any nodes.")
+	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
+
+	By("Change label of node, check that daemon pod is launched.")
+	nodeClient := c.Nodes()
+	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
+	Expect(err).NotTo(HaveOccurred())
+	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
+	nodeList.Items[0].Labels = nodeSelector
+	newNode, err := nodeClient.Update(&nodeList.Items[0])
+	Expect(err).NotTo(HaveOccurred())
+	Expect(len(newNode.Labels)).To(Equal(1))
+	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
+
+	By("Remove the node selector and wait for daemon pods to stop running on the node.")
+	newNode, err = nodeClient.Get(newNode.Name)
+	Expect(err).NotTo(HaveOccurred(), "error getting node")
+	newNode.Labels = map[string]string{}
+	newNode, err = nodeClient.Update(newNode)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
+		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+}
diff --git a/test/e2e/density.go b/test/e2e/density.go
index cb310aaa117..80b772de330 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -27,8 +27,8 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
diff --git a/test/e2e/google_compute.go b/test/e2e/google_compute.go
new file mode 100644
index 00000000000..270831ff9e8
--- /dev/null
+++ b/test/e2e/google_compute.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"os/exec"
+	"regexp"
+	"strings"
+)
+
+func createGCEStaticIP(name string) (string, error) {
+	// Example: gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
+	// Sample output:
+	// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
+	// NAME           REGION       ADDRESS        STATUS
+	// test-static-ip us-central1  104.197.143.7  RESERVED
+
+	glog.Infof("Creating static IP %s in project %s", name, testContext.CloudConfig.ProjectID)
+	output, err := exec.Command("gcloud", "compute", "addresses", "create",
+		name, "--project", testContext.CloudConfig.ProjectID,
+		"--region", "us-central1", "-q").CombinedOutput()
+	if err != nil {
+		return "", err
+	}
+	text := string(output)
+	if strings.Contains(text, "RESERVED") {
+		r, _ := regexp.Compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
+		staticIP := r.FindString(text)
+		if staticIP == "" {
+			glog.Errorf("Static IP creation output is \n %s", text)
+			return "", fmt.Errorf("static IP not found in gcloud compute command output")
+		} else {
+			return staticIP, nil
+		}
+	} else {
+		return "", fmt.Errorf("static IP could not be reserved")
+	}
+}
+
+func deleteGCEStaticIP(name string) error {
+	// Releases a static IP previously reserved with createGCEStaticIP.
+	// Example: gcloud compute --project "abshah-kubernetes-001" addresses delete "test-static-ip" --region "us-central1" -q
+	// Sample output:
+	// Deleted [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
+	// The -q flag suppresses gcloud's interactive confirmation prompt.
+
+	_, err := exec.Command("gcloud", "compute", "addresses", "delete",
+		name, "--project", testContext.CloudConfig.ProjectID,
+		"--region", "us-central1", "-q").CombinedOutput()
+	return err
+}
diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go
index 771e05e1ba2..b1734ce5e04 100644
--- a/test/e2e/horizontal_pod_autoscaling.go
+++ b/test/e2e/horizontal_pod_autoscaling.go
@@ -21,7 +21,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/expapi"
+	"k8s.io/kubernetes/pkg/apis/experimental"
 
 	. "github.com/onsi/ginkgo"
 )
@@ -101,13 +101,13 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 })
 
 func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
-	hpa := &expapi.HorizontalPodAutoscaler{
+	hpa := &experimental.HorizontalPodAutoscaler{
 		ObjectMeta: api.ObjectMeta{
 			Name: rc.name,
 			Namespace: rc.framework.Namespace.Name,
 		},
-		Spec: expapi.HorizontalPodAutoscalerSpec{
-			ScaleRef: &expapi.SubresourceReference{
+		Spec: experimental.HorizontalPodAutoscalerSpec{
+			ScaleRef: &experimental.SubresourceReference{
 				Kind: "replicationController",
 				Name: rc.name,
 				Namespace: rc.framework.Namespace.Name,
@@ -115,7 +115,7 @@ func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
 			},
 			MinCount: 1,
 			MaxCount: 5,
-			Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(cpu)},
+			Target: experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(cpu)},
 		},
 	}
 	_, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go
index eec0ac155b2..41166811954 100644
--- a/test/e2e/kubectl.go
+++ b/test/e2e/kubectl.go
@@ -188,6 +188,14 @@ var _ = Describe("Kubectl client", func() {
 		}
 	})
 
+	It("should support inline execution and attach", func() {
+		By("executing a command with run and attach")
+		runOutput := runKubectl(fmt.Sprintf("--namespace=%v", ns), "run", "run-test", "--image=busybox", "--restart=Never", "--attach=true", "echo", "running", "in", "container")
+		expectedRunOutput := "running in container"
+		Expect(runOutput).To(ContainSubstring(expectedRunOutput))
+		// everything in the ns will be deleted at the end of the test
+	})
+
 	It("should support port-forward", func() {
 		By("forwarding the container port to a local port")
 		cmd := kubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), simplePodName, fmt.Sprintf(":%d", simplePodPort))
diff --git a/test/e2e/namespace.go b/test/e2e/namespace.go
index 5c7f49013df..fbaa89ee60c 100644
--- a/test/e2e/namespace.go
+++ b/test/e2e/namespace.go
@@ -19,13 +19,14 @@ package e2e
 import (
 	"fmt"
 	//"k8s.io/kubernetes/pkg/api"
+	"strings"
+	"sync"
+	"time"
+
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"
-	"strings"
-	"sync"
-	"time"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -110,10 +111,10 @@ var _ = Describe("Namespaces", func() {
 
 	//Confirms that namespace draining is functioning reasonably
 	//at minute intervals.
-	It("Delete 90 percent of 100 namespace in 150 seconds",
+	It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
 		func() { extinguish(c, 100, 10, 150) })
 
 	//comprehensive draining ; uncomment after #7372
-	PIt("Delete ALL of 100 namespace in 150 seconds",
+	PIt("should always delete fast (ALL of 100 namespaces in 150 seconds)",
 		func() { extinguish(c, 100, 0, 150) })
 })
diff --git a/test/e2e/service.go b/test/e2e/service.go
index f6b054b645a..8160208577a 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -26,6 +26,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"k8s.io/kubernetes/pkg/api"
@@ -418,6 +419,75 @@ var _ = Describe("Services", func() {
 		testLoadBalancerReachable(ingress, inboundPort)
 	})
 
+	It("should be able to create a functioning external load balancer with user-provided load balancer ip", func() {
+		// requires ExternalLoadBalancer
+		SkipUnlessProviderIs("gce", "gke")
+
+		serviceName := "lb-test-with-user-ip"
+		ns := namespaces[0]
+
+		t := NewWebserverTest(c, ns, serviceName)
+		defer func() {
+			defer GinkgoRecover()
+			errs := t.Cleanup()
+			if len(errs) != 0 {
+				Failf("errors in cleanup: %v", errs)
+			}
+		}()
+
+		inboundPort := 3000
+
+		service := t.BuildServiceSpec()
+		service.Spec.Type = api.ServiceTypeLoadBalancer
+		service.Spec.Ports[0].Port = inboundPort
+		service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(80)
+
+		By("creating an external static ip")
+		rand.Seed(time.Now().UTC().UnixNano())
+		staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
+		glog.Infof("Static IP name is %s", staticIPName)
+		loadBalancerIP, err := createGCEStaticIP(staticIPName)
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			deleteGCEStaticIP(staticIPName)
+		}()
+
+		service.Spec.LoadBalancerIP = loadBalancerIP
+
+		By("creating service " + serviceName + " with external load balancer in namespace " + ns)
+		result, err := t.CreateService(service)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Wait for the load balancer to be created asynchronously, which is
+		// currently indicated by ingress point(s) being added to the status.
+		result, err = waitForLoadBalancerIngress(c, serviceName, ns)
+		Expect(err).NotTo(HaveOccurred())
+		if len(result.Status.LoadBalancer.Ingress) != 1 {
+			Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
+		}
+		ingress := result.Status.LoadBalancer.Ingress[0]
+		Expect(ingress.IP).To(Equal(loadBalancerIP))
+		if len(result.Spec.Ports) != 1 {
+			Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
+		}
+		port := result.Spec.Ports[0]
+		if port.NodePort == 0 {
+			Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
+		}
+		if !ServiceNodePortRange.Contains(port.NodePort) {
+			Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
+		}
+
+		By("creating pod to be part of service " + serviceName)
+		t.CreateWebserverRC(1)
+
+		By("hitting the pod through the service's NodePort")
+		testReachable(pickMinionIP(c), port.NodePort)
+
+		By("hitting the pod through the service's external load balancer")
+		testLoadBalancerReachable(ingress, inboundPort)
+	})
+
 	It("should be able to create a functioning NodePort service", func() {
 		serviceName := "nodeportservice-test"
 		ns := namespaces[0]
@@ -1049,18 +1119,15 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
 
 			// use endpoint annotations to recover the container port in a Mesos setup
 			// compare contrib/mesos/pkg/service/endpoints_controller.syncService
-			if providerIs("mesos/docker") {
-				key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
-				containerPortString := endpoints.Annotations[key]
-				if containerPortString == "" {
-					continue
-				}
+			key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
+			mesosContainerPortString := endpoints.Annotations[key]
+			if mesosContainerPortString != "" {
 				var err error
-				containerPort, err = strconv.Atoi(containerPortString)
+				containerPort, err = strconv.Atoi(mesosContainerPortString)
 				if err != nil {
 					continue
 				}
-				Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, containerPortString)
+				Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
 			}
 
 			Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
diff --git a/test/e2e/service_latency.go b/test/e2e/service_latency.go
index 91171378f5d..7dfc4f09037 100644
--- a/test/e2e/service_latency.go
+++ b/test/e2e/service_latency.go
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
diff --git a/test/e2e/util.go b/test/e2e/util.go
index 63233a25e5b..104cb4aa07b 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -35,8 +35,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go
index 7c90f1596a1..60683243b7c 100644
--- a/test/integration/framework/master_utils.go
+++ b/test/integration/framework/master_utils.go
@@ -29,11 +29,11 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/latest"
 	"k8s.io/kubernetes/pkg/api/testapi"
+	explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
 	"k8s.io/kubernetes/pkg/apiserver"
+	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
 	"k8s.io/kubernetes/pkg/controller/replication"
-	explatest "k8s.io/kubernetes/pkg/expapi/latest"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/labels"
diff --git a/test/integration/scheduler_test.go b/test/integration/scheduler_test.go
index a6042900afa..639c8ad7035 100644
--- a/test/integration/scheduler_test.go
+++ b/test/integration/scheduler_test.go
@@ -32,9 +32,9 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/apiserver"
+	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
 	"k8s.io/kubernetes/pkg/master"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/wait"
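A note on the limitranger test changes near the top of this patch: the new trailing api.ResourceList passed to the createLimitRange test helper, exercised by the "ctr-max-cpu-ratio" and "pod-max-mem-ratio" error cases, supplies a maximum limit-to-request ratio for the limit range. As a hedged sketch only (the MaxLimitRequestRatio field name is an assumption inferred from the helper's argument order, not something this diff shows directly), a LimitRange enforcing such a ratio might be constructed like this:

	// Sketch, not part of the patch. Assumes api.LimitRangeItem carries a
	// MaxLimitRequestRatio of type api.ResourceList, populated from the new
	// sixth argument of the createLimitRange test helper.
	limitRange := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{Name: "cpu-ratio", Namespace: "test"},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypeContainer,
					// A container requesting 1250m with a 2500m limit has a
					// limit/request ratio of 2, which exceeds 1 and would be
					// rejected, matching the "ctr-max-cpu-ratio" error case.
					MaxLimitRequestRatio: getResourceList("1", ""),
				},
			},
		},
	}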