From ea9500956cb7e2a6e21bf04c1b0b581d0168822d Mon Sep 17 00:00:00 2001 From: Jeffrey Sica Date: Mon, 7 Jan 2019 19:36:45 -0500 Subject: [PATCH 001/194] update OWNERS so it isn't single threaded. --- cluster/addons/dashboard/OWNERS | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cluster/addons/dashboard/OWNERS b/cluster/addons/dashboard/OWNERS index 04353ada135..0ec35d8c102 100644 --- a/cluster/addons/dashboard/OWNERS +++ b/cluster/addons/dashboard/OWNERS @@ -1,12 +1,13 @@ approvers: - bryk +- floreks +- jeefy +- maciaszczykm reviewers: - cheld - cupofcat - danielromlein -- floreks - ianlewis - konryd -- maciaszczykm - mhenc -- rf232 +- rf232 \ No newline at end of file From cbfe654a30f1027d2434cd227a54a0649b616ade Mon Sep 17 00:00:00 2001 From: maohongbin01 Date: Sat, 26 Jan 2019 23:05:47 +0800 Subject: [PATCH 002/194] remove unused const in node-controller.go --- pkg/controller/cloud/node_controller.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 312bb550bee..85b5d717f7c 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -57,14 +57,6 @@ type CloudNodeController struct { nodeStatusUpdateFrequency time.Duration } -const ( - // nodeStatusUpdateRetry controls the number of retries of writing NodeStatus update. - nodeStatusUpdateRetry = 5 - - // The amount of time the nodecontroller should sleep between retrying NodeStatus updates - retrySleepTime = 20 * time.Millisecond -) - // NewCloudNodeController creates a CloudNodeController object func NewCloudNodeController( nodeInformer coreinformers.NodeInformer, From 6e7132e9db6c6ff49808498a688a2f48702016f9 Mon Sep 17 00:00:00 2001 From: AdamDang Date: Wed, 13 Feb 2019 17:05:24 +0800 Subject: [PATCH 003/194] Fix the klog.Errorf message health server->healthz server --- cmd/kubelet/app/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 99d125e5439..2df1b1f50e9 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -717,7 +717,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan go wait.Until(func() { err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil) if err != nil { - klog.Errorf("Starting health server failed: %v", err) + klog.Errorf("Starting healthz server failed: %v", err) } }, 5*time.Second, wait.NeverStop) } From 2a40ef473f906b6a165690480dc000b9e5560258 Mon Sep 17 00:00:00 2001 From: Oz N Tiram Date: Sat, 19 Jan 2019 05:54:28 +0100 Subject: [PATCH 004/194] Add initial support for OpenRC * Gentoo has init scripts for kubelet * Added a new method of the InitSystem Interface This helps issuing nicer messages when not on systemd. * OpenRCInitSystem.ServiceExists uses CombinedOutput because the behaviour of OpenRC is different from systemd. 
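As a rough illustration of how a caller might consume the extended interface (a sketch only, not part of this patch; it assumes the k8s.io/kubernetes/pkg/util/initsystem import path used by the diff below and a hypothetical standalone main package):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/initsystem"
)

func main() {
	// Detect the running init system (systemd, OpenRC, or the Windows service manager).
	is, err := initsystem.GetInitSystem()
	if err != nil {
		fmt.Printf("no supported init system detected: %v\n", err)
		return
	}
	// EnableCommand returns a human-readable command the user can run themselves,
	// e.g. "rc-update add kubelet default" on OpenRC or
	// "systemctl enable kubelet.service" on systemd.
	if !is.ServiceIsEnabled("kubelet") {
		fmt.Printf("kubelet is not enabled; enable it with: %s\n", is.EnableCommand("kubelet"))
	}
}
```

That printed hint is the kind of "nicer message" the second bullet refers to.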
This is a partial fix for https://github.com/kubernetes/kubeadm/issues/1295 --- pkg/util/initsystem/initsystem.go | 66 +++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/pkg/util/initsystem/initsystem.go b/pkg/util/initsystem/initsystem.go index 6638bab5eb2..da24217ebc3 100644 --- a/pkg/util/initsystem/initsystem.go +++ b/pkg/util/initsystem/initsystem.go @@ -23,6 +23,9 @@ import ( ) type InitSystem interface { + // return a string describing how to enable a service + EnableCommand(service string) string + // ServiceStart tries to start a specific service ServiceStart(service string) error @@ -42,8 +45,63 @@ type InitSystem interface { ServiceIsActive(service string) bool } +type OpenRCInitSystem struct{} + +func (openrc OpenRCInitSystem) ServiceStart(service string) error { + args := []string{service, "start"} + return exec.Command("rc-service", args...).Run() +} + +func (openrc OpenRCInitSystem) ServiceStop(service string) error { + args := []string{service, "stop"} + return exec.Command("rc-service", args...).Run() +} + +func (openrc OpenRCInitSystem) ServiceRestart(service string) error { + args := []string{service, "restart"} + return exec.Command("rc-service", args...).Run() +} + +// openrc writes to stderr if a service is not found or not enabled +// this is in contrast to systemd which only writes to stdout. +// Hence, we use the Combinedoutput, and ignore the error. +func (openrc OpenRCInitSystem) ServiceExists(service string) bool { + args := []string{service, "status"} + outBytes, _ := exec.Command("rc-service", args...).CombinedOutput() + if strings.Contains(string(outBytes), "does not exist") { + return false + } + return true +} + +func (openrc OpenRCInitSystem) ServiceIsEnabled(service string) bool { + args := []string{"show", "default"} + outBytes, _ := exec.Command("rc-update", args...).Output() + if strings.Contains(string(outBytes), service) { + return true + } + return false +} + +func (openrc OpenRCInitSystem) ServiceIsActive(service string) bool { + args := []string{service, "status"} + outBytes, _ := exec.Command("rc-service", args...).Output() + if strings.Contains(string(outBytes), "stopped") { + return false + } + return true +} + +func (openrc OpenRCInitSystem) EnableCommand(service string) string { + return fmt.Sprintf("rc-update add %s default", service) +} + type SystemdInitSystem struct{} +func (sysd SystemdInitSystem) EnableCommand(service string) string { + return fmt.Sprintf("systemctl enable %s.service", service) +} + func (sysd SystemdInitSystem) reloadSystemd() error { if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { return fmt.Errorf("failed to reload systemd: %v", err) @@ -110,6 +168,10 @@ func (sysd SystemdInitSystem) ServiceIsActive(service string) bool { // WindowsInitSystem is the windows implementation of InitSystem type WindowsInitSystem struct{} +func (sysd WindowsInitSystem) EnableCommand(service string) string { + return fmt.Sprintf("Set-Service '%s' -StartupType Automatic", service) +} + func (sysd WindowsInitSystem) ServiceStart(service string) error { args := []string{"Start-Service", service} err := exec.Command("powershell", args...).Run() @@ -171,6 +233,10 @@ func GetInitSystem() (InitSystem, error) { if err == nil { return &SystemdInitSystem{}, nil } + _, err = exec.LookPath("openrc") + if err == nil { + return &OpenRCInitSystem{}, nil + } _, err = exec.LookPath("wininit.exe") if err == nil { return &WindowsInitSystem{}, nil From 1c468bf2e2c3db880af7485310334ffbff39ffe3 Mon Sep 17 
00:00:00 2001 From: viegasdom Date: Thu, 11 Apr 2019 14:17:08 +0100 Subject: [PATCH 005/194] Fix golint failures of util/bandwith/*.go --- CHANGELOG-1.14.md | 1 + build/workspace.bzl | 12 +- cluster/gce/config-default.sh | 3 + cluster/gce/config-test.sh | 3 + cluster/gce/gci/configure-helper.sh | 8 +- cluster/gce/gci/configure.sh | 13 +- cluster/gce/util.sh | 2 + cmd/kubeadm/app/phases/certs/certs_test.go | 2 +- hack/.golint_failures | 1 - hack/.shellcheck_failures | 1 - hack/update-codegen.sh | 11 - hack/update-vendor-licenses.sh | 13 +- .../providers/.import-restrictions | 4 +- pkg/scheduler/core/generic_scheduler.go | 3 +- pkg/scheduler/factory/plugins.go | 20 +- pkg/scheduler/internal/cache/node_tree.go | 1 + pkg/util/bandwidth/fake_shaper.go | 7 + pkg/util/bandwidth/interfaces.go | 4 +- pkg/util/bandwidth/linux.go | 6 +- pkg/util/bandwidth/unsupported.go | 3 +- pkg/util/bandwidth/utils.go | 1 + .../apiserver/pkg/storage/cacher/cacher.go | 8 +- staging/src/k8s.io/client-go/INSTALL.md | 14 +- .../in-cluster-client-configuration/README.md | 2 +- .../examples/leader-election/README.md | 8 +- .../README.md | 2 +- .../src/k8s.io/client-go/tools/pager/BUILD | 1 + .../src/k8s.io/client-go/tools/pager/pager.go | 114 +++++++++ .../client-go/tools/pager/pager_test.go | 242 +++++++++++++++++- test/e2e/framework/service_util.go | 90 +++---- 30 files changed, 482 insertions(+), 118 deletions(-) diff --git a/CHANGELOG-1.14.md b/CHANGELOG-1.14.md index f4eeef531e4..d54038e304b 100644 --- a/CHANGELOG-1.14.md +++ b/CHANGELOG-1.14.md @@ -155,6 +155,7 @@ filename | sha512 hash * [metatada-proxy addon] Bump prometheus-to-sd v0.5.0 to pick up security fixes. * kube-proxy no longer automatically cleans up network rules created by running kube-proxy in other modes. If you are switching the mode that kube-proxy is in running in (EG: iptables to IPVS), you will need to run `kube-proxy --cleanup`, or restart the worker node (recommended) before restarting kube-proxy. ([#76109](https://github.com/kubernetes/kubernetes/pull/76109), [@vllry](https://github.com/vllry)) * If you are not switching kube-proxy between different modes, this change should not require any action. + * This fixes a bug where restarting the iptables proxier can cause connections to fail (https://github.com/kubernetes/kubernetes/issues/75360). * kubeadm: fixes error when upgrading from v1.13 to v1.14 clusters created with kubeadm v1.12. Please note that it is required to upgrade etcd during the final v1.13 to v1.14 upgrade. 
([#75956](https://github.com/kubernetes/kubernetes/pull/75956), [@fabriziopandini](https://github.com/fabriziopandini)) * Fixes a regression proxying responses from aggregated API servers which could cause watch requests to hang until the first event was received ([#75887](https://github.com/kubernetes/kubernetes/pull/75887), [@liggitt](https://github.com/liggitt)) * Increased verbose level for local openapi aggregation logs to avoid flooding the log during normal operation ([#75781](https://github.com/kubernetes/kubernetes/pull/75781), [@roycaihw](https://github.com/roycaihw)) diff --git a/build/workspace.bzl b/build/workspace.bzl index cffc0cb71a6..b4cd5ab205f 100644 --- a/build/workspace.bzl +++ b/build/workspace.bzl @@ -26,13 +26,13 @@ _CNI_TARBALL_ARCH_SHA256 = { "s390x": "415cdcf02c65c22f5b7e55b0ab61208a10f2b95a0c8310176c771d07a9f448cf", } -CRI_TOOLS_VERSION = "1.12.0" +CRI_TOOLS_VERSION = "1.14.0" _CRI_TARBALL_ARCH_SHA256 = { - "amd64": "e7d913bcce40bf54e37ab1d4b75013c823d0551e6bc088b217bc1893207b4844", - "arm": "ca6b4ac80278d32d9cc8b8b19de140fd1cc35640f088969f7068fea2df625490", - "arm64": "8466f08b59bf36d2eebcb9428c3d4e6e224c3065d800ead09ad730ce374da6fe", - "ppc64le": "ec6254f1f6ffa064ba41825aab5612b7b005c8171fbcdac2ca3927d4e393000f", - "s390x": "814aa9cd496be416612c2653097a1c9eb5784e38aa4889034b44ebf888709057", + "amd64": "483c90a9fe679590df4332ba807991c49232e8cd326c307c575ecef7fe22327b", + "arm": "9910cecfd6558239ba015323066c7233d8371af359b9ddd0b2a35d5223bcf945", + "arm64": "f76b3d00a272c8d210e9a45f77d07d3770bee310d99c4fd9a72d6f55278882e5", + "ppc64le": "1e2cd11a1e025ed9755521cf13bb1bda986afa0052597a9bb44d31e62583413b", + "s390x": "8b7b5749cba88ef337997ae90aa04380e3cab2c040b44b505b2fcd691c4935e4", } ETCD_VERSION = "3.3.10" diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index a8ef5c0a9c9..abc8d0daa4c 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -295,6 +295,9 @@ NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" +CNI_VERSION="${CNI_VERSION:-}" +CNI_SHA1="${CNI_SHA1:-}" + # Optional: Create autoscaler for cluster's nodes. ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 3d5002c28d5..28f0a68c19f 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -307,6 +307,9 @@ NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" +CNI_VERSION="${CNI_VERSION:-}" +CNI_SHA1="${CNI_SHA1:-}" + # Optional: Create autoscaler for cluster's nodes. 
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index b1aa6eb2ee8..2adbbf4d878 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1150,19 +1150,19 @@ function create-master-etcd-apiserver-auth { echo "${ETCD_APISERVER_CA_KEY}" | base64 --decode > "${ETCD_APISERVER_CA_KEY_PATH}" ETCD_APISERVER_CA_CERT_PATH="${auth_dir}/etcd-apiserver-ca.crt" - echo "${ETCD_APISERVER_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-apiserver-ca.crt" + echo "${ETCD_APISERVER_CA_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CA_CERT_PATH}" ETCD_APISERVER_SERVER_KEY_PATH="${auth_dir}/etcd-apiserver-server.key" echo "${ETCD_APISERVER_SERVER_KEY}" | base64 --decode > "${ETCD_APISERVER_SERVER_KEY_PATH}" ETCD_APISERVER_SERVER_CERT_PATH="${auth_dir}/etcd-apiserver-server.crt" - echo "${ETCD_APISERVER_SERVER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-apiserver-server.crt" + echo "${ETCD_APISERVER_SERVER_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_SERVER_CERT_PATH}" ETCD_APISERVER_CLIENT_KEY_PATH="${auth_dir}/etcd-apiserver-client.key" - echo "${ETCD_APISERVER_CLIENT_KEY}" | base64 --decode > "${auth_dir}/etcd-apiserver-client.key" + echo "${ETCD_APISERVER_CLIENT_KEY}" | base64 --decode > "${ETCD_APISERVER_CLIENT_KEY_PATH}" ETCD_APISERVER_CLIENT_CERT_PATH="${auth_dir}/etcd-apiserver-client.crt" - echo "${ETCD_APISERVER_CLIENT_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-apiserver-client.crt" + echo "${ETCD_APISERVER_CLIENT_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CLIENT_CERT_PATH}" fi } diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 2bb9e6224f8..f03914616cb 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -28,8 +28,8 @@ DEFAULT_CNI_VERSION="v0.7.5" DEFAULT_CNI_SHA1="52e9d2de8a5f927307d9397308735658ee44ab8d" DEFAULT_NPD_VERSION="v0.6.3" DEFAULT_NPD_SHA1="3a6ac56be6c121f1b94450bfd1a81ad28d532369" -DEFAULT_CRICTL_VERSION="v1.12.0" -DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f" +DEFAULT_CRICTL_VERSION="v1.14.0" +DEFAULT_CRICTL_SHA1="1f93c6183d0a4e186708efe7899da7a7bce9c736" DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571" ### @@ -235,8 +235,13 @@ function install-node-problem-detector { } function install-cni-binaries { - local -r cni_tar="cni-plugins-amd64-${DEFAULT_CNI_VERSION}.tgz" - local -r cni_sha1="${DEFAULT_CNI_SHA1}" + if [[ -n "${CNI_VERSION:-}" ]]; then + local -r cni_tar="cni-plugins-amd64-${CNI_VERSION}.tgz" + local -r cni_sha1="${CNI_SHA1}" + else + local -r cni_tar="cni-plugins-amd64-${DEFAULT_CNI_VERSION}.tgz" + local -r cni_sha1="${DEFAULT_CNI_SHA1}" + fi if is-preloaded "${cni_tar}" "${cni_sha1}"; then echo "${cni_tar} is preloaded." 
return diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 543d64f20d0..337dee092e5 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -1112,6 +1112,8 @@ NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-}) NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-}) NODE_PROBLEM_DETECTOR_RELEASE_PATH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}) NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS: $(yaml-quote ${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}) +CNI_VERSION: $(yaml-quote ${CNI_VERSION:-}) +CNI_SHA1: $(yaml-quote ${CNI_SHA1:-}) ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false}) LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-}) ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-}) diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index 20bf5d4a74d..a77a33f95de 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -364,7 +364,7 @@ func TestWriteKeyFilesIfNotExist(t *testing.T) { } //TODO: check if there is a better method to compare keys - if resultingKey.D == key.D { + if resultingKey.D == test.expectedKey.D { t.Error("created key does not match expected key") } } diff --git a/hack/.golint_failures b/hack/.golint_failures index 09f0f40a4d8..f8523d5e7d3 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -319,7 +319,6 @@ pkg/security/podsecuritypolicy/util pkg/securitycontext pkg/serviceaccount pkg/ssh -pkg/util/bandwidth pkg/util/config pkg/util/ebtables pkg/util/env diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 3fbdac45e68..842679db1d1 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -51,7 +51,6 @@ ./hack/update-gofmt.sh ./hack/update-openapi-spec.sh ./hack/update-translations.sh -./hack/update-vendor-licenses.sh ./hack/update-vendor.sh ./hack/verify-api-groups.sh ./hack/verify-boilerplate.sh diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index add561e071b..126db0dafda 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -40,7 +40,6 @@ informergen=$(kube::util::find-binary "informer-gen") GROUP_VERSIONS=(${KUBE_AVAILABLE_GROUP_VERSIONS}) GV_DIRS=() -INTERNAL_DIRS=() for gv in "${GROUP_VERSIONS[@]}"; do # add items, but strip off any leading apis/ you find to match command expectations api_dir=$(kube::util::group-version-to-pkg-path "${gv}") @@ -55,19 +54,9 @@ for gv in "${GROUP_VERSIONS[@]}"; do fi GV_DIRS+=("${pkg_dir}") - - # collect internal groups - int_group="${pkg_dir%/*}/" - if [[ "${pkg_dir}" = core/* ]]; then - int_group="api/" - fi - if ! [[ " ${INTERNAL_DIRS[@]:-} " =~ " ${int_group} " ]]; then - INTERNAL_DIRS+=("${int_group}") - fi done # delimit by commas for the command GV_DIRS_CSV=$(IFS=',';echo "${GV_DIRS[*]// /,}";IFS=$) -INTERNAL_DIRS_CSV=$(IFS=',';echo "${INTERNAL_DIRS[*]// /,}";IFS=$) # This can be called with one flag, --verify-only, so it works for both the # update- and verify- scripts. 
diff --git a/hack/update-vendor-licenses.sh b/hack/update-vendor-licenses.sh index b823cb8125f..21d7d2f42cf 100755 --- a/hack/update-vendor-licenses.sh +++ b/hack/update-vendor-licenses.sh @@ -93,7 +93,8 @@ process_content () { esac # Find files - only root and package level - local_files=($( + local_files=() + IFS=" " read -r -a local_files <<< "$( for dir_root in ${package} ${package_root}; do [[ -d ${DEPS_DIR}/${dir_root} ]] || continue @@ -101,7 +102,7 @@ process_content () { find "${DEPS_DIR}/${dir_root}" \ -xdev -follow -maxdepth ${find_maxdepth} \ -type f "${find_names[@]}" - done | sort -u)) + done | sort -u)" local index local f @@ -126,13 +127,13 @@ process_content () { ############################################################################# # MAIN ############################################################################# -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" export GO111MODULE=on # Check bash version -if ((${BASH_VERSINFO[0]}<4)); then +if (( BASH_VERSINFO[0] < 4 )); then echo echo "ERROR: Bash v4+ required." # Extra help for OSX @@ -161,7 +162,7 @@ echo "= Kubernetes licensed under: =" echo cat "${LICENSE_ROOT}/LICENSE" echo -echo "= LICENSE $(cat "${LICENSE_ROOT}/LICENSE" | md5sum | awk '{print $1}')" +echo "= LICENSE $(md5sum < "${LICENSE_ROOT}/LICENSE" | awk '{print $1}')" echo "================================================================================" ) > ${TMP_LICENSE_FILE} @@ -210,7 +211,7 @@ __EOF__ cat "${file}" echo - echo "= ${file} $(cat "${file}" | md5sum | awk '{print $1}')" + echo "= ${file} $(md5sum < "${file}" | awk '{print $1}')" echo "================================================================================" echo done >> ${TMP_LICENSE_FILE} diff --git a/pkg/cloudprovider/providers/.import-restrictions b/pkg/cloudprovider/providers/.import-restrictions index 2d0306a628f..80b05fdd205 100644 --- a/pkg/cloudprovider/providers/.import-restrictions +++ b/pkg/cloudprovider/providers/.import-restrictions @@ -15,9 +15,7 @@ "SelectorRegexp": "k8s[.]io/kubernetes", "AllowedPrefixes": [ "k8s.io/kubernetes/pkg/cloudprovider/providers", - "k8s.io/kubernetes/pkg/credentialprovider", - "k8s.io/kubernetes/pkg/util/mount", - "k8s.io/kubernetes/pkg/version" + "k8s.io/kubernetes/pkg/util/mount" ], "ForbiddenPrefixes": [] } diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 116213f7e87..851daaa4bbd 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -1160,8 +1160,9 @@ func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedule nomNodeName := pod.Status.NominatedNodeName if len(nomNodeName) > 0 { if nodeInfo, found := nodeNameToInfo[nomNodeName]; found { + podPriority := util.GetPodPriority(pod) for _, p := range nodeInfo.Pods() { - if p.DeletionTimestamp != nil && util.GetPodPriority(p) < util.GetPodPriority(pod) { + if p.DeletionTimestamp != nil && util.GetPodPriority(p) < podPriority { // There is a terminating pod on the nominated node. return false } diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go index 921f17410d2..2fc64df6cfa 100644 --- a/pkg/scheduler/factory/plugins.go +++ b/pkg/scheduler/factory/plugins.go @@ -245,8 +245,8 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string { // IsFitPredicateRegistered is useful for testing providers. 
func IsFitPredicateRegistered(name string) bool { - schedulerFactoryMutex.Lock() - defer schedulerFactoryMutex.Unlock() + schedulerFactoryMutex.RLock() + defer schedulerFactoryMutex.RUnlock() _, ok := fitPredicateMap[name] return ok } @@ -408,8 +408,8 @@ func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) { } func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]predicates.FitPredicate, error) { - schedulerFactoryMutex.Lock() - defer schedulerFactoryMutex.Unlock() + schedulerFactoryMutex.RLock() + defer schedulerFactoryMutex.RUnlock() fitPredicates := map[string]predicates.FitPredicate{} for _, name := range names.List() { @@ -451,8 +451,8 @@ func getPredicateMetadataProducer(args PluginFactoryArgs) (predicates.PredicateM } func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]priorities.PriorityConfig, error) { - schedulerFactoryMutex.Lock() - defer schedulerFactoryMutex.Unlock() + schedulerFactoryMutex.RLock() + defer schedulerFactoryMutex.RUnlock() var configs []priorities.PriorityConfig for _, name := range names.List() { @@ -538,8 +538,8 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) { // ListRegisteredFitPredicates returns the registered fit predicates. func ListRegisteredFitPredicates() []string { - schedulerFactoryMutex.Lock() - defer schedulerFactoryMutex.Unlock() + schedulerFactoryMutex.RLock() + defer schedulerFactoryMutex.RUnlock() var names []string for name := range fitPredicateMap { @@ -550,8 +550,8 @@ func ListRegisteredFitPredicates() []string { // ListRegisteredPriorityFunctions returns the registered priority functions. func ListRegisteredPriorityFunctions() []string { - schedulerFactoryMutex.Lock() - defer schedulerFactoryMutex.Unlock() + schedulerFactoryMutex.RLock() + defer schedulerFactoryMutex.RUnlock() var names []string for name := range priorityFunctionMap { diff --git a/pkg/scheduler/internal/cache/node_tree.go b/pkg/scheduler/internal/cache/node_tree.go index f29024d0ed5..1c7ef2c6ebf 100644 --- a/pkg/scheduler/internal/cache/node_tree.go +++ b/pkg/scheduler/internal/cache/node_tree.go @@ -127,6 +127,7 @@ func (nt *NodeTree) removeZone(zone string) { for i, z := range nt.zones { if z == zone { nt.zones = append(nt.zones[:i], nt.zones[i+1:]...) + return } } } diff --git a/pkg/util/bandwidth/fake_shaper.go b/pkg/util/bandwidth/fake_shaper.go index 8c95e3bb317..78577185d73 100644 --- a/pkg/util/bandwidth/fake_shaper.go +++ b/pkg/util/bandwidth/fake_shaper.go @@ -22,28 +22,35 @@ import ( "k8s.io/apimachinery/pkg/api/resource" ) +// FakeShaper provides an implementation of the bandwith.Shaper. +// Beware this is implementation has no features besides Reset and GetCIDRs. 
type FakeShaper struct { CIDRs []string ResetCIDRs []string } +// Limit is not implemented func (f *FakeShaper) Limit(cidr string, egress, ingress *resource.Quantity) error { return errors.New("unimplemented") } +// Reset appends a particular CIDR to the set of ResetCIDRs being managed by this shaper func (f *FakeShaper) Reset(cidr string) error { f.ResetCIDRs = append(f.ResetCIDRs, cidr) return nil } +// ReconcileInterface is not implemented func (f *FakeShaper) ReconcileInterface() error { return errors.New("unimplemented") } +// ReconcileCIDR is not implemented func (f *FakeShaper) ReconcileCIDR(cidr string, egress, ingress *resource.Quantity) error { return errors.New("unimplemented") } +// GetCIDRs returns the set of CIDRs that are being managed by this shaper func (f *FakeShaper) GetCIDRs() ([]string, error) { return f.CIDRs, nil } diff --git a/pkg/util/bandwidth/interfaces.go b/pkg/util/bandwidth/interfaces.go index 6b0e160aae6..ec29d5d1047 100644 --- a/pkg/util/bandwidth/interfaces.go +++ b/pkg/util/bandwidth/interfaces.go @@ -18,7 +18,9 @@ package bandwidth import "k8s.io/apimachinery/pkg/api/resource" -type BandwidthShaper interface { +// Shaper is designed so that the shaper structs created +// satisfy the Shaper interface. +type Shaper interface { // Limit the bandwidth for a particular CIDR on a particular interface // * ingress and egress are in bits/second // * cidr is expected to be a valid network CIDR (e.g. '1.2.3.4/32' or '10.20.0.1/16') diff --git a/pkg/util/bandwidth/linux.go b/pkg/util/bandwidth/linux.go index 7050b4f763c..725c2557e8a 100644 --- a/pkg/util/bandwidth/linux.go +++ b/pkg/util/bandwidth/linux.go @@ -44,6 +44,7 @@ type tcShaper struct { iface string } +// NewTCShaper makes a new tcShaper for the given interface func NewTCShaper(iface string) BandwidthShaper { shaper := &tcShaper{ e: exec.New(), @@ -157,10 +158,9 @@ func (t *tcShaper) findCIDRClass(cidr string) (classAndHandleList [][]string, fo // filter parent 1: protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1 if len(parts) != 19 { return classAndHandleList, false, fmt.Errorf("unexpected output from tc: %s %d (%v)", filter, len(parts), parts) - } else { - resultTmp := []string{parts[18], parts[9]} - classAndHandleList = append(classAndHandleList, resultTmp) } + resultTmp := []string{parts[18], parts[9]} + classAndHandleList = append(classAndHandleList, resultTmp) } } if len(classAndHandleList) > 0 { diff --git a/pkg/util/bandwidth/unsupported.go b/pkg/util/bandwidth/unsupported.go index 7d556fd64da..929f5e0584d 100644 --- a/pkg/util/bandwidth/unsupported.go +++ b/pkg/util/bandwidth/unsupported.go @@ -27,7 +27,8 @@ import ( type unsupportedShaper struct { } -func NewTCShaper(iface string) BandwidthShaper { +// NewTCShaper makes a new unsupportedShapper for the given interface +func NewTCShaper(iface string) Shaper { return &unsupportedShaper{} } diff --git a/pkg/util/bandwidth/utils.go b/pkg/util/bandwidth/utils.go index 451ab68836c..b29825bdfb9 100644 --- a/pkg/util/bandwidth/utils.go +++ b/pkg/util/bandwidth/utils.go @@ -35,6 +35,7 @@ func validateBandwidthIsReasonable(rsrc *resource.Quantity) error { return nil } +// ExtractPodBandwidthResources extracts the ingress and egress from the given pod annotations func ExtractPodBandwidthResources(podAnnotations map[string]string) (ingress, egress *resource.Quantity, err error) { if podAnnotations == nil { return nil, nil, nil diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go 
b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index d5385318353..0bb903f8c4d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -234,6 +234,8 @@ type Cacher struct { // its internal cache and updating its cache in the background based on the // given configuration. func NewCacherFromConfig(config Config) *Cacher { + stopCh := make(chan struct{}) + watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc, config.GetAttrsFunc, config.Versioner) listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) reflectorName := "storage/cacher.go:" + config.ResourcePrefix @@ -245,7 +247,6 @@ func NewCacherFromConfig(config Config) *Cacher { panic("storage codec doesn't seem to match given type: " + err.Error()) } - stopCh := make(chan struct{}) reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) // Configure reflector's pager to for an appropriate pagination chunk size for fetching data from // storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error. @@ -774,12 +775,9 @@ func (c *Cacher) isStopped() bool { // Stop implements the graceful termination. func (c *Cacher) Stop() { - // avoid stopping twice (note: cachers are shared with subresources) - if c.isStopped() { - return - } c.stopLock.Lock() if c.stopped { + // avoid stopping twice (note: cachers are shared with subresources) c.stopLock.Unlock() return } diff --git a/staging/src/k8s.io/client-go/INSTALL.md b/staging/src/k8s.io/client-go/INSTALL.md index 9236f334a35..db97efc515b 100644 --- a/staging/src/k8s.io/client-go/INSTALL.md +++ b/staging/src/k8s.io/client-go/INSTALL.md @@ -7,7 +7,7 @@ library install, don't mind getting HEAD (which may be less stable than a particular release), then simply: ```sh -$ go get k8s.io/client-go@master +go get k8s.io/client-go@master ``` This will record a dependency on `k8s.io/client-go` in your go module. @@ -24,12 +24,12 @@ If you are using a version of go prior to 1.11, or do not wish to use go modules, you can download `k8s.io/client-go` to your `$GOPATH` instead: ```sh -$ go get -u k8s.io/client-go/... -$ go get -u k8s.io/apimachinery/... -$ cd $GOPATH/src/k8s.io/client-go -$ git checkout v11.0.0 -$ cd $GOPATH/src/k8s.io/apimachinery -$ git checkout kubernetes-1.14.0 +go get -u k8s.io/client-go/... +go get -u k8s.io/apimachinery/... 
+cd $GOPATH/src/k8s.io/client-go +git checkout v11.0.0 +cd $GOPATH/src/k8s.io/apimachinery +git checkout kubernetes-1.14.0 ``` This downloads a version of `k8s.io/client-go` prior to v1.12.0, diff --git a/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/README.md b/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/README.md index 6f42ca3575f..5aaf495689e 100644 --- a/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/README.md +++ b/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/README.md @@ -37,7 +37,7 @@ kubectl create clusterrolebinding default-view --clusterrole=view --serviceaccou Then, run the image in a Pod with a single instance Deployment: - $ kubectl run --rm -i demo --image=in-cluster --image-pull-policy=Never + kubectl run --rm -i demo --image=in-cluster --image-pull-policy=Never There are 4 pods in the cluster There are 4 pods in the cluster diff --git a/staging/src/k8s.io/client-go/examples/leader-election/README.md b/staging/src/k8s.io/client-go/examples/leader-election/README.md index cded1f63b3d..7a182c6ecc1 100644 --- a/staging/src/k8s.io/client-go/examples/leader-election/README.md +++ b/staging/src/k8s.io/client-go/examples/leader-election/README.md @@ -8,14 +8,14 @@ Run the following three commands in separate terminals. Each terminal needs a un ```bash # first terminal -$ go run *.go -kubeconfig=/my/config -logtostderr=true -id=1 +go run *.go -kubeconfig=/my/config -logtostderr=true -id=1 # second terminal -$ go run *.go -kubeconfig=/my/config -logtostderr=true -id=2 +go run *.go -kubeconfig=/my/config -logtostderr=true -id=2 # third terminal -$ go run *.go -kubeconfig=/my/config -logtostderr=true -id=3 +go run *.go -kubeconfig=/my/config -logtostderr=true -id=3 ``` > You can ignore the `-kubeconfig` flag if you are running these commands in the Kubernetes cluster. -Now kill the existing leader. You will see from the terminal outputs that one of the remaining two processes will be elected as the new leader. \ No newline at end of file +Now kill the existing leader. You will see from the terminal outputs that one of the remaining two processes will be elected as the new leader. 
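The shell-prompt cleanups above and below touch the client-go example READMEs; for context, the pod-counting programs those READMEs describe reduce to roughly the following sketch. It is illustrative only and written against the client-go API of this era; newer releases additionally take a context.Context as the first argument to List.

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster variant: credentials come from the pod's service account.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	for {
		// Count pods across all namespaces, as in the README transcripts.
		pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
		if err != nil {
			panic(err.Error())
		}
		fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
		time.Sleep(10 * time.Second)
	}
}
```

The out-of-cluster variant differs mainly in building the *rest.Config from a kubeconfig file (clientcmd.BuildConfigFromFlags) instead of rest.InClusterConfig.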
diff --git a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/README.md b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/README.md index c2bccfb63be..b81e6be73ac 100644 --- a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/README.md +++ b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/README.md @@ -22,7 +22,7 @@ Run this application with: Running this application will use the kubeconfig file and then authenticate to the cluster, and print the number of pods in the cluster every 10 seconds: - $ ./app + ./app There are 3 pods in the cluster There are 3 pods in the cluster There are 3 pods in the cluster diff --git a/staging/src/k8s.io/client-go/tools/pager/BUILD b/staging/src/k8s.io/client-go/tools/pager/BUILD index 304d5b65069..9cf8111a631 100644 --- a/staging/src/k8s.io/client-go/tools/pager/BUILD +++ b/staging/src/k8s.io/client-go/tools/pager/BUILD @@ -17,6 +17,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/pager/pager.go b/staging/src/k8s.io/client-go/tools/pager/pager.go index 74ea3586ab8..d265db78683 100644 --- a/staging/src/k8s.io/client-go/tools/pager/pager.go +++ b/staging/src/k8s.io/client-go/tools/pager/pager.go @@ -25,9 +25,11 @@ import ( metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) const defaultPageSize = 500 +const defaultPageBufferSize = 10 // ListPageFunc returns a list object for the given list options. type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) @@ -48,6 +50,9 @@ type ListPager struct { PageFn ListPageFunc FullListIfExpired bool + + // Number of pages to buffer + PageBufferSize int32 } // New creates a new pager from the provided pager function using the default @@ -58,6 +63,7 @@ func New(fn ListPageFunc) *ListPager { PageSize: defaultPageSize, PageFn: fn, FullListIfExpired: true, + PageBufferSize: defaultPageBufferSize, } } @@ -73,6 +79,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti } var list *metainternalversion.List for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + obj, err := p.PageFn(ctx, options) if err != nil { if !errors.IsResourceExpired(err) || !p.FullListIfExpired { @@ -115,3 +127,105 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti options.Continue = m.GetContinue() } } + +// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If +// fn returns an error, processing stops and that error is returned. If fn does not return an error, +// any error encountered while retrieving the list from the server is returned. If the context +// cancels or times out, the context error is returned. Since the list is retrieved in paginated +// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list +// requests exceed the expiration limit of the apiserver being called. 
+// +// Items are retrieved in chunks from the server to reduce the impact on the server with up to +// ListPager.PageBufferSize chunks buffered concurrently in the background. +func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItem(obj, fn) + }) +} + +// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on +// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does +// not return an error, any error encountered while retrieving the list from the server is +// returned. If the context cancels or times out, the context error is returned. Since the list is +// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if +// the pagination list requests exceed the expiration limit of the apiserver being called. +// +// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. +func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if p.PageBufferSize < 0 { + return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) + } + + // Ensure background goroutine is stopped if this call exits before all list items are + // processed. Cancelation error from this deferred cancel call is never returned to caller; + // either the list result has already been sent to bgResultC or the fn error is returned and + // the cancelation error is discarded. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + chunkC := make(chan runtime.Object, p.PageBufferSize) + bgResultC := make(chan error, 1) + go func() { + defer utilruntime.HandleCrash() + + var err error + defer func() { + close(chunkC) + bgResultC <- err + }() + err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { + select { + case chunkC <- chunk: // buffer the chunk, this can block + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }() + + for o := range chunkC { + err := fn(o) + if err != nil { + return err // any fn error should be returned immediately + } + } + // promote the results of our background goroutine to the foreground + return <-bgResultC +} + +// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list +// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return +// an error, any error encountered while retrieving the list from the server is returned. If the +// context cancels or times out, the context error is returned. Since the list is retrieved in +// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the +// pagination list requests exceed the expiration limit of the apiserver being called. +func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if options.Limit == 0 { + options.Limit = p.PageSize + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + return err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return fmt.Errorf("returned object must be a list: %v", err) + } + if err := fn(obj); err != nil { + return err + } + // if we have no more items, return. 
+ if len(m.GetContinue()) == 0 { + return nil + } + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/staging/src/k8s.io/client-go/tools/pager/pager_test.go b/staging/src/k8s.io/client-go/tools/pager/pager_test.go index ae517cab207..2332b53d78f 100644 --- a/staging/src/k8s.io/client-go/tools/pager/pager_test.go +++ b/staging/src/k8s.io/client-go/tools/pager/pager_test.go @@ -21,6 +21,7 @@ import ( "fmt" "reflect" "testing" + "time" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" @@ -115,7 +116,6 @@ func (p *testPager) ExpiresOnSecondPageThenFullList(ctx context.Context, options } return p.PagedList(ctx, options) } - func TestListPager_List(t *testing.T) { type fields struct { PageSize int64 @@ -189,7 +189,11 @@ func TestListPager_List(t *testing.T) { PageFn: tt.fields.PageFn, FullListIfExpired: tt.fields.FullListIfExpired, } - got, err := p.List(tt.args.ctx, tt.args.options) + ctx := tt.args.ctx + if ctx == nil { + ctx = context.Background() + } + got, err := p.List(ctx, tt.args.options) if (err != nil) != tt.wantErr { t.Errorf("ListPager.List() error = %v, wantErr %v", err, tt.wantErr) return @@ -204,3 +208,237 @@ func TestListPager_List(t *testing.T) { }) } } + +func TestListPager_EachListItem(t *testing.T) { + type fields struct { + PageSize int64 + PageFn ListPageFunc + } + tests := []struct { + name string + fields fields + want runtime.Object + wantErr bool + wantPanic bool + isExpired bool + processorErrorOnItem int + processorPanicOnItem int + cancelContextOnItem int + }{ + { + name: "empty page", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 0, rv: "rv:20"}).PagedList}, + want: list(0, "rv:20"), + }, + { + name: "one page", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 9, rv: "rv:20"}).PagedList}, + want: list(9, "rv:20"), + }, + { + name: "one full page", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 10, rv: "rv:20"}).PagedList}, + want: list(10, "rv:20"), + }, + { + name: "two pages", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 11, rv: "rv:20"}).PagedList}, + want: list(11, "rv:20"), + }, + { + name: "three pages", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).PagedList}, + want: list(21, "rv:20"), + }, + { + name: "expires on second page", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).ExpiresOnSecondPage}, + want: list(10, "rv:20"), // all items on the first page should have been visited + wantErr: true, + isExpired: true, + }, + { + name: "error processing item", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 51, rv: "rv:20"}).PagedList}, + want: list(3, "rv:20"), // all the items <= the one the processor returned an error on should have been visited + wantPanic: true, + processorPanicOnItem: 3, + }, + { + name: "cancel context while processing", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 51, rv: "rv:20"}).PagedList}, + want: list(3, "rv:20"), // all the items <= the one the processor returned an error on should have been visited + wantErr: true, + cancelContextOnItem: 3, + }, + { + name: "panic processing item", + fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 51, rv: "rv:20"}).PagedList}, + want: list(3, 
"rv:20"), // all the items <= the one the processor returned an error on should have been visited + wantPanic: true, + }, + } + + processorErr := fmt.Errorf("processor error") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + p := &ListPager{ + PageSize: tt.fields.PageSize, + PageFn: tt.fields.PageFn, + } + var items []runtime.Object + + fn := func(obj runtime.Object) error { + items = append(items, obj) + if tt.processorErrorOnItem > 0 && len(items) == tt.processorErrorOnItem { + return processorErr + } + if tt.processorPanicOnItem > 0 && len(items) == tt.processorPanicOnItem { + panic(processorErr) + } + if tt.cancelContextOnItem > 0 && len(items) == tt.cancelContextOnItem { + cancel() + } + return nil + } + var err error + var panic interface{} + func() { + defer func() { + panic = recover() + }() + err = p.EachListItem(ctx, metav1.ListOptions{}, fn) + }() + if (panic != nil) && !tt.wantPanic { + t.Fatalf(".EachListItem() panic = %v, wantPanic %v", panic, tt.wantPanic) + } else { + return + } + if (err != nil) != tt.wantErr { + t.Errorf("ListPager.EachListItem() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.isExpired != errors.IsResourceExpired(err) { + t.Errorf("ListPager.EachListItem() error = %v, isExpired %v", err, tt.isExpired) + return + } + if tt.processorErrorOnItem > 0 && err != processorErr { + t.Errorf("ListPager.EachListItem() error = %v, processorErrorOnItem %d", err, tt.processorErrorOnItem) + return + } + l := tt.want.(*metainternalversion.List) + if !reflect.DeepEqual(items, l.Items) { + t.Errorf("ListPager.EachListItem() = %v, want %v", items, l.Items) + } + }) + } +} + +func TestListPager_eachListPageBuffered(t *testing.T) { + tests := []struct { + name string + totalPages int + pagesProcessed int + wantPageLists int + pageBufferSize int32 + pageSize int + }{ + { + name: "no buffer, one total page", + totalPages: 1, + pagesProcessed: 1, + wantPageLists: 1, + pageBufferSize: 0, + }, { + name: "no buffer, 1/5 pages processed", + totalPages: 5, + pagesProcessed: 1, + wantPageLists: 2, // 1 received for processing, 1 listed + pageBufferSize: 0, + }, + { + name: "no buffer, 2/5 pages processed", + totalPages: 5, + pagesProcessed: 2, + wantPageLists: 3, + pageBufferSize: 0, + }, + { + name: "no buffer, 5/5 pages processed", + totalPages: 5, + pagesProcessed: 5, + wantPageLists: 5, + pageBufferSize: 0, + }, + { + name: "size 1 buffer, 1/5 pages processed", + totalPages: 5, + pagesProcessed: 1, + wantPageLists: 3, + pageBufferSize: 1, + }, + { + name: "size 1 buffer, 5/5 pages processed", + totalPages: 5, + pagesProcessed: 5, + wantPageLists: 5, + pageBufferSize: 1, + }, + { + name: "size 10 buffer, 1/5 page processed", + totalPages: 5, + pagesProcessed: 1, + wantPageLists: 5, + pageBufferSize: 10, // buffer is larger than list + }, + } + processorErr := fmt.Errorf("processor error") + pageSize := 10 + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pgr := &testPager{t: t, expectPage: int64(pageSize), remaining: tt.totalPages * pageSize, rv: "rv:20"} + pageLists := 0 + wantedPageListsDone := make(chan struct{}) + listFn := func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + pageLists++ + if pageLists == tt.wantPageLists { + close(wantedPageListsDone) + } + return pgr.PagedList(ctx, options) + } + p := &ListPager{ + PageSize: int64(pageSize), + PageBufferSize: tt.pageBufferSize, + PageFn: listFn, + } + + pagesProcessed := 0 + fn := 
func(obj runtime.Object) error { + pagesProcessed++ + if tt.pagesProcessed == pagesProcessed && tt.wantPageLists > 0 { + // wait for buffering to catch up + select { + case <-time.After(time.Second): + return fmt.Errorf("Timed out waiting for %d page lists", tt.wantPageLists) + case <-wantedPageListsDone: + } + return processorErr + } + return nil + } + err := p.eachListChunkBuffered(context.Background(), metav1.ListOptions{}, fn) + if tt.pagesProcessed > 0 && err == processorErr { + // expected + } else if err != nil { + t.Fatal(err) + } + if tt.wantPageLists > 0 && pageLists != tt.wantPageLists { + t.Errorf("expected %d page lists, got %d", tt.wantPageLists, pageLists) + } + if pagesProcessed != tt.pagesProcessed { + t.Errorf("expected %d pages processed, got %d", tt.pagesProcessed, pagesProcessed) + } + }) + } +} diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 28bf1749dae..bfd756beb78 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -43,7 +43,7 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) const ( @@ -65,12 +65,13 @@ const ( // on AWS. A few minutes is typical, so use 10m. LoadBalancerLagTimeoutAWS = 10 * time.Minute - // How long to wait for a load balancer to be created/modified. - //TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable + // LoadBalancerCreateTimeoutDefault is the default time to wait for a load balancer to be created/modified. + // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable LoadBalancerCreateTimeoutDefault = 20 * time.Minute - LoadBalancerCreateTimeoutLarge = 2 * time.Hour + // LoadBalancerCreateTimeoutLarge is the maximum time to wait for a load balancer to be created/modified. + LoadBalancerCreateTimeoutLarge = 2 * time.Hour - // Time required by the loadbalancer to cleanup, proportional to numApps/Ing. + // LoadBalancerCleanupTimeout is the time required by the loadbalancer to cleanup, proportional to numApps/Ing. // Bring the cleanup timeout back down to 5m once b/33588344 is resolved. LoadBalancerCleanupTimeout = 15 * time.Minute @@ -97,10 +98,10 @@ const ( AffinityConfirmCount = 15 ) -// This should match whatever the default/configured range is +// ServiceNodePortRange should match whatever the default/configured range is var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} -// A test jig to help service testing. +// ServiceTestJig is a test jig to help service testing. type ServiceTestJig struct { ID string Name string @@ -255,7 +256,7 @@ func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.Se // If createPod is true, it also creates an RC with 1 replica of // the standard netexec container used everywhere in this test. 
func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service { - By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and ExternalTrafficPolicy=Local") + ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and ExternalTrafficPolicy=Local") svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal @@ -263,7 +264,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s }) if createPod { - By("creating a pod to be part of the service " + serviceName) + ginkgo.By("creating a pod to be part of the service " + serviceName) j.RunOrFail(namespace, nil) } j.SanityCheckService(svc, v1.ServiceTypeNodePort) @@ -276,7 +277,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s // the standard netexec container used everywhere in this test. func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool, tweak func(svc *v1.Service)) *v1.Service { - By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local") + ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local") svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer // We need to turn affinity off for our LB distribution tests @@ -288,10 +289,10 @@ func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceNa }) if createPod { - By("creating a pod to be part of the service " + serviceName) + ginkgo.By("creating a pod to be part of the service " + serviceName) j.RunOrFail(namespace, nil) } - By("waiting for loadbalancer for service " + namespace + "/" + serviceName) + ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName) svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout) j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) return svc @@ -300,7 +301,7 @@ func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceNa // CreateLoadBalancerService creates a loadbalancer service and waits // for it to acquire an ingress IP. func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service { - By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer") + ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer") svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer // We need to turn affinity off for our LB distribution tests @@ -310,7 +311,7 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string } }) - By("waiting for loadbalancer for service " + namespace + "/" + serviceName) + ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName) svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout) j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) return svc @@ -402,7 +403,7 @@ func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { return nodeMap } -// getNodes returns the first maxNodesForTest nodes. 
Useful in large clusters +// GetNodes returns the first maxNodesForTest nodes. Useful in large clusters // where we don't eg: want to create an endpoint per node. func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) { nodes = GetReadySchedulableNodesOrDie(j.Client) @@ -1053,13 +1054,13 @@ func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, requ return nil } -// Simple helper class to avoid too much boilerplate in tests +// ServiceTestFixture is a simple helper class to avoid too much boilerplate in tests type ServiceTestFixture struct { ServiceName string Namespace string Client clientset.Interface - TestId string + TestID string Labels map[string]string rcs map[string]bool @@ -1073,9 +1074,9 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str t.Client = client t.Namespace = namespace t.ServiceName = serviceName - t.TestId = t.ServiceName + "-" + string(uuid.NewUUID()) + t.TestID = t.ServiceName + "-" + string(uuid.NewUUID()) t.Labels = map[string]string{ - "testid": t.TestId, + "testid": t.TestID, } t.rcs = make(map[string]bool) @@ -1087,7 +1088,7 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str return t } -// Build default config for a service (which can then be changed) +// BuildServiceSpec builds default config for a service (which can then be changed) func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service { service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1128,7 +1129,7 @@ func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.Replica return rc, err } -// Create a service, and record it for cleanup +// CreateService creates a service, and record it for cleanup func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) { result, err := t.Client.CoreV1().Services(t.Namespace).Create(service) if err == nil { @@ -1137,7 +1138,7 @@ func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, er return result, err } -// Delete a service, and remove it from the cleanup list +// DeleteService deletes a service, and remove it from the cleanup list func (t *ServiceTestFixture) DeleteService(serviceName string) error { err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) if err == nil { @@ -1149,7 +1150,7 @@ func (t *ServiceTestFixture) DeleteService(serviceName string) error { func (t *ServiceTestFixture) Cleanup() []error { var errs []error for rcName := range t.rcs { - By("stopping RC " + rcName + " in namespace " + t.Namespace) + ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { // First, resize the RC to 0. 
old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) @@ -1182,7 +1183,7 @@ func (t *ServiceTestFixture) Cleanup() []error { } for serviceName := range t.services { - By("deleting service " + serviceName + " in namespace " + t.Namespace) + ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) if err != nil { if !errors.IsNotFound(err) { @@ -1281,7 +1282,7 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI } func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) { - By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) + ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) i := 1 for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) { endpoints, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) @@ -1323,7 +1324,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) { podNames := make([]string, replicas) name := svc.ObjectMeta.Name - By("creating service " + name + " in namespace " + ns) + ginkgo.By("creating service " + name + " in namespace " + ns) _, err := c.CoreV1().Services(ns).Create(svc) if err != nil { return podNames, "", err @@ -1420,7 +1421,7 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect } expectedEndpoints := sets.NewString(expectedPods...) - By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods))) + ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods))) for _, cmdFunc := range commands { passed := false gotEndpoints := sets.NewString() @@ -1567,9 +1568,9 @@ func checkAffinityFailed(tracker affinityTracker, err string) { // number of same response observed in a row. If affinity is not expected, the // test will keep observe until different responses observed. The function will // return false only in case of unexpected errors. -func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, targetPort int, shouldHold bool) bool { - targetIpPort := net.JoinHostPort(targetIp, strconv.Itoa(targetPort)) - cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIpPort) +func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIP string, targetPort int, shouldHold bool) bool { + targetIPPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort)) + cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIPPort) timeout := ServiceTestTimeout if execPod == nil { timeout = LoadBalancerPollTimeout @@ -1577,14 +1578,14 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target var tracker affinityTracker if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { if execPod != nil { - if stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd); err != nil { - Logf("Failed to get response from %s. Retry until timeout", targetIpPort) + stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd) + if err != nil { + Logf("Failed to get response from %s. 
Retry until timeout", targetIPPort) return false, nil - } else { - tracker.recordHost(stdout) } + tracker.recordHost(stdout) } else { - rawResponse := jig.GetHTTPContent(targetIp, targetPort, timeout, "") + rawResponse := jig.GetHTTPContent(targetIP, targetPort, timeout, "") tracker.recordHost(rawResponse.String()) } trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount) @@ -1600,17 +1601,16 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target if pollErr != wait.ErrWaitTimeout { checkAffinityFailed(tracker, pollErr.Error()) return false - } else { - if !trackerFulfilled { - checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIpPort)) - } - if shouldHold { - checkAffinityFailed(tracker, "Affinity should hold but didn't.") - } else { - checkAffinityFailed(tracker, "Affinity shouldn't hold but did.") - } - return true } + if !trackerFulfilled { + checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIPPort)) + } + if shouldHold { + checkAffinityFailed(tracker, "Affinity should hold but didn't.") + } else { + checkAffinityFailed(tracker, "Affinity shouldn't hold but did.") + } + return true } return true } From 80578d5bf125fc9994e812537cf638010ea92d11 Mon Sep 17 00:00:00 2001 From: viegasdom Date: Thu, 11 Apr 2019 18:50:32 +0100 Subject: [PATCH 006/194] Changes code that still used BandWidthShaper instead of Shaper --- pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go | 2 +- pkg/util/bandwidth/linux.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index c3909b1c7f4..53f375392b6 100644 --- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -74,7 +74,7 @@ type kubenetNetworkPlugin struct { netConfig *libcni.NetworkConfig loConfig *libcni.NetworkConfig cniConfig libcni.CNI - bandwidthShaper bandwidth.BandwidthShaper + bandwidthShaper bandwidth.Shaper mu sync.Mutex //Mutex for protecting podIPs map, netConfig, and shaper initialization podIPs map[kubecontainer.ContainerID]string mtu int diff --git a/pkg/util/bandwidth/linux.go b/pkg/util/bandwidth/linux.go index 725c2557e8a..f01e7cc02f9 100644 --- a/pkg/util/bandwidth/linux.go +++ b/pkg/util/bandwidth/linux.go @@ -33,7 +33,7 @@ import ( "k8s.io/klog" ) -// tcShaper provides an implementation of the BandwidthShaper interface on Linux using the 'tc' tool. +// tcShaper provides an implementation of the Shaper interface on Linux using the 'tc' tool. // In general, using this requires that the caller posses the NET_CAP_ADMIN capability, though if you // do this within an container, it only requires the NS_CAPABLE capability for manipulations to that // container's network namespace. 
@@ -45,7 +45,7 @@ type tcShaper struct { } // NewTCShaper makes a new tcShaper for the given interface -func NewTCShaper(iface string) BandwidthShaper { +func NewTCShaper(iface string) Shaper { shaper := &tcShaper{ e: exec.New(), iface: iface, From 9d3d7a7b51b236cd29c3989de2f00865a3d9f8d1 Mon Sep 17 00:00:00 2001 From: viegasdom Date: Fri, 12 Apr 2019 17:24:57 +0100 Subject: [PATCH 007/194] Changes another usage of the BandwidthShaper to Shaper --- pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index 53f375392b6..acd47ee2fbd 100644 --- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -597,7 +597,7 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo // shaper retrieves the bandwidth shaper and, if it hasn't been fetched before, // initializes it and ensures the bridge is appropriately configured // This function should only be called while holding the `plugin.mu` lock -func (plugin *kubenetNetworkPlugin) shaper() bandwidth.BandwidthShaper { +func (plugin *kubenetNetworkPlugin) shaper() bandwidth.Shaper { if plugin.bandwidthShaper == nil { plugin.bandwidthShaper = bandwidth.NewTCShaper(BridgeName) plugin.bandwidthShaper.ReconcileInterface() From 344dd908814767d8f7c929e6df7e18b0f4899be2 Mon Sep 17 00:00:00 2001 From: Thomas Gamble Date: Sat, 20 Apr 2019 18:13:45 +0000 Subject: [PATCH 008/194] Fix golint failures in pkg/securitycontext --- hack/.golint_failures | 1 - pkg/securitycontext/accessors.go | 10 ++++++++++ pkg/securitycontext/util.go | 3 +++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 8ab29fc3b7a..564542b39c6 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -314,7 +314,6 @@ pkg/security/podsecuritypolicy/seccomp pkg/security/podsecuritypolicy/selinux pkg/security/podsecuritypolicy/user pkg/security/podsecuritypolicy/util -pkg/securitycontext pkg/serviceaccount pkg/ssh pkg/util/bandwidth diff --git a/pkg/securitycontext/accessors.go b/pkg/securitycontext/accessors.go index 739ca126f62..283181a7792 100644 --- a/pkg/securitycontext/accessors.go +++ b/pkg/securitycontext/accessors.go @@ -201,6 +201,7 @@ func (w *podSecurityContextWrapper) SetFSGroup(v *int64) { w.podSC.FSGroup = v } +// ContainerSecurityContextAccessor allows reading the values of a SecurityContext object type ContainerSecurityContextAccessor interface { Capabilities() *api.Capabilities Privileged() *bool @@ -213,6 +214,7 @@ type ContainerSecurityContextAccessor interface { AllowPrivilegeEscalation() *bool } +// ContainerSecurityContextMutator allows reading and writing the values of a SecurityContext object type ContainerSecurityContextMutator interface { ContainerSecurityContextAccessor @@ -228,10 +230,14 @@ type ContainerSecurityContextMutator interface { SetAllowPrivilegeEscalation(*bool) } +// NewContainerSecurityContextAccessor returns an accessor for the provided container security context +// May be initialized with a nil SecurityContext func NewContainerSecurityContextAccessor(containerSC *api.SecurityContext) ContainerSecurityContextAccessor { return &containerSecurityContextWrapper{containerSC: containerSC} } +// NewContainerSecurityContextMutator returns a mutator for the provided container security context +// May be initialized with 
a nil SecurityContext func NewContainerSecurityContextMutator(containerSC *api.SecurityContext) ContainerSecurityContextMutator { return &containerSecurityContextWrapper{containerSC: containerSC} } @@ -365,10 +371,14 @@ func (w *containerSecurityContextWrapper) SetAllowPrivilegeEscalation(v *bool) { w.containerSC.AllowPrivilegeEscalation = v } +// NewEffectiveContainerSecurityContextAccessor returns an accessor for reading effective values +// for the provided pod security context and container security context func NewEffectiveContainerSecurityContextAccessor(podSC PodSecurityContextAccessor, containerSC ContainerSecurityContextMutator) ContainerSecurityContextAccessor { return &effectiveContainerSecurityContextWrapper{podSC: podSC, containerSC: containerSC} } +// NewEffectiveContainerSecurityContextMutator returns a mutator for reading and writing effective values +// for the provided pod security context and container security context func NewEffectiveContainerSecurityContextMutator(podSC PodSecurityContextAccessor, containerSC ContainerSecurityContextMutator) ContainerSecurityContextMutator { return &effectiveContainerSecurityContextWrapper{podSC: podSC, containerSC: containerSC} } diff --git a/pkg/securitycontext/util.go b/pkg/securitycontext/util.go index f324f7d1216..a39ee7571a8 100644 --- a/pkg/securitycontext/util.go +++ b/pkg/securitycontext/util.go @@ -44,6 +44,9 @@ func HasCapabilitiesRequest(container *v1.Container) bool { return len(container.SecurityContext.Capabilities.Add) > 0 || len(container.SecurityContext.Capabilities.Drop) > 0 } +// DetermineEffectiveSecurityContext returns a synthesized SecurityContext for reading effective configurations +// from the provided pod's and container's security context. Container's fields take precedence in cases where both +// are set func DetermineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container) *v1.SecurityContext { effectiveSc := securityContextFromPodSecurityContext(pod) containerSc := container.SecurityContext From 2260cc29b90240aa93025cd5e5946e1c91122cb9 Mon Sep 17 00:00:00 2001 From: Xiangyang Chu Date: Wed, 27 Feb 2019 10:26:22 +0800 Subject: [PATCH 009/194] Make test/cmd/a*.sh pass shellcheck --- hack/.shellcheck_failures | 3 - test/cmd/apply.sh | 172 ++++++------- test/cmd/apps.sh | 491 +++++++++++++++++++------------------- test/cmd/authorization.sh | 12 +- 4 files changed, 338 insertions(+), 340 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index eac68a25ae6..158e956ac83 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -33,9 +33,6 @@ ./hack/update-vendor.sh ./hack/verify-golint.sh ./hack/verify-test-featuregates.sh -./test/cmd/apply.sh -./test/cmd/apps.sh -./test/cmd/authorization.sh ./test/cmd/batch.sh ./test/cmd/certificate.sh ./test/cmd/core.sh diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index 9158118098f..d84ba47f0b8 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -27,75 +27,75 @@ run_kubectl_apply_tests() { kube::log::status "Testing kubectl apply" ## kubectl apply should create the resource that doesn't exist yet # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command: apply a pod "test-pod" (doesn't exist) should create this pod - kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # Post-Condition: pod "test-pod" is 
created - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label' + kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] + kubectl get pods test-pod -o yaml "${kube_flags[@]:?}" | grep -q kubectl.kubernetes.io/last-applied-configuration # Clean up - kubectl delete pods test-pod "${kube_flags[@]}" + kubectl delete pods test-pod "${kube_flags[@]:?}" ## kubectl apply should be able to clear defaulted fields. # Pre-Condition: no deployment exists - kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment - kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]:?}" # Post-Condition: deployment "test-deployment-retainkeys" created - kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys' + kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}{{end}}" 'test-deployment-retainkeys' # Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields - [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]] - [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]] - [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]] - [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]] + kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q RollingUpdate + kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q maxSurge + kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q maxUnavailable + kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q emptyDir # Command: apply a deployment "test-deployment-retainkeys" should clear # defaulted fields and successfully update the deployment - [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]] + [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]] # Post-Condition: deployment "test-deployment-retainkeys" has updated fields - [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]] - ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]] - [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]] - ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]] + kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q Recreate + ! 
kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q RollingUpdate + kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q hostPath + ! kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q emptyDir # Clean up - kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}" + kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}" ## kubectl apply -f with label selector should only apply matching objects # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply - kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}" + kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]:?}" # check right pod exists - kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod' + kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field:?}.name}}" 'selector-test-pod' # check wrong pod doesn't exist - output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}") + output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found' # cleanup kubectl delete pods selector-test-pod ## kubectl apply --server-dry-run # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply dry-run - kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # No pod exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply non dry-run creates the pod - kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # apply changes - kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}" + kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" # Post-Condition: label still has initial value - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label' + kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' # clean-up - kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}" ## kubectl apply dry-run on CR # Create CRD - kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__ + kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__ { "kind": "CustomResourceDefinition", "apiVersion": "apiextensions.k8s.io/v1beta1", @@ -117,31 +117,31 @@ run_kubectl_apply_tests() { __EOF__ # Dry-run create the CR - kubectl "${kube_flags[@]}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}" + kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}" # Make sure that the CR doesn't exist - ! kubectl "${kube_flags[@]}" get resource/myobj + ! 
kubectl "${kube_flags[@]:?}" get resource/myobj # clean-up - kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com + kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com ## kubectl apply --prune # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply a - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}" + kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}" # check right pod exists - kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a' + kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' # check wrong pod doesn't exist - output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}") + output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "b" not found' # apply b - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}" + kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}" # check right pod exists - kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b' + kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' # check wrong pod doesn't exist - output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}") + output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "a" not found' # cleanup @@ -149,79 +149,79 @@ __EOF__ # same thing without prune for a sanity check # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply a - kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}" + kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}" # check right pod exists - kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a' + kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' # check wrong pod doesn't exist - output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}") + output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "b" not found' # apply b - kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}" + kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}" # check both pods exist - kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a' - kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b' + kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' + kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' # check wrong pod doesn't exist # cleanup kubectl delete pod/a pod/b ## kubectl apply --prune requires a --all flag to select everything - output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}") + output_message=$(! 
kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" \ 'all resources selected for prune without explicitly passing --all' # should apply everything kubectl apply --all --prune -f hack/testdata/prune - kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a' - kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b' + kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' + kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' kubectl delete pod/a pod/b ## kubectl apply --prune should fallback to delete for non reapable types - kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}" - kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc' - kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}" - kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc' - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}" + kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]:?}" + kube::test::get_object_assert 'pvc a-pvc' "{{${id_field:?}}}" 'a-pvc' + kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]:?}" + kube::test::get_object_assert 'pvc b-pvc' "{{${id_field:?}}}" 'b-pvc' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]:?}" ## kubectl apply --prune --prune-whitelist # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply pod a - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}" + kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}" # check right pod exists - kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a' + kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' # apply svc and don't prune pod a by overwriting whitelist - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}" - kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc' - kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a' + kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]:?}" + kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc' + kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' # apply svc and prune pod a with default whitelist - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}" - kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc' - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]:?}" + kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # cleanup - kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}" + kubectl delete svc prune-svc 2>&1 "${kube_flags[@]:?}" ## kubectl apply -f some.yml --force # Pre-condition: no service exists - kube::test::get_object_assert 
services "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert services "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply service a - kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]:?}" # check right service exists - kube::test::get_object_assert 'services a' "{{${id_field}}}" 'a' + kube::test::get_object_assert 'services a' "{{${id_field:?}}}" 'a' # change immutable field and apply service a - output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]}") + output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'field is immutable' # apply --force to recreate resources for immutable fields - kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]}" + kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]:?}" # check immutable field exists kube::test::get_object_assert 'services a' "{{.spec.clusterIP}}" '10.0.0.12' # cleanup - kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]:?}" ## kubectl apply -k somedir kubectl apply -k hack/testdata/kustomize @@ -252,31 +252,31 @@ run_kubectl_apply_tests() { kube::log::status "Testing kubectl apply --experimental-server-side" ## kubectl apply should create the resource that doesn't exist yet # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command: apply a pod "test-pod" (doesn't exist) should create this pod - kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # Post-Condition: pod "test-pod" is created - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label' + kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' # Clean up - kubectl delete pods test-pod "${kube_flags[@]}" + kubectl delete pods test-pod "${kube_flags[@]:?}" ## kubectl apply --server-dry-run # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply dry-run - kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # No pod exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply non dry-run creates the pod - kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # apply changes - kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}" + kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" # Post-Condition: label still has initial value - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label' 
+ kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' # clean-up - kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}" ## kubectl apply dry-run on CR # Create CRD @@ -302,12 +302,12 @@ run_kubectl_apply_tests() { __EOF__ # Dry-run create the CR - kubectl "${kube_flags[@]}" apply --experimental-server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}" + kubectl "${kube_flags[@]:?}" apply --experimental-server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}" # Make sure that the CR doesn't exist - ! kubectl "${kube_flags[@]}" get resource/myobj + ! kubectl "${kube_flags[@]:?}" get resource/myobj # clean-up - kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com + kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com set +o nounset set +o errexit diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh index 1f947d2e59d..f355a223f7e 100755 --- a/test/cmd/apps.sh +++ b/test/cmd/apps.sh @@ -27,24 +27,24 @@ run_daemonset_tests() { ### Create a rolling update DaemonSet # Pre-condition: no DaemonSet exists - kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}" # Template Generation should be 1 - kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1' - kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}" + kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1' + kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}" # Template Generation should stay 1 - kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1' + kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1' # Test set commands - kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd - kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2' - kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar - kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3' - kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi - kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4' + kubectl set image daemonsets/bind "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd" + kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '2' + kubectl set env daemonsets/bind "${kube_flags[@]:?}" foo=bar + kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '3' + kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi + kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '4' # Clean up - kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}" set +o nounset set +o errexit @@ -59,42 +59,42 @@ run_daemonset_history_tests() { ### Test rolling back a DaemonSet # Pre-condition: no DaemonSet or its 
pods exists - kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command # Create a DaemonSet (revision 1) - kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}" - kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*" + kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]:?}" + kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*" # Rollback to revision 1 - should be no-op - kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}" - kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1" + kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}" + kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Update the DaemonSet (revision 2) - kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}" - kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" - kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" - kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2" - kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*" + kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]:?}" + kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" + kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2" + kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}" - kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" - kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" - kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2" + kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]:?}" + kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2" # Rollback to revision 1 - kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}" - kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert daemonset 
"{{range.items}}{{$container_len}}{{end}}" "1" + kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}" + kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Rollback to revision 1000000 - should fail - output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1) + output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1) kube::test::if_has_string "${output_message}" "unable to find specified revision" - kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1" + kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Rollback to last revision - kubectl rollout undo daemonset "${kube_flags[@]}" - kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" - kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" - kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2" + kubectl rollout undo daemonset "${kube_flags[@]:?}" + kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" + kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" + kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2" # Clean up - kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}" set +o nounset set +o errexit @@ -108,20 +108,20 @@ run_kubectl_apply_deployments_tests() { kube::log::status "Testing kubectl apply deployments" ## kubectl apply should propagate user defined null values # Pre-Condition: no Deployments, ReplicaSets, Pods exist - kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kube::test::get_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply base deployment - kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]:?}" # check right deployment exists - kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl' + kube::test::get_object_assert 'deployments my-depl' "{{${id_field:?}}}" 'my-depl' # check right labels exists kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1' kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1' kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1' # apply new deployment with new template labels - kubectl apply -f 
hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]:?}" # check right labels exists kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '' kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '' @@ -134,24 +134,24 @@ run_kubectl_apply_deployments_tests() { # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0 # Post-Condition: no Deployments, ReplicaSets, Pods exist - kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::wait_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kube::test::wait_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # kubectl apply deployment --overwrite=true --force=true # Pre-Condition: no deployment exists - kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply deployment nginx - kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}" + kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]:?}" # check right deployment exists - kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx' + kube::test::get_object_assert 'deployment nginx' "{{${id_field:?}}}" 'nginx' # apply deployment with new labels and a conflicting resourceVersion - output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}") + output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'Error from server (Conflict)' # apply deployment with --force and --overwrite will succeed kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10 # check the changed deployment - output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2) + output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]:?}" |grep nginx2) kube::test::if_has_string "${output_message}" '"name": "nginx2"' # applying a resource (with --force) that is both conflicting and invalid will # cause the server to only return a "Conflict" error when we attempt to patch. @@ -161,10 +161,10 @@ run_kubectl_apply_deployments_tests() { # invalid, we will receive an invalid error when we attempt to create it, after # having deleted the old resource. Ensure that when this case is reached, the # old resource is restored once again, and the validation error is printed. - output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}") + output_message=$(! 
kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'Invalid value' # Ensure that the old object has been restored - kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2' + kube::test::get_object_assert 'deployment nginx' "{{${template_labels:?}}}" 'nginx2' # cleanup kubectl delete deployments --all --grace-period=10 @@ -181,140 +181,140 @@ run_deployment_tests() { # Test kubectl create deployment (using default - old generator) kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd # Post-Condition: Deployment "nginx" is created. - kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx' + kube::test::get_object_assert 'deploy test-nginx-extensions' "{{${container_name_field:?}}}" 'nginx' # and old generator was used, iow. old defaults are applied output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}') kube::test::if_has_not_string "${output_message}" '2' # Ensure we can interact with deployments through extensions and apps endpoints - output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") + output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'extensions/v1beta1' - output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") + output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'apps/v1' # Clean up - kubectl delete deployment test-nginx-extensions "${kube_flags[@]}" + kubectl delete deployment test-nginx-extensions "${kube_flags[@]:?}" # Test kubectl create deployment kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1 # Post-Condition: Deployment "nginx" is created. - kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx' + kube::test::get_object_assert 'deploy test-nginx-apps' "{{${container_name_field:?}}}" 'nginx' # and new generator was used, iow. 
new defaults are applied output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}') kube::test::if_has_string "${output_message}" '2' # Ensure we can interact with deployments through extensions and apps endpoints - output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") + output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'extensions/v1beta1' - output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") + output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'apps/v1' # Describe command (resource only) should print detailed information kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:" # Describe command (resource only) should print detailed information kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By" # Clean up - kubectl delete deployment test-nginx-apps "${kube_flags[@]}" + kubectl delete deployment test-nginx-apps "${kube_flags[@]:?}" ### Test kubectl create deployment with image and command # Pre-Condition: No deployment exists. - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity # Post-Condition: Deployment "nginx" is created. - kube::test::get_object_assert 'deploy nginx-with-command' "{{$container_name_field}}" 'nginx' + kube::test::get_object_assert 'deploy nginx-with-command' "{{${container_name_field:?}}}" 'nginx' # Clean up - kubectl delete deployment nginx-with-command "${kube_flags[@]}" + kubectl delete deployment nginx-with-command "${kube_flags[@]:?}" ### Test kubectl create deployment should not fail validation # Pre-Condition: No deployment exists. - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]:?}" # Post-Condition: Deployment "deployment-with-unixuserid" is created. - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'deployment-with-unixuserid:' # Clean up - kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}" + kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]:?}" ### Test cascading deletion ## Test that rs is deleted when deployment is deleted. 
# Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Create deployment - kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}" + kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}" # Wait for rs to come up. - kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3' + kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '3' # Deleting the deployment should delete the rs. - kubectl delete deployment nginx-deployment "${kube_flags[@]}" - kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" + kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' ## Test that rs is not deleted when deployment is deleted with cascade set to false. # Pre-condition: no deployment and rs exist - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Create deployment kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd # Wait for rs to come up. - kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1' + kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1' # Delete the deployment with cascade set to false. - kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false + kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" --cascade=false # Wait for the deployment to be deleted and then verify that rs is not # deleted. - kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1' + kube::test::wait_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1' # Cleanup # Find the name of the rs to be deleted. 
- output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}}) - kubectl delete rs ${output_message} "${kube_flags[@]}" + output_message=$(kubectl get rs "${kube_flags[@]:?}" -o template --template="{{range.items}}{{${id_field:?}}}{{end}}") + kubectl delete rs "${output_message}" "${kube_flags[@]:?}" ### Auto scale deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' + kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' # autoscale 2~3 pods, no CPU utilization specified - kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3 - kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80' + kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3 + kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80' # Clean up # Note that we should delete hpa first, otherwise it may fight with the deployment reaper. - kubectl delete hpa nginx-deployment "${kube_flags[@]}" - kubectl delete deployment.apps nginx-deployment "${kube_flags[@]}" + kubectl delete hpa nginx-deployment "${kube_flags[@]:?}" + kubectl delete deployment.apps nginx-deployment "${kube_flags[@]:?}" ### Rollback a deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command # Create a deployment (revision 1) - kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:' - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx:' + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Rollback to revision 1 - should be no-op - kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Update the deployment (revision 2) - kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" 
"${IMAGE_DEPLOYMENT_R2}:" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd" - kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]:?}" | grep "test-cmd" + kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" # Rollback to revision 1 - kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}" + kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}" sleep 1 - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Rollback to revision 1000000 - should be no-op - ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Rollback to last revision - kubectl rollout undo deployment nginx "${kube_flags[@]}" + kubectl rollout undo deployment nginx "${kube_flags[@]:?}" sleep 1 - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" # Pause the deployment - kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}" + kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}" # A paused deployment cannot be rolled back - ! kubectl rollout undo deployment nginx "${kube_flags[@]}" + ! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" # A paused deployment cannot be restarted - ! kubectl rollout restart deployment nginx "${kube_flags[@]}" + ! 
kubectl rollout restart deployment nginx "${kube_flags[@]:?}" # Resume the deployment - kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}" + kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}" # The resumed deployment can now be rolled back - kubectl rollout undo deployment nginx "${kube_flags[@]}" + kubectl rollout undo deployment nginx "${kube_flags[@]:?}" # Check that the new replica set has all old revisions stored in an annotation newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')" kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3" @@ -327,83 +327,84 @@ run_deployment_tests() { rs="$(kubectl get rs "${newrs}" -o yaml)" kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\"" cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}" + #${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}" # Deletion of both deployments should not be blocked - kubectl delete deployment nginx2 "${kube_flags[@]}" + kubectl delete deployment nginx2 "${kube_flags[@]:?}" # Clean up - kubectl delete deployment nginx "${kube_flags[@]}" + kubectl delete deployment nginx "${kube_flags[@]:?}" ### Set image of a deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Create a deployment - kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set the deployment's image - kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set non-existing container should fail - ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}" + ! 
kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" # Set image of deployments without specifying name - kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set image of a deployment specified by file - kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set image of a local file without talking to the server - kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" --local -o yaml + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set image of all containers of the deployment - kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Set image of all containners of the deployment again when image not change - kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + 
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Clean up - kubectl delete deployment nginx-deployment "${kube_flags[@]}" + kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" ### Set env of a deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Create a deployment - kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}" - kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}" - kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' + kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' #configmap is special here due to controller will create kube-root-ca.crt for each namespace automatically - kube::test::get_object_assert 'configmaps/test-set-env-config' "{{$id_field}}" 'test-set-env-config' - kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:' + kube::test::get_object_assert 'configmaps/test-set-env-config' "{{${id_field:?}}}" 'test-set-env-config' + kube::test::get_object_assert secret "{{range.items}}{{${id_field:?}}}:{{end}}" 'test-set-env-secret:' # Set env of deployments by configmap from keys - kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}" + kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]:?}" # Assert correct value in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2' # Assert single value in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1' # Set env of deployments by configmap - kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}" + kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}" # Assert all values in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2' # Set env of deployments for all container - kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}" + kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]:?}" # Set env of deployments for specific container - kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}" + kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]:?}" # Set env of deployments by secret from keys - kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}" + kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]:?}" # Set env of deployments by secret - kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}" + kubectl set env 
deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]:?}" # Remove specific env of deployment kubectl set env deployment nginx-deployment env- # Clean up - kubectl delete deployment nginx-deployment "${kube_flags[@]}" - kubectl delete configmap test-set-env-config "${kube_flags[@]}" - kubectl delete secret test-set-env-secret "${kube_flags[@]}" + kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" + kubectl delete configmap test-set-env-config "${kube_flags[@]:?}" + kubectl delete secret test-set-env-secret "${kube_flags[@]:?}" set +o nounset set +o errexit @@ -418,42 +419,42 @@ run_statefulset_history_tests() { ### Test rolling back a StatefulSet # Pre-condition: no statefulset or its pods exists - kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command # Create a StatefulSet (revision 1) - kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}" - kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*" + kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]:?}" + kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*" # Rollback to revision 1 - should be no-op - kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}" - kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" + kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Update the statefulset (revision 2) - kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" - kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*" + kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]:?}" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" + kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}" - kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" - kube::test::get_object_assert statefulset 
"{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" + kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]:?}" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" # Rollback to revision 1 - kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" + kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Rollback to revision 1000000 - should fail - output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1) + output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1) kube::test::if_has_string "${output_message}" "unable to find specified revision" - kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Rollback to last revision - kubectl rollout undo statefulset "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" + kubectl rollout undo statefulset "${kube_flags[@]:?}" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" # Clean up - delete newest configuration - kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]:?}" # Post-condition: no pods from statefulset controller wait-for-pods-with-label "app=nginx-statefulset" "" @@ -470,26 +471,26 @@ run_stateful_set_tests() { ### Create and stop statefulset, make sure it doesn't leak pods # Pre-condition: no statefulset exists - kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command: create statefulset - kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}" ### Scale statefulset test with 
current-replicas and replicas # Pre-condition: 0 replicas - kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0' - kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1' + kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '0' + kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '1' # Command: Scale up - kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}" + kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]:?}" # Post-condition: 1 replica, named nginx-0 - kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1' - kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2' + kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '1' + kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '2' # Typically we'd wait and confirm that N>1 replicas are up, but this framework # doesn't start the scheduler, so pet-0 will block all others. # TODO: test robust scaling in an e2e. wait-for-pods-with-label "app=nginx-statefulset" "nginx-0" ### Clean up - kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}" # Post-condition: no pods from statefulset controller wait-for-pods-with-label "app=nginx-statefulset" "" @@ -507,40 +508,40 @@ run_rs_tests() { ### Create and stop a replica set, make sure it doesn't leak pods # Pre-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" kube::log::status "Deleting rs" - kubectl delete rs frontend "${kube_flags[@]}" + kubectl delete rs frontend "${kube_flags[@]:?}" # Post-condition: no pods from frontend replica set - kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${id_field:?}}}:{{end}}" '' ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods. 
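# The "${kube_flags[@]:?}" and "{{${id_field:?}}}" rewrites throughout this file rely on
# bash's ":?" parameter expansion, which turns an unset or empty variable into a hard error
# instead of a silent empty expansion. A minimal illustrative sketch, not part of the test
# itself (demo_var and demo_flags are made-up names):
demo_var="value"
echo "${demo_var:?}"          # prints "value"; an unset or empty demo_var would abort here instead
demo_flags=("--some-flag")
echo "${demo_flags[@]:?}"     # same guard for arrays: an empty or unset demo_flags would abort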
# Pre-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command #TODO(mortent): Remove this workaround when ReplicaSet bug described in issue #69376 is fixed local replicaset_name="frontend-no-cascade" - sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]}" -f - + sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name:?}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]:?}" -f - # wait for all 3 pods to be set up - kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:' + kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:' kube::log::status "Deleting rs" - kubectl delete rs "${replicaset_name}" "${kube_flags[@]}" --cascade=false + kubectl delete rs "${replicaset_name}" "${kube_flags[@]:?}" --cascade=false # Wait for the rs to be deleted. - kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Post-condition: All 3 pods still remain from frontend replica set kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:' # Cleanup - kubectl delete pods -l "tier=frontend" "${kube_flags[@]}" - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}" + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' ### Create replica set frontend from YAML # Pre-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" # Post-condition: frontend replica set is created - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:' # Describe command should print detailed information kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" # Describe command should print events information by default @@ -562,16 +563,16 @@ run_rs_tests() { ### Scale replica set frontend with current-replicas and replicas # Pre-condition: 3 replicas - kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3' + kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3' # Command - kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}" + kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}" # Post-condition: 2 replicas - kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2' + kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '2' # Set up three deploy, two deploy have same label - kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}" - 
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}" - kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]:?}" kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1' kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1' kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1' @@ -586,78 +587,78 @@ run_rs_tests() { kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3' kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3' # Clean-up - kubectl delete rs frontend "${kube_flags[@]}" - kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}" + kubectl delete rs frontend "${kube_flags[@]:?}" + kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]:?}" ### Expose replica set as service - kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" # Pre-condition: 3 replicas - kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3' + kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3' # Command - kubectl expose rs frontend --port=80 "${kube_flags[@]}" + kubectl expose rs frontend --port=80 "${kube_flags[@]:?}" # Post-condition: service exists and the port is unnamed - kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" ' 80' + kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{${port_field:?}}}" ' 80' # Create a service using service/v1 generator - kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}" + kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]:?}" # Post-condition: service exists and the port is named default. 
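# The "*=${IMAGE_DEPLOYMENT_R1}" style arguments above quote the asterisk together with the
# image name as one word, so the shell never attempts pathname expansion on it. A minimal
# illustrative sketch (scratch directory and file name are made up):
mkdir -p /tmp/setimage-glob-demo && cd /tmp/setimage-glob-demo
touch nginx=1.0
echo *=1.0        # unquoted, the glob matches the file and prints: nginx=1.0
echo "*=1.0"      # quoted, it stays literal: *=1.0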
- kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80' + kube::test::get_object_assert 'service frontend-2' "{{${port_name:?}}} {{${port_field:?}}}" 'default 80' # Cleanup services - kubectl delete service frontend{,-2} "${kube_flags[@]}" + kubectl delete service frontend{,-2} "${kube_flags[@]:?}" # Test set commands # Pre-condition: frontend replica set exists at generation 1 - kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1' - kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd - kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2' - kubectl set env rs/frontend "${kube_flags[@]}" foo=bar - kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3' - kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi - kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4' + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '1' + kubectl set image rs/frontend "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd" + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2' + kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3' + kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4' ### Delete replica set with id # Pre-condition: frontend replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:' # Command - kubectl delete rs frontend "${kube_flags[@]}" + kubectl delete rs frontend "${kube_flags[@]:?}" # Post-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' ### Create two replica sets # Pre-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" - kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]:?}" # Post-condition: frontend and redis-slave - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:' ### Delete multiple replica sets at once # Pre-condition: frontend and redis-slave - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:' # Command - kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once + kubectl delete rs frontend redis-slave "${kube_flags[@]:?}" # delete multiple replica sets at once # Post-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' - if 
kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then + if kube::test::if_supports_resource "${horizontalpodautoscalers:?}" ; then ### Auto scale replica set # Pre-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:' # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file - kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70 - kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70' - kubectl delete hpa frontend "${kube_flags[@]}" + kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" --max=2 --cpu-percent=70 + kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70' + kubectl delete hpa frontend "${kube_flags[@]:?}" # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name - kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3 - kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80' - kubectl delete hpa frontend "${kube_flags[@]}" + kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3 + kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80' + kubectl delete hpa frontend "${kube_flags[@]:?}" # autoscale without specifying --max should fail - ! kubectl autoscale rs frontend "${kube_flags[@]}" + ! kubectl autoscale rs frontend "${kube_flags[@]:?}" # Clean up - kubectl delete rs frontend "${kube_flags[@]}" + kubectl delete rs frontend "${kube_flags[@]:?}" fi set +o nounset diff --git a/test/cmd/authorization.sh b/test/cmd/authorization.sh index 895ccd2a55e..e271fb1a546 100755 --- a/test/cmd/authorization.sh +++ b/test/cmd/authorization.sh @@ -59,21 +59,21 @@ run_impersonation_tests() { kube::log::status "Testing impersonation" - output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1) + output_message=$(! 
kubectl get pods "${kube_flags_with_token[@]:?}" --as-group=foo 2>&1) kube::test::if_has_string "${output_message}" 'without impersonating a user' - if kube::test::if_supports_resource "${csr}" ; then + if kube::test::if_supports_resource "${csr:?}" ; then # --as - kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 + kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" --as=user1 kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1' kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated' - kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" + kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" # --as-group - kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon + kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3' kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon ' - kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" + kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" fi set +o nounset From 906058bf7b81bd60c45258dbb7a64fa4e27872d2 Mon Sep 17 00:00:00 2001 From: Xiangyang Chu <936394+xychu@users.noreply.github.com> Date: Tue, 2 Apr 2019 11:42:47 +0800 Subject: [PATCH 010/194] Update grep -q usage to avoid closed pipe --- test/cmd/apply.sh | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index d84ba47f0b8..68d29f5ffa5 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -33,7 +33,7 @@ run_kubectl_apply_tests() { # Post-Condition: pod "test-pod" is created kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' # Post-Condition: pod "test-pod" has configuration annotation - kubectl get pods test-pod -o yaml "${kube_flags[@]:?}" | grep -q kubectl.kubernetes.io/last-applied-configuration + grep -q kubectl.kubernetes.io/last-applied-configuration <<< kubectl get pods test-pod -o yaml "${kube_flags[@]:?}" # Clean up kubectl delete pods test-pod "${kube_flags[@]:?}" @@ -46,18 +46,18 @@ run_kubectl_apply_tests() { # Post-Condition: deployment "test-deployment-retainkeys" created kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}{{end}}" 'test-deployment-retainkeys' # Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields - kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q RollingUpdate - kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q maxSurge - kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q maxUnavailable - kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q emptyDir + grep -q RollingUpdate <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + grep -q maxSurge <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + grep -q maxUnavailable <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + grep -q emptyDir <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" # Command: apply a 
deployment "test-deployment-retainkeys" should clear # defaulted fields and successfully update the deployment [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]] # Post-Condition: deployment "test-deployment-retainkeys" has updated fields - kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q Recreate - ! kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q RollingUpdate - kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q hostPath - ! kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" | grep -q emptyDir + grep -q Recreate <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + ! grep -q RollingUpdate <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + grep -q hostPath <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + ! grep -q emptyDir <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" # Clean up kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}" From 7a385bf2ea981e0d5a6078b1957583338ea048f2 Mon Sep 17 00:00:00 2001 From: Xiangyang Chu <936394+xychu@users.noreply.github.com> Date: Fri, 26 Apr 2019 11:49:04 +0800 Subject: [PATCH 011/194] Fix wrong pipe in grep -q --- test/cmd/apply.sh | 18 +++++++++--------- test/cmd/apps.sh | 3 +-- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index 68d29f5ffa5..8b6acec44a1 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -33,7 +33,7 @@ run_kubectl_apply_tests() { # Post-Condition: pod "test-pod" is created kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' # Post-Condition: pod "test-pod" has configuration annotation - grep -q kubectl.kubernetes.io/last-applied-configuration <<< kubectl get pods test-pod -o yaml "${kube_flags[@]:?}" + grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" # Clean up kubectl delete pods test-pod "${kube_flags[@]:?}" @@ -46,18 +46,18 @@ run_kubectl_apply_tests() { # Post-Condition: deployment "test-deployment-retainkeys" created kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}{{end}}" 'test-deployment-retainkeys' # Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields - grep -q RollingUpdate <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" - grep -q maxSurge <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" - grep -q maxUnavailable <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" - grep -q emptyDir <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" + grep -q maxSurge <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" + grep -q maxUnavailable <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" + grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" # Command: apply a deployment "test-deployment-retainkeys" should clear # defaulted fields and successfully update the deployment [[ 
"$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]] # Post-Condition: deployment "test-deployment-retainkeys" has updated fields - grep -q Recreate <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" - ! grep -q RollingUpdate <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" - grep -q hostPath <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" - ! grep -q emptyDir <<< kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}" + grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" + ! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" + grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" + ! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" # Clean up kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}" diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh index f355a223f7e..4390a3947ca 100755 --- a/test/cmd/apps.sh +++ b/test/cmd/apps.sh @@ -326,8 +326,7 @@ run_deployment_tests() { newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')" rs="$(kubectl get rs "${newrs}" -o yaml)" kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\"" - cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}" - #${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}" + ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}" # Deletion of both deployments should not be blocked kubectl delete deployment nginx2 "${kube_flags[@]:?}" # Clean up From 9e35d3d709ea9a48f2568a4162c5716f7fd9a169 Mon Sep 17 00:00:00 2001 From: obitech Date: Wed, 24 Apr 2019 21:44:18 +0200 Subject: [PATCH 012/194] Fix golint issues in pkg/kubelet/oom --- hack/.golint_failures | 2 +- pkg/kubelet/kubelet.go | 6 +++--- pkg/kubelet/oom/oom_watcher_linux.go | 16 ++++++++-------- pkg/kubelet/oom/oom_watcher_linux_test.go | 5 +++-- pkg/kubelet/oom/oom_watcher_unsupported.go | 8 ++++---- pkg/kubelet/oom/types.go | 8 +++----- 6 files changed, 22 insertions(+), 23 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index c42cebb1350..f4f7a3390a7 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -181,7 +181,7 @@ pkg/kubelet/dockershim/network/testing pkg/kubelet/events pkg/kubelet/lifecycle pkg/kubelet/metrics -pkg/kubelet/oom +pkg/kubelet/pod pkg/kubelet/pod/testing pkg/kubelet/preemption pkg/kubelet/prober diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index aa1c571e74a..57c843a7e2d 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -32,7 +32,7 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -465,7 +465,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, containerRefManager := kubecontainer.NewRefManager() - oomWatcher := oomwatcher.NewOOMWatcher(kubeDeps.Recorder) + oomWatcher := oomwatcher.NewWatcher(kubeDeps.Recorder) 
clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS)) for _, ipEntry := range kubeCfg.ClusterDNS { @@ -1079,7 +1079,7 @@ type Kubelet struct { os kubecontainer.OSInterface // Watcher of out of memory events. - oomWatcher oomwatcher.OOMWatcher + oomWatcher oomwatcher.Watcher // Monitor resource usage resourceAnalyzer serverstats.ResourceAnalyzer diff --git a/pkg/kubelet/oom/oom_watcher_linux.go b/pkg/kubelet/oom/oom_watcher_linux.go index 77c75bf65cb..82984d23904 100644 --- a/pkg/kubelet/oom/oom_watcher_linux.go +++ b/pkg/kubelet/oom/oom_watcher_linux.go @@ -19,7 +19,7 @@ limitations under the License. package oom import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/record" @@ -28,23 +28,23 @@ import ( "github.com/google/cadvisor/utils/oomparser" ) -type realOOMWatcher struct { +type realWatcher struct { recorder record.EventRecorder } -var _ OOMWatcher = &realOOMWatcher{} +var _ Watcher = &realWatcher{} -// NewOOMWatcher creates and initializes a OOMWatcher based on parameters. -func NewOOMWatcher(recorder record.EventRecorder) OOMWatcher { - return &realOOMWatcher{ +// NewWatcher creates and initializes a OOMWatcher based on parameters. +func NewWatcher(recorder record.EventRecorder) Watcher { + return &realWatcher{ recorder: recorder, } } const systemOOMEvent = "SystemOOM" -// Watches for system oom's and records an event for every system oom encountered. -func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error { +// Start watches for system oom's and records an event for every system oom encountered. +func (ow *realWatcher) Start(ref *v1.ObjectReference) error { oomLog, err := oomparser.New() if err != nil { return err diff --git a/pkg/kubelet/oom/oom_watcher_linux_test.go b/pkg/kubelet/oom/oom_watcher_linux_test.go index 5aba6e41424..0a09a2fd842 100644 --- a/pkg/kubelet/oom/oom_watcher_linux_test.go +++ b/pkg/kubelet/oom/oom_watcher_linux_test.go @@ -21,14 +21,15 @@ import ( "github.com/stretchr/testify/assert" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" ) +// TestBasic verifies that the OOMWatch works without error. func TestBasic(t *testing.T) { fakeRecorder := &record.FakeRecorder{} node := &v1.ObjectReference{} - oomWatcher := NewOOMWatcher(fakeRecorder) + oomWatcher := NewWatcher(fakeRecorder) assert.NoError(t, oomWatcher.Start(node)) // TODO: Improve this test once cadvisor exports events.EventChannel as an interface diff --git a/pkg/kubelet/oom/oom_watcher_unsupported.go b/pkg/kubelet/oom/oom_watcher_unsupported.go index b7f1530b17d..a18b19d7af2 100644 --- a/pkg/kubelet/oom/oom_watcher_unsupported.go +++ b/pkg/kubelet/oom/oom_watcher_unsupported.go @@ -19,16 +19,16 @@ limitations under the License. package oom import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" ) type oomWatcherUnsupported struct{} -var _ OOMWatcher = new(oomWatcherUnsupported) +var _ Watcher = new(oomWatcherUnsupported) -// NewOOMWatcher creates a fake one here -func NewOOMWatcher(_ record.EventRecorder) OOMWatcher { +// NewWatcher creates a fake one here +func NewWatcher(_ record.EventRecorder) Watcher { return &oomWatcherUnsupported{} } diff --git a/pkg/kubelet/oom/types.go b/pkg/kubelet/oom/types.go index 09d50b181de..ca9dd8b17af 100644 --- a/pkg/kubelet/oom/types.go +++ b/pkg/kubelet/oom/types.go @@ -16,11 +16,9 @@ limitations under the License. 
package oom -import ( - "k8s.io/api/core/v1" -) +import v1 "k8s.io/api/core/v1" -// OOMWatcher defines the interface of OOM watchers. -type OOMWatcher interface { +// Watcher defines the interface of OOM watchers. +type Watcher interface { Start(ref *v1.ObjectReference) error } From ccfb6379f3fa61175250248073e7d55d72155b49 Mon Sep 17 00:00:00 2001 From: obitech Date: Sat, 27 Apr 2019 10:36:38 +0200 Subject: [PATCH 013/194] Remove pkg/kubelet/pod from .golint_failures --- hack/.golint_failures | 1 - 1 file changed, 1 deletion(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index f4f7a3390a7..671bf1ce4b6 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -181,7 +181,6 @@ pkg/kubelet/dockershim/network/testing pkg/kubelet/events pkg/kubelet/lifecycle pkg/kubelet/metrics -pkg/kubelet/pod pkg/kubelet/pod/testing pkg/kubelet/preemption pkg/kubelet/prober From becbed87f12dbb3abc0b90e73be7286220dd9dbd Mon Sep 17 00:00:00 2001 From: ialidzhikov Date: Fri, 19 Apr 2019 10:14:03 +0300 Subject: [PATCH 014/194] Update gem versions Signed-off-by: ialidzhikov --- .../addons/fluentd-elasticsearch/fluentd-es-ds.yaml | 10 +++++----- .../fluentd-elasticsearch/fluentd-es-image/Gemfile | 6 +++--- .../fluentd-elasticsearch/fluentd-es-image/Makefile | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index f39ac46f044..c341c5ffc04 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -45,22 +45,22 @@ roleRef: apiVersion: apps/v1 kind: DaemonSet metadata: - name: fluentd-es-v2.5.1 + name: fluentd-es-v2.5.2 namespace: kube-system labels: k8s-app: fluentd-es - version: v2.5.1 + version: v2.5.2 addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: k8s-app: fluentd-es - version: v2.5.1 + version: v2.5.2 template: metadata: labels: k8s-app: fluentd-es - version: v2.5.1 + version: v2.5.2 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). 
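# The v2.5.1 -> v2.5.2 bump has to land in the DaemonSet name, both version labels, the
# selector, and the image tag together. One hedged way to keep them in sync (GNU sed
# assumed; the path is the file touched by this patch):
sed -i 's/v2\.5\.1/v2.5.2/g' cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
grep -n 'v2\.5\.' cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml   # confirm no stale v2.5.1 remains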
@@ -72,7 +72,7 @@ spec: serviceAccountName: fluentd-es containers: - name: fluentd-es - image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.1 + image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.2 env: - name: FLUENTD_ARGS value: --no-supervisor -q diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile index bdb3cc020eb..e479dd31a01 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile @@ -1,12 +1,12 @@ source 'https://rubygems.org' gem 'activesupport', '~>5.2.2' -gem 'fluentd', '<=1.4.1' +gem 'fluentd', '<=1.4.2' gem 'fluent-plugin-concat', '~>2.3.0' gem 'fluent-plugin-detect-exceptions', '~>0.0.12' -gem 'fluent-plugin-elasticsearch', '~>3.3.3' +gem 'fluent-plugin-elasticsearch', '~>3.4.3' gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.1.6' gem 'fluent-plugin-multi-format-parser', '~>1.0.0' gem 'fluent-plugin-prometheus', '~>1.3.0' gem 'fluent-plugin-systemd', '~>1.0.2' -gem 'oj', '~>3.7.9' +gem 'oj', '~>3.7.12' diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 61efd46ab88..62d89dbd30f 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -16,7 +16,7 @@ PREFIX = gcr.io/fluentd-elasticsearch IMAGE = fluentd -TAG = v2.5.1 +TAG = v2.5.2 build: gcloud builds submit --tag $(PREFIX)/$(IMAGE):$(TAG) From 2ba023167057b39c6a70b849ccf18654fe5d835a Mon Sep 17 00:00:00 2001 From: SataQiu Date: Fri, 3 May 2019 17:01:15 +0800 Subject: [PATCH 015/194] fix shellcheck failures of cluster/gce/upgrade-aliases.sh --- cluster/gce/upgrade-aliases.sh | 49 ++++++++++++++++++++-------------- hack/.shellcheck_failures | 1 - 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/cluster/gce/upgrade-aliases.sh b/cluster/gce/upgrade-aliases.sh index 92b2382074a..7c853af488c 100755 --- a/cluster/gce/upgrade-aliases.sh +++ b/cluster/gce/upgrade-aliases.sh @@ -26,7 +26,7 @@ if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then exit 1 fi -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. 
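# Referencing "${BASH_SOURCE[0]}" instead of the bare array name is what shellcheck's
# array-without-index warning (SC2128) asks for: the value is the same, but the explicit
# index makes the intent clear. A sketch of the usual "where does this script live" idiom
# (illustrative only; script_dir is a made-up name):
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "running from: ${script_dir}"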
source "${KUBE_ROOT}/hack/lib/util.sh" source "${KUBE_ROOT}/cluster/kube-util.sh" @@ -35,8 +35,9 @@ source "${KUBE_ROOT}/cluster/kube-util.sh" # Assumed vars: # PROJECT function get-k8s-node-routes-count() { - local k8s_node_routes_count=$(gcloud compute routes list \ - --project=${PROJECT} --filter='description=k8s-node-route' \ + local k8s_node_routes_count + k8s_node_routes_count=$(gcloud compute routes list \ + --project="${PROJECT}" --filter='description=k8s-node-route' \ --format='value(name)' | wc -l) echo -n "${k8s_node_routes_count}" } @@ -50,11 +51,12 @@ function get-k8s-node-routes-count() { # Vars set: # IP_ALIAS_SUBNETWORK function detect-k8s-subnetwork() { - local subnetwork_url=$(gcloud compute instances describe \ - ${KUBE_MASTER} --project=${PROJECT} --zone=${ZONE} \ + local subnetwork_url + subnetwork_url=$(gcloud compute instances describe \ + "${KUBE_MASTER}" --project="${PROJECT}" --zone="${ZONE}" \ --format='value(networkInterfaces[0].subnetwork)') if [[ -n ${subnetwork_url} ]]; then - IP_ALIAS_SUBNETWORK=$(echo ${subnetwork_url##*/}) + IP_ALIAS_SUBNETWORK=${subnetwork_url##*/} fi } @@ -69,21 +71,24 @@ function detect-k8s-subnetwork() { function set-allow-subnet-cidr-routes-overlap() { local allow_subnet_cidr_routes_overlap allow_subnet_cidr_routes_overlap=$(gcloud compute networks subnets \ - describe ${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \ + describe "${IP_ALIAS_SUBNETWORK}" --project="${PROJECT}" --region="${REGION}" \ --format='value(allowSubnetCidrRoutesOverlap)') local allow_overlap=$1 - if [ ${allow_subnet_cidr_routes_overlap,,} = ${allow_overlap} ]; then + if [ "${allow_subnet_cidr_routes_overlap,,}" = "${allow_overlap}" ]; then echo "Subnet ${IP_ALIAS_SUBNETWORK}'s allowSubnetCidrRoutesOverlap is already set as $1" return fi echo "Setting subnet \"${IP_ALIAS_SUBNETWORK}\" allowSubnetCidrRoutesOverlap to $1" - local fingerprint=$(gcloud compute networks subnets describe \ - ${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \ + local fingerprint + fingerprint=$(gcloud compute networks subnets describe \ + "${IP_ALIAS_SUBNETWORK}" --project="${PROJECT}" --region="${REGION}" \ --format='value(fingerprint)') - local access_token=$(gcloud auth print-access-token) + local access_token + access_token=$(gcloud auth print-access-token) local request="{\"allowSubnetCidrRoutesOverlap\":$1, \"fingerprint\":\"${fingerprint}\"}" - local subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}" + local subnetwork_url + subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}" until curl -s --header "Content-Type: application/json" --header "Authorization: Bearer ${access_token}" \ -X PATCH -d "${request}" "${subnetwork_url}" --output /dev/null; do printf "." 
@@ -100,7 +105,8 @@ function set-allow-subnet-cidr-routes-overlap() { # CLUSTER_IP_RANGE # SERVICE_CLUSTER_IP_RANGE function add-k8s-subnet-secondary-ranges() { - local secondary_ranges=$(gcloud compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \ + local secondary_ranges + secondary_ranges=$(gcloud compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \ --project="${PROJECT}" --region="${REGION}" \ --format='value(secondaryIpRanges)') if [[ "${secondary_ranges}" =~ "pods-default" && "${secondary_ranges}" =~ "services-default" ]]; then @@ -109,8 +115,8 @@ function add-k8s-subnet-secondary-ranges() { fi echo "Adding secondary ranges: pods-default (${CLUSTER_IP_RANGE}), services-default (${SERVICE_CLUSTER_IP_RANGE})" - until gcloud compute networks subnets update ${IP_ALIAS_SUBNETWORK} \ - --project=${PROJECT} --region=${REGION} \ + until gcloud compute networks subnets update "${IP_ALIAS_SUBNETWORK}" \ + --project="${PROJECT}" --region="${REGION}" \ --add-secondary-ranges="pods-default=${CLUSTER_IP_RANGE},services-default=${SERVICE_CLUSTER_IP_RANGE}"; do printf "." sleep 1 @@ -124,9 +130,12 @@ function add-k8s-subnet-secondary-ranges() { function delete-k8s-node-routes() { local -a routes local -r batch=200 - routes=( $(gcloud compute routes list \ - --project=${PROJECT} --filter='description=k8s-node-route' \ - --format='value(name)') ) + routes=() + while IFS=$'\n' read -r route; do + routes+=( "${route}" ) + done < <(gcloud compute routes list \ + --project="${PROJECT}" --filter='description=k8s-node-route' \ + --format='value(name)') while (( "${#routes[@]}" > 0 )); do echo Deleting k8s node routes "${routes[*]::${batch}}" gcloud compute routes delete --project "${PROJECT}" --quiet "${routes[@]::${batch}}" @@ -145,7 +154,7 @@ fi echo "Found ${k8s_node_routes_count} K8s node routes. Proceeding to upgrade them to IP aliases based connectivity..." detect-k8s-subnetwork -if [ -z ${IP_ALIAS_SUBNETWORK} ]; then +if [ -z "${IP_ALIAS_SUBNETWORK}" ]; then echo "No k8s cluster subnetwork found. Exiting..." exit 1 fi @@ -165,7 +174,7 @@ export ETCD_IMAGE=3.3.10-0 export ETCD_VERSION=3.3.10 # Upgrade master with updated kube envs -${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l +"${KUBE_ROOT}/cluster/gce/upgrade.sh" -M -l delete-k8s-node-routes set-allow-subnet-cidr-routes-overlap false diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 06f051c65fc..e1e722572fe 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -10,7 +10,6 @@ ./cluster/gce/gci/flexvolume_node_setup.sh ./cluster/gce/gci/health-monitor.sh ./cluster/gce/gci/master-helper.sh -./cluster/gce/upgrade-aliases.sh ./cluster/gce/upgrade.sh ./cluster/gce/util.sh ./cluster/log-dump/log-dump.sh From ea37acfbc4f5aea37bb23f73f4ec84f84d015a18 Mon Sep 17 00:00:00 2001 From: Tim Bannister Date: Fri, 3 May 2019 16:48:55 +0100 Subject: [PATCH 016/194] Fix kubectl rename-context description grammar --- pkg/kubectl/cmd/config/rename_context.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/config/rename_context.go b/pkg/kubectl/cmd/config/rename_context.go index 519b12456bc..14fbf8f5a8b 100644 --- a/pkg/kubectl/cmd/config/rename_context.go +++ b/pkg/kubectl/cmd/config/rename_context.go @@ -45,7 +45,7 @@ var ( renameContextLong = templates.LongDesc(` Renames a context from the kubeconfig file. - CONTEXT_NAME is the context name that you wish change. + CONTEXT_NAME is the context name that you wish to change. NEW_NAME is the new name you wish to set. 
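The delete-k8s-node-routes rewrite in the upgrade-aliases.sh patch above trades an unquoted `routes=( $(gcloud ...) )` for a while-read loop over process substitution, so route names are split only on newlines rather than on any whitespace. A minimal sketch of that pattern, with `printf` standing in for the gcloud call (illustrative only, not the production code):

    routes=()
    while IFS=$'\n' read -r route; do
      routes+=( "${route}" )
    done < <(printf '%s\n' "route-a" "route-b" "route with spaces")
    echo "collected ${#routes[@]} routes"   # -> collected 3 routes, spaces preserved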
From 52885a8ec3b4f8381bea5f28a26323fe9e456633 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Wed, 17 Apr 2019 19:37:29 +0000 Subject: [PATCH 017/194] Check conformance test should not call any Skip Basically conformance test checks the target k8s cluster works all features which are specified in each test and that should not depend on any condition. This adds checking that conformance test should not call any Skip methods. And it detects the existing conformance test "creating/deleting custom resource definition objects works" calls framework.SkipUnlessServerVersionGTE(). So this removes the Skip also. --- hack/conformance/check_conformance_test_requirements.go | 8 ++++---- test/e2e/apimachinery/custom_resource_definition.go | 2 -- test/e2e/common/BUILD | 1 - test/e2e/common/downward_api.go | 8 -------- test/e2e/kubectl/kubectl.go | 3 --- 5 files changed, 4 insertions(+), 18 deletions(-) diff --git a/hack/conformance/check_conformance_test_requirements.go b/hack/conformance/check_conformance_test_requirements.go index a0519483b9d..30897f1a151 100755 --- a/hack/conformance/check_conformance_test_requirements.go +++ b/hack/conformance/check_conformance_test_requirements.go @@ -34,7 +34,7 @@ const ( //e.g. framework.ConformanceIt("should provide secure master service ", func() { patternStartConformance = "framework.ConformanceIt\\(.*, func\\(\\) {$" patternEndConformance = "}\\)$" - patternSkipProviderIs = "Skip.*ProviderIs\\(" + patternSkip = "framework.Skip.*\\(" ) // This function checks the requirement: it works for all providers (e.g., no SkipIfProviderIs/SkipUnlessProviderIs calls) @@ -44,7 +44,7 @@ func checkAllProviders(e2eFile string) error { regStartConformance := regexp.MustCompile(patternStartConformance) regEndConformance := regexp.MustCompile(patternEndConformance) - regSkipProviderIs := regexp.MustCompile(patternSkipProviderIs) + regSkip := regexp.MustCompile(patternSkip) fileInput, err := ioutil.ReadFile(e2eFile) if err != nil { @@ -62,9 +62,9 @@ func checkAllProviders(e2eFile string) error { inConformanceCode = true } if inConformanceCode { - if regSkipProviderIs.MatchString(line) { + if regSkip.MatchString(line) { // To list all invalid places in a single operation of this tool, here doesn't return error and continues checking. 
- fmt.Fprintf(os.Stderr, "%v: Conformance test should not call SkipIfProviderIs()/SkipUnlessProviderIs()\n", e2eFile) + fmt.Fprintf(os.Stderr, "%v: Conformance test should not call any framework.Skip*()\n", e2eFile) checkFailed = true } if regEndConformance.MatchString(line) { diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index d3bc9d040a1..fd2aa81ac2f 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -40,8 +40,6 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() { */ framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() { - framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery()) - config, err := framework.LoadConfig() if err != nil { framework.Failf("failed to load config: %v", err) diff --git a/test/e2e/common/BUILD b/test/e2e/common/BUILD index e11f5b83cfc..60e89b72a3c 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -69,7 +69,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go index d1169ed21f8..a2c51d8c953 100644 --- a/test/e2e/common/downward_api.go +++ b/test/e2e/common/downward_api.go @@ -23,18 +23,12 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" ) -var ( - hostIPVersion = utilversion.MustParseSemantic("v1.8.0") - podUIDVersion = utilversion.MustParseSemantic("v1.8.0") -) - var _ = Describe("[sig-node] Downward API", func() { f := framework.NewDefaultFramework("downward-api") @@ -90,7 +84,6 @@ var _ = Describe("[sig-node] Downward API", func() { Description: Downward API MUST expose Pod and Container fields as environment variables. Specify host IP as environment variable in the Pod Spec are visible at runtime in the container. */ framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() { - framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery()) podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -218,7 +211,6 @@ var _ = Describe("[sig-node] Downward API", func() { Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container. */ framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() { - framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery()) podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 1a3ca537c6b..6f4e33608eb 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -944,9 +944,6 @@ metadata: Description: Deploy a redis controller and a redis service. 
Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information. */ framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() { - kv, err := framework.KubectlVersion() - framework.ExpectNoError(err) - framework.SkipUnlessServerVersionGTE(kv, c.Discovery()) controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) serviceJSON := readTestFileOrDie(redisServiceFilename) From 6732906d3bf27fbe32624747f95a25155ecaf24b Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Fri, 3 May 2019 15:46:44 -0400 Subject: [PATCH 018/194] Implement UpdateContainerResources in FakeRuntimeService Signed-off-by: Aldo Culquicondor --- .../k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go index 1bd4ab2bde6..3efb6f04a9a 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go @@ -407,6 +407,10 @@ func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.Co } func (r *FakeRuntimeService) UpdateContainerResources(string, *runtimeapi.LinuxContainerResources) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "UpdateContainerResources") return nil } From f83bac61a4493f4326e9d2db5783f95b3add498d Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Fri, 3 May 2019 14:07:44 -0700 Subject: [PATCH 019/194] Obtain unsorted slice in cpuAccumulator#freeCores --- pkg/kubelet/cm/cpumanager/cpu_assignment.go | 2 +- pkg/kubelet/cm/cpuset/cpuset.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index be6babab14c..d9cf909fd22 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -69,7 +69,7 @@ func (a *cpuAccumulator) freeSockets() []int { // - socket ID, ascending // - core ID, ascending func (a *cpuAccumulator) freeCores() []int { - socketIDs := a.details.Sockets().ToSlice() + socketIDs := a.details.Sockets().ToSliceNoSort() sort.Slice(socketIDs, func(i, j int) bool { iCores := a.details.CoresInSocket(socketIDs[i]).Filter(a.isCoreFree) diff --git a/pkg/kubelet/cm/cpuset/cpuset.go b/pkg/kubelet/cm/cpuset/cpuset.go index d87efc78593..e49a54d2452 100644 --- a/pkg/kubelet/cm/cpuset/cpuset.go +++ b/pkg/kubelet/cm/cpuset/cpuset.go @@ -172,6 +172,16 @@ func (s CPUSet) ToSlice() []int { return result } +// ToSliceNoSort returns a slice of integers that contains all elements from +// this set. +func (s CPUSet) ToSliceNoSort() []int { + result := []int{} + for cpu := range s.elems { + result = append(result, cpu) + } + return result +} + // String returns a new string representation of the elements in this CPU set // in canonical linux CPU list format. // From e256c15df3ae38791d83291283c00198fb830884 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 4 May 2019 16:54:02 -0400 Subject: [PATCH 020/194] Make metav1beta1 be equivalent to metav1 internally All except PartialObjectMetadataList are identical and should be made that way internally. This allows the internalversion to convert between them. 
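The v1beta1 kinds become Go type aliases of their metav1 counterparts, so both
group versions share a single in-memory representation. A minimal sketch of the
pattern (abbreviated from the types.go hunk below; only a subset of the aliases
is shown):

    package v1beta1

    import v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // Each v1beta1 table kind is now an alias of the equivalent v1 type.
    type Table = v1.Table
    type TableOptions = v1.TableOptions
    type PartialObjectMetadata = v1.PartialObjectMetadata

    // PartialObjectMetadataList stays a distinct struct because its protobuf
    // field ids differ from the v1 type (ListMeta was accidentally omitted
    // prior to 1.15), but its items now reuse the v1 element type.
    type PartialObjectMetadataList struct {
        v1.TypeMeta `json:",inline"`
        v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"`
        Items       []*v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"`
    }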
--- .../pkg/apis/meta/internalversion/register.go | 18 +- .../apimachinery/pkg/apis/meta/v1/register.go | 15 +- .../apimachinery/pkg/apis/meta/v1/types.go | 2 +- .../pkg/apis/meta/v1beta1/deepcopy.go | 27 -- .../pkg/apis/meta/v1beta1/generated.pb.go | 314 ++---------------- .../pkg/apis/meta/v1beta1/generated.proto | 24 +- .../pkg/apis/meta/v1beta1/register.go | 4 +- .../pkg/apis/meta/v1beta1/types.go | 141 ++------ .../v1beta1/types_swagger_doc_generated.go | 67 +--- .../pkg/apis/meta/v1beta1/validation/BUILD | 1 + .../meta/v1beta1/validation/validation.go | 3 +- .../meta/v1beta1/zz_generated.deepcopy.go | 133 +------- 12 files changed, 88 insertions(+), 661 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index 158655caad1..d0149810b3c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -89,18 +89,12 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &metav1beta1.PartialObjectMetadata{}, &metav1beta1.PartialObjectMetadataList{}, ) - scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, - &metav1beta1.Table{}, - &metav1beta1.TableOptions{}, - &metav1beta1.PartialObjectMetadata{}, - &metav1beta1.PartialObjectMetadataList{}, - ) - scheme.AddKnownTypes(metav1.SchemeGroupVersion, - &metav1.Table{}, - &metav1.TableOptions{}, - &metav1.PartialObjectMetadata{}, - &metav1.PartialObjectMetadataList{}, - ) + if err := metav1beta1.AddMetaToScheme(scheme); err != nil { + return err + } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return err + } // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{}, diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 4610eed6462..24fc134150f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -94,6 +94,15 @@ func init() { &PatchOptions{}, ) + if err := AddMetaToScheme(scheme); err != nil { + panic(err) + } + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(RegisterDefaults(scheme)) +} + +func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, &TableOptions{}, @@ -101,6 +110,10 @@ func init() { &PartialObjectMetadataList{}, ) + return scheme.AddConversionFuncs( + Convert_Slice_string_To_v1_IncludeObjectPolicy, + ) + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - utilruntime.Must(RegisterDefaults(scheme)) + //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index d3547940024..a6fe80cc3c3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1150,8 +1150,8 @@ type Fields struct { // Table is a tabular representation of a set of API resources. The server transforms the // object into a set of preferred columns for quickly reviewing the objects. 
-// +protobuf=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false type Table struct { TypeMeta `json:",inline"` // Standard list metadata. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 3b2bedd9233..2b7e8ca0bfb 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -15,30 +15,3 @@ limitations under the License. */ package v1beta1 - -import "k8s.io/apimachinery/pkg/runtime" - -func (in *TableRow) DeepCopy() *TableRow { - if in == nil { - return nil - } - - out := new(TableRow) - - if in.Cells != nil { - out.Cells = make([]interface{}, len(in.Cells)) - for i := range in.Cells { - out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) - } - } - - if in.Conditions != nil { - out.Conditions = make([]TableRowCondition, len(in.Conditions)) - for i := range in.Conditions { - in.Conditions[i].DeepCopyInto(&out.Conditions[i]) - } - } - - in.Object.DeepCopyInto(&out.Object) - return out -} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 2c242fef71a..557dfa2c731 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -24,9 +24,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto It has these top-level messages: - PartialObjectMetadata PartialObjectMetadataList - TableOptions */ package v1beta1 @@ -34,6 +32,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -50,51 +50,15 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptorGenerated, []int{0} } -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - func init() { - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") } -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -125,33 +89,11 @@ func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + n1, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 - return i, nil -} - -func (m *TableOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) + i += n1 return i, nil } @@ -164,14 +106,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *PartialObjectMetadata) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *PartialObjectMetadataList) Size() (n int) { var l int _ = l @@ -186,14 +120,6 @@ func (m *PartialObjectMetadataList) Size() (n int) { return n } -func (m *TableOptions) Size() (n int) { - var l int - _ = l - l = len(m.IncludeObject) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -207,37 +133,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, + `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata", 1) + `,`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *TableOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -246,86 +152,6 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -381,7 +207,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &PartialObjectMetadata{}) + m.Items = append(m.Items, &k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -437,85 +263,6 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } return nil } -func (m *TableOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 
- for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -626,31 +373,26 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 402 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x33, 0xca, 0xe2, 0xee, 0xac, 0x0b, 0x12, 0x11, 0xd6, 0x1e, 0x26, 0xcb, 0x9e, 0x2a, - 0xd8, 0x19, 0x5b, 0x44, 0x3c, 0x4a, 0x6e, 0x05, 0xa5, 0x25, 0x78, 0x12, 0x0f, 0x4e, 0x92, 0xd7, - 0x74, 0xcc, 0x9f, 0x09, 0x99, 0x49, 0xa1, 0x37, 0x3f, 0x82, 0x1f, 0xab, 0xc7, 0x1e, 0x7b, 0x90, - 0x60, 0xe3, 0xb7, 0xf0, 0x24, 0xf9, 0xa3, 0x4d, 0x6b, 0x65, 0x73, 0x9b, 0xf7, 0x79, 0x79, 0x7e, - 0x79, 0x9e, 0x37, 0xd8, 0x09, 0x5f, 0x2b, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, - 0xb6, 0x84, 0xc4, 0x97, 0x19, 0x6b, 0x17, 0x3c, 0x15, 0x31, 0xf7, 0x16, 0x22, 0x81, 0x6c, 0xc5, - 0xd2, 0x30, 0xa8, 0x04, 0xc5, 0x62, 0xd0, 0x9c, 0x2d, 0xc7, 0x2e, 0x68, 0x3e, 0x66, 0x01, 0x24, - 0x90, 0x71, 0x0d, 0x3e, 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xac, 0xb1, 0xd2, 0xae, 0x95, 0xa6, 0x61, - 0x50, 0x09, 0x8a, 0x56, 0x56, 0xda, 0x5a, 0x07, 0xa3, 0x40, 0xe8, 0x45, 0xee, 0x52, 0x4f, 0xc6, - 0x2c, 0x90, 0x81, 0x64, 0x35, 0xc1, 0xcd, 0x3f, 0xd7, 0x53, 0x3d, 0xd4, 0xaf, 0x86, 0x3c, 0x78, - 0xd9, 0x27, 0xd4, 0x71, 0x9e, 0xc1, 0x7f, 0xab, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0xff, 0x18, 0x5e, - 0xdd, 0x65, 0x50, 0xde, 0x02, 0x62, 0x7e, 0xec, 0xbb, 0x5d, 0xe1, 0x27, 0x73, 0x9e, 0x69, 0xc1, - 0xa3, 0x99, 0xfb, 0x05, 0x3c, 0xfd, 0x0e, 0x34, 0xf7, 0xb9, 0xe6, 0xe6, 0x27, 0x7c, 0x1e, 0xb7, - 0xef, 0x6b, 0x74, 0x83, 0x86, 0x97, 0x93, 0x17, 0xb4, 0xcf, 0x91, 0xe8, 0x9e, 0x63, 0x9b, 0xeb, - 0xc2, 0x32, 0xca, 0xc2, 0xc2, 0x7b, 0xcd, 0xf9, 0x4b, 0xbd, 0xfd, 0x8e, 0xf0, 0xd3, 0x93, 0xdf, - 0x7e, 0x2b, 0x94, 0x36, 0x39, 0x3e, 0x13, 0x1a, 0x62, 0x75, 0x8d, 0x6e, 0xee, 0x0f, 0x2f, 0x27, - 0x6f, 0x68, 0xef, 0x3f, 0x44, 0x4f, 0x42, 0xed, 0x8b, 0xb2, 0xb0, 0xce, 0xa6, 0x15, 0xd2, 0x69, - 
0xc8, 0xe6, 0xc7, 0x4e, 0xc5, 0x7b, 0x75, 0x45, 0xda, 0xaf, 0x62, 0x15, 0xb0, 0x2e, 0xf8, 0xa8, - 0x2d, 0x78, 0xfe, 0x47, 0xe9, 0xd4, 0x73, 0xf1, 0xc3, 0xf7, 0xdc, 0x8d, 0x60, 0x96, 0x6a, 0x21, - 0x13, 0x65, 0x3a, 0xf8, 0x4a, 0x24, 0x5e, 0x94, 0xfb, 0xd0, 0x04, 0xab, 0xaf, 0x7a, 0x61, 0x3f, - 0x6f, 0x11, 0x57, 0xd3, 0xee, 0xf2, 0x57, 0x61, 0x3d, 0x3e, 0x10, 0xe6, 0x32, 0x12, 0xde, 0xca, - 0x39, 0x44, 0xd8, 0xa3, 0xf5, 0x8e, 0x18, 0x9b, 0x1d, 0x31, 0xb6, 0x3b, 0x62, 0x7c, 0x2d, 0x09, - 0x5a, 0x97, 0x04, 0x6d, 0x4a, 0x82, 0xb6, 0x25, 0x41, 0x3f, 0x4a, 0x82, 0xbe, 0xfd, 0x24, 0xc6, - 0x87, 0x07, 0xed, 0x61, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x78, 0xd8, 0x63, 0x3a, 0x03, + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, + 0x14, 0x86, 0xe3, 0x7b, 0x55, 0x51, 0xd2, 0x05, 0x75, 0x2a, 0x1d, 0xdc, 0x8a, 0xa9, 0x0c, 0xb5, + 0xd5, 0x0a, 0x21, 0x24, 0xb6, 0x6e, 0x48, 0x20, 0x50, 0x47, 0xd4, 0xc5, 0x49, 0x0f, 0xa9, 0x09, + 0x8e, 0x23, 0xfb, 0xa4, 0x12, 0x1b, 0x8f, 0xc0, 0x63, 0x75, 0xec, 0x46, 0xa7, 0x8a, 0x9a, 0x17, + 0x41, 0x49, 0x03, 0x42, 0x05, 0x44, 0xb6, 0x9c, 0xff, 0xe8, 0xfb, 0xf2, 0xdb, 0xf6, 0xc7, 0xf1, + 0x99, 0x65, 0x52, 0xf3, 0x38, 0x0b, 0xc0, 0x24, 0x80, 0x60, 0xf9, 0x1c, 0x92, 0xa9, 0x36, 0xbc, + 0x5c, 0x88, 0x54, 0x2a, 0x11, 0xce, 0x64, 0x02, 0xe6, 0x91, 0xa7, 0x71, 0x94, 0x07, 0x96, 0x2b, + 0x40, 0xc1, 0xe7, 0x83, 0x00, 0x50, 0x0c, 0x78, 0x04, 0x09, 0x18, 0x81, 0x30, 0x65, 0xa9, 0xd1, + 0xa8, 0x9b, 0xc7, 0x5b, 0x94, 0x7d, 0x45, 0x59, 0x1a, 0x47, 0x79, 0x60, 0x59, 0x8e, 0xb2, 0x12, + 0x6d, 0xf7, 0x23, 0x89, 0xb3, 0x2c, 0x60, 0xa1, 0x56, 0x3c, 0xd2, 0x91, 0xe6, 0x85, 0x21, 0xc8, + 0xee, 0x8a, 0xa9, 0x18, 0x8a, 0xaf, 0xad, 0xb9, 0x7d, 0x52, 0xa5, 0xd4, 0x6e, 0x9f, 0xf6, 0xaf, + 0x47, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf4, 0x2f, 0xc0, 0x86, 0x33, 0x50, 0x62, 0x97, + 0x3b, 0x7a, 0x21, 0xfe, 0xe1, 0x8d, 0x30, 0x28, 0xc5, 0xc3, 0x75, 0x70, 0x0f, 0x21, 0x5e, 0x01, + 0x8a, 0xa9, 0x40, 0x71, 0x29, 0x2d, 0x36, 0x27, 0x7e, 0x4d, 0x22, 0x28, 0xdb, 0x22, 0xdd, 0xff, + 0xbd, 0xc6, 0xf0, 0x9c, 0x55, 0xb9, 0x26, 0xf6, 0xa3, 0x6f, 0xb4, 0xef, 0xd6, 0x9d, 0xda, 0x45, + 0x6e, 0x1b, 0x6f, 0xa5, 0xcd, 0x89, 0x5f, 0x57, 0xe5, 0xb6, 0xf5, 0xaf, 0x4b, 0x7a, 0x8d, 0x21, + 0xab, 0xf6, 0x83, 0xbc, 0x5b, 0xee, 0x1d, 0x1d, 0x2c, 0xd6, 0x1d, 0xcf, 0xad, 0x3b, 0xf5, 0x8f, + 0x64, 0xfc, 0x69, 0x1c, 0xf5, 0x17, 0x1b, 0xea, 0x2d, 0x37, 0xd4, 0x5b, 0x6d, 0xa8, 0xf7, 0xe4, + 0x28, 0x59, 0x38, 0x4a, 0x96, 0x8e, 0x92, 0x95, 0xa3, 0xe4, 0xd5, 0x51, 0xf2, 0xfc, 0x46, 0xbd, + 0xdb, 0xbd, 0xf2, 0x59, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xce, 0xfa, 0x86, 0x29, 0x56, 0x02, 0x00, 0x00, } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 5097ad350da..6339e719ad3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -28,17 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadata { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; -} - -// PartialObjectMetadataList contains a list of objects containing only their metadata +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message PartialObjectMetadataList { // Standard list metadata. @@ -47,16 +37,6 @@ message PartialObjectMetadataList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated PartialObjectMetadata items = 1; -} - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message TableOptions { - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - optional string includeObject = 1; + repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index 6d348fe14f4..108a0764e72 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -39,12 +39,12 @@ var scheme = runtime.NewScheme() var ParameterCodec = runtime.NewParameterCodec(scheme) func init() { - if err := AddToScheme(scheme); err != nil { + if err := AddMetaToScheme(scheme); err != nil { panic(err) } } -func AddToScheme(scheme *runtime.Scheme) error { +func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, &TableOptions{}, diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 2750f65f829..8b7f6bd4f54 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -19,143 +19,46 @@ package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. - // Table is a tabular representation of a set of API resources. The server transforms the // object into a set of preferred columns for quickly reviewing the objects. -// +protobuf=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Table struct { - v1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - v1.ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. 
- ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} +// +protobuf=false +type Table = v1.Table // TableColumnDefinition contains information about a column returned in the Table. // +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. - Priority int32 `json:"priority"` -} +type TableColumnDefinition = v1.TableColumnDefinition // TableRow is an individual row in a table. // +protobuf=false -type TableRow struct { - // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple - // maps, or lists, or null. See the type field of the column definition for a more detailed description. - Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. - // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} +type TableRow = v1.TableRow // TableRowCondition allows a row to be marked with additional information. // +protobuf=false -type TableRowCondition struct { - // Type of row condition. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} +type TableRowCondition = v1.TableRowCondition -type RowConditionType string +type RowConditionType = v1.RowConditionType -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) +type ConditionStatus = v1.ConditionStatus -type ConditionStatus string - -// These are valid condition statuses. 
"ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) +type IncludeObjectPolicy = v1.IncludeObjectPolicy // TableOptions are used when a Table is requested by the caller. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - v1.TypeMeta `json:",inline"` - - // NoHeaders is only exposed for internal callers. - NoHeaders bool `json:"-"` - - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} +type TableOptions = v1.TableOptions // PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients // to get access to a particular ObjectMeta schema without knowing the details of the version. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - v1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} +type PartialObjectMetadata = v1.PartialObjectMetadata -// PartialObjectMetadataList contains a list of objects containing only their metadata +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. + +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PartialObjectMetadataList struct { v1.TypeMeta `json:",inline"` @@ -165,5 +68,17 @@ type PartialObjectMetadataList struct { v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` // items contains each of the included items. 
- Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` + Items []*v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` } + +const ( + RowCompleted = v1.RowCompleted + + ConditionTrue = v1.ConditionTrue + ConditionFalse = v1.ConditionFalse + ConditionUnknown = v1.ConditionUnknown + + IncludeNone = v1.IncludeNone + IncludeMetadata = v1.IncludeMetadata + IncludeObject = v1.IncludeObject +) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index d5bb86e84d1..26d13f5d91c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -27,17 +27,8 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "items contains each of the included items.", } @@ -46,60 +37,4 @@ func (PartialObjectMetadataList) SwaggerDoc() map[string]string { return map_PartialObjectMetadataList } -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. 
Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/BUILD index f2515b972f0..cbf11f06dec 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/BUILD @@ -7,6 +7,7 @@ go_library( importpath = "k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation", visibility = ["//visibility:public"], deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go index 28256349018..70fc01a34eb 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go @@ -17,6 +17,7 @@ limitations under the License. 
package validation import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -25,7 +26,7 @@ import ( func ValidateTableOptions(opts *metav1beta1.TableOptions) field.ErrorList { var allErrs field.ErrorList switch opts.IncludeObject { - case metav1beta1.IncludeMetadata, metav1beta1.IncludeNone, metav1beta1.IncludeObject, "": + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": default: allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 74c370149f2..9c21e91f13f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -21,35 +21,10 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. -func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { *out = *in @@ -57,11 +32,11 @@ func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*PartialObjectMetadata, len(*in)) + *out = make([]*v1.PartialObjectMetadata, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(PartialObjectMetadata) + *out = new(v1.PartialObjectMetadata) (*in).DeepCopyInto(*out) } } @@ -86,105 +61,3 @@ func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. 
-func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. -func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} From fad9dec758be4dcc49735aea98ada2de46cff9fe Mon Sep 17 00:00:00 2001 From: Mike Spreitzer Date: Sat, 4 May 2019 23:27:42 -0400 Subject: [PATCH 021/194] Fix comment on SharedInformer.Run The old wording suggested that `Run` only gets the controller started. Changed the wording to make it clear that `Run` only returns after the controller is stopped. --- staging/src/k8s.io/client-go/tools/cache/shared_informer.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/client-go/tools/cache/shared_informer.go b/staging/src/k8s.io/client-go/tools/cache/shared_informer.go index 9715d344ecc..5bc1b0ff9b7 100644 --- a/staging/src/k8s.io/client-go/tools/cache/shared_informer.go +++ b/staging/src/k8s.io/client-go/tools/cache/shared_informer.go @@ -53,7 +53,8 @@ type SharedInformer interface { GetStore() Store // GetController gives back a synthetic interface that "votes" to start the informer GetController() Controller - // Run starts the shared informer, which will be stopped when stopCh is closed. + // Run starts and runs the shared informer, returning after it stops. + // The informer will be stopped when stopCh is closed. 
Run(stopCh <-chan struct{}) // HasSynced returns true if the shared informer's store has synced. HasSynced() bool From a01f0b4e5eac9e3bb6b5e024f92df15bffe06c24 Mon Sep 17 00:00:00 2001 From: caiweidong Date: Fri, 26 Apr 2019 13:09:20 +0800 Subject: [PATCH 022/194] Bugfix: fix channel leak when stop error --- pkg/kubelet/util/pluginwatcher/plugin_watcher.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go index fbe8aa69d4a..f1c6293a144 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -161,14 +161,17 @@ func (w *Watcher) Stop() error { close(w.stopCh) c := make(chan struct{}) + var once sync.Once + closeFunc := func() { close(c) } go func() { - defer close(c) + defer once.Do(closeFunc) w.wg.Wait() }() select { case <-c: case <-time.After(11 * time.Second): + once.Do(closeFunc) return fmt.Errorf("timeout on stopping watcher") } From a019f1791929855f3e988665e25f87c76c770688 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 6 May 2019 11:30:57 +0800 Subject: [PATCH 023/194] Add etag support to Azure loadbalancer, route and routetable clients --- .../azure/azure_client.go | 131 ++++++++++++++++-- 1 file changed, 118 insertions(+), 13 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go index 8d7130eac02..defe13c913a 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go @@ -32,6 +32,11 @@ import ( "k8s.io/client-go/util/flowcontrol" ) +const ( + // The version number is taken from "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network". 
+ azureNetworkAPIVersion = "2017-09-01" +) + // Helpers for rate limiting error/error channel creation func createRateLimitErr(isWrite bool, opName string) error { opType := "read" @@ -57,7 +62,7 @@ type InterfacesClient interface { // LoadBalancersClient defines needed functions for azure network.LoadBalancersClient type LoadBalancersClient interface { - CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) + CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) (resp *http.Response, err error) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) List(ctx context.Context, resourceGroupName string) (result []network.LoadBalancer, err error) @@ -103,13 +108,13 @@ type VirtualMachineScaleSetVMsClient interface { // RoutesClient defines needed functions for azure network.RoutesClient type RoutesClient interface { - CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) + CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) (resp *http.Response, err error) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) } // RouteTablesClient defines needed functions for azure network.RouteTablesClient type RouteTablesClient interface { - CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) + CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) (resp *http.Response, err error) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) } @@ -356,7 +361,7 @@ func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient { } } -func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) { +func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) (resp *http.Response, err error) { /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { err = createRateLimitErr(true, "LBCreateOrUpdate") @@ -369,9 +374,15 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro }() mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) - future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, loadBalancerName, parameters) - mc.Observe(err) + req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, loadBalancerName, parameters, etag) if err != nil { + mc.Observe(err) + return nil, err + } + + future, err := az.client.CreateOrUpdateSender(req) + if err != nil { + mc.Observe(err) return future.Response(), err } @@ -380,6 +391,33 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro 
return future.Response(), err } +// createOrUpdatePreparer prepares the CreateOrUpdate request. +func (az *azLoadBalancersClient) createOrUpdatePreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", az.client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": azureNetworkAPIVersion, + } + + preparerDecorators := []autorest.PrepareDecorator{ + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(az.client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters), + } + if etag != "" { + preparerDecorators = append(preparerDecorators, autorest.WithHeader("If-Match", autorest.String(etag))) + } + preparer := autorest.CreatePreparer(preparerDecorators...) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) { /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { @@ -752,9 +790,8 @@ func (az *azSecurityGroupsClient) createOrUpdatePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", az.client.SubscriptionID), } - const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": azureNetworkAPIVersion, } preparerDecorators := []autorest.PrepareDecorator{ @@ -1051,7 +1088,7 @@ func newAzRoutesClient(config *azClientConfig) *azRoutesClient { } } -func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) { +func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) (resp *http.Response, err error) { /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { err = createRateLimitErr(true, "RouteCreateOrUpdate") @@ -1064,7 +1101,13 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName }() mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) - future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, routeTableName, routeName, routeParameters) + req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag) + if err != nil { + mc.Observe(err) + return nil, err + } + + future, err := az.client.CreateOrUpdateSender(req) if err != nil { mc.Observe(err) return future.Response(), err @@ -1075,6 +1118,35 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName return future.Response(), err } +// createOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (az *azRoutesClient) createOrUpdatePreparer(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", az.client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": azureNetworkAPIVersion, + } + + preparerDecorators := []autorest.PrepareDecorator{ + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(az.client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithJSON(routeParameters), + autorest.WithQueryParameters(queryParameters), + } + if etag != "" { + preparerDecorators = append(preparerDecorators, autorest.WithHeader("If-Match", autorest.String(etag))) + } + preparer := autorest.CreatePreparer(preparerDecorators...) + + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) { /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { @@ -1124,7 +1196,7 @@ func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient { } } -func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) { +func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) (resp *http.Response, err error) { /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { err = createRateLimitErr(true, "RouteTableCreateOrUpdate") @@ -1137,9 +1209,15 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup }() mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) - future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters) - mc.Observe(err) + req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, parameters, etag) if err != nil { + mc.Observe(err) + return nil, err + } + + future, err := az.client.CreateOrUpdateSender(req) + if err != nil { + mc.Observe(err) return future.Response(), err } @@ -1148,6 +1226,33 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup return future.Response(), err } +// createOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (az *azRouteTablesClient) createOrUpdatePreparer(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", az.client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": azureNetworkAPIVersion, + } + preparerDecorators := []autorest.PrepareDecorator{ + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(az.client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters), + } + if etag != "" { + preparerDecorators = append(preparerDecorators, autorest.WithHeader("If-Match", autorest.String(etag))) + } + preparer := autorest.CreatePreparer(preparerDecorators...) + + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { if !az.rateLimiterReader.TryAccept() { err = createRateLimitErr(false, "GetRouteTable") From 831939b65e7f4374501375eb62300cb35ee8076d Mon Sep 17 00:00:00 2001 From: Mayank Gaikwad <8110509+mgdevstack@users.noreply.github.com> Date: Mon, 6 May 2019 16:46:28 +0530 Subject: [PATCH 024/194] Add subdomain to verify dns resolution by subdomain --- test/e2e/network/dns.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index beacaedf03f..87543b73118 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -274,7 +274,8 @@ var _ = SIGDescribe("DNS", func() { }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - namesToResolve := []string{hostFQDN} + subdomain := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) + namesToResolve := []string{hostFQDN, subdomain} wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") From 53bace16dfed1da015103376eca7471095dbfefb Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Thu, 2 May 2019 12:02:22 -0700 Subject: [PATCH 025/194] Updated gce node-termination-handler yaml. 
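With this change the handler marks a node that is about to be terminated by
tainting it with cloud.google.com/impending-node-termination (effect
NoSchedule) instead of writing an annotation. For illustration only, and not
part of this patch: a pod that should remain schedulable onto such a node
would need a matching toleration. A minimal sketch using the standard
k8s.io/api/core/v1 types (the enclosing pod spec is left out):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Toleration that lets a pod be scheduled despite the NoSchedule taint
	// applied by the node-termination-handler.
	toleration := v1.Toleration{
		Key:      "cloud.google.com/impending-node-termination",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}
	fmt.Printf("%+v\n", toleration)
}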
--- cluster/gce/addons/node-termination-handler/daemonset.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/gce/addons/node-termination-handler/daemonset.yaml b/cluster/gce/addons/node-termination-handler/daemonset.yaml index 4430e929572..789a1ba41ae 100644 --- a/cluster/gce/addons/node-termination-handler/daemonset.yaml +++ b/cluster/gce/addons/node-termination-handler/daemonset.yaml @@ -48,10 +48,10 @@ spec: - effect: NoExecute operator: Exists containers: - - image: k8s.gcr.io/gke-node-termination-handler@sha256:e08ca863a547754fa7b75064bdad04f04cbef86c7b0a181ecc7304e747623181 + - image: k8s.gcr.io/gke-node-termination-handler@sha256:aca12d17b222dfed755e28a44d92721e477915fb73211d0a0f8925a1fa847cca name: node-termination-handler command: ["./node-termination-handler"] - args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--annotation=cloud.google.com/impending-node-termination"] + args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--taint=cloud.google.com/impending-node-termination::NoSchedule"] securityContext: capabilities: # Necessary to reboot node From beca6c7c02e7e450e2dc07e4e7b1da6b1c2f2720 Mon Sep 17 00:00:00 2001 From: s-ito-ts Date: Fri, 12 Apr 2019 00:12:10 +0000 Subject: [PATCH 026/194] fix shellcheck failures in /hack/verify-[a-g]*.sh. --- hack/.shellcheck_failures | 1 - hack/verify-golint.sh | 16 +++++++--------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 06f051c65fc..1e9e6d22723 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -29,7 +29,6 @@ ./hack/pin-dependency.sh ./hack/test-integration.sh ./hack/update-vendor.sh -./hack/verify-golint.sh ./hack/verify-test-featuregates.sh ./test/cmd/apply.sh ./test/cmd/apps.sh diff --git a/hack/verify-golint.sh b/hack/verify-golint.sh index 79508a9adb3..10aaf9bafc3 100755 --- a/hack/verify-golint.sh +++ b/hack/verify-golint.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" source "${KUBE_ROOT}/hack/lib/util.sh" @@ -44,12 +44,10 @@ export IFS=$'\n' # NOTE: when "go list -e ./..." is run within GOPATH, it turns the k8s.io/kubernetes # as the prefix, however if we run it outside it returns the full path of the file # with a leading underscore. We'll need to support both scenarios for all_packages. -all_packages=( - $(go list -e ./... | egrep -v "/(third_party|vendor|staging/src/k8s.io/client-go/pkg|generated|clientset_generated)" | sed -e 's|^k8s.io/kubernetes/||' -e "s|^_\(${KUBE_ROOT}/\)\{0,1\}||") -) -failing_packages=( - $(cat $failure_file) -) +all_packages=() +while IFS='' read -r line; do all_packages+=("$line"); done < <(go list -e ./... | grep -vE "/(third_party|vendor|staging/src/k8s.io/client-go/pkg|generated|clientset_generated)" | sed -e 's|^k8s.io/kubernetes/||' -e "s|^_\(${KUBE_ROOT}/\)\{0,1\}||") +failing_packages=() +while IFS='' read -r line; do failing_packages+=("$line"); done < <(cat "$failure_file") unset IFS errors=() not_failing=() @@ -63,13 +61,13 @@ for p in "${all_packages[@]}"; do # completely. 
# Ref: https://github.com/kubernetes/kubernetes/pull/67675 # Ref: https://github.com/golang/lint/issues/68 - failedLint=$(ls "$p"/*.go | egrep -v "(zz_generated.*.go|generated.pb.go|generated.proto|types_swagger_doc_generated.go)" | xargs -L1 golint 2>/dev/null) + failedLint=$(find "$p"/*.go | grep -vE "(zz_generated.*.go|generated.pb.go|generated.proto|types_swagger_doc_generated.go)" | xargs -L1 golint 2>/dev/null) kube::util::array_contains "$p" "${failing_packages[@]}" && in_failing=$? || in_failing=$? if [[ -n "${failedLint}" ]] && [[ "${in_failing}" -ne "0" ]]; then errors+=( "${failedLint}" ) fi if [[ -z "${failedLint}" ]] && [[ "${in_failing}" -eq "0" ]]; then - not_failing+=( $p ) + not_failing+=( "$p" ) fi done From 8afecb8c306bb0587c820d027069f4e802fa5693 Mon Sep 17 00:00:00 2001 From: toyoda Date: Wed, 24 Apr 2019 20:31:17 +0900 Subject: [PATCH 027/194] add pod status check after job completes in job e2e test --- test/e2e/apps/job.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 565147f4e2e..28e75cff1b4 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -47,6 +47,14 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Ensuring job reaches completions") err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) + + ginkgo.By("Ensuring pods for job exist") + pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) + gomega.Expect(len(pods.Items)).To(gomega.Equal(int(completions)), "failed to ensure sufficient pod for job: got %d, want %d", len(pods.Items), completions) + for _, pod := range pods.Items { + gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "failed to ensure pod status: pod %s status %s", pod.Name, pod.Status.Phase) + } }) // Pods sometimes fail, but eventually succeed. From 9a5655c91500b74bb03a2228e8a003f22c465d8e Mon Sep 17 00:00:00 2001 From: toyoda Date: Thu, 11 Apr 2019 14:53:57 +0900 Subject: [PATCH 028/194] fix shellcheck failures in /hack/make-rules/update.sh,verify.sh --- hack/.shellcheck_failures | 2 -- hack/make-rules/update.sh | 12 ++++-------- hack/make-rules/verify.sh | 32 +++++++++++++++++--------------- 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 06f051c65fc..283753f9f05 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -24,8 +24,6 @@ ./hack/lib/version.sh ./hack/make-rules/make-help.sh ./hack/make-rules/test.sh -./hack/make-rules/update.sh -./hack/make-rules/verify.sh ./hack/pin-dependency.sh ./hack/test-integration.sh ./hack/update-vendor.sh diff --git a/hack/make-rules/update.sh b/hack/make-rules/update.sh index a1f8787cbab..5b45b2c6f69 100755 --- a/hack/make-rules/update.sh +++ b/hack/make-rules/update.sh @@ -19,7 +19,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. source "${KUBE_ROOT}/hack/lib/init.sh" # If called directly, exit. 
@@ -32,10 +32,6 @@ fi SILENT=${SILENT:-true} ALL=${FORCE_ALL:-false} -V="" -if [[ "${SILENT}" != "true" ]]; then - V="-v" -fi trap 'exit 1' SIGINT @@ -59,10 +55,10 @@ BASH_TARGETS=" update-gofmt" for t in ${BASH_TARGETS}; do - echo -e "${color_yellow}Running ${t}${color_norm}" + echo -e "${color_yellow:?}Running ${t}${color_norm:?}" if ${SILENT} ; then if ! bash "${KUBE_ROOT}/hack/${t}.sh" 1> /dev/null; then - echo -e "${color_red}Running ${t} FAILED${color_norm}" + echo -e "${color_red:?}Running ${t} FAILED${color_norm}" if ! ${ALL}; then exit 1 fi @@ -77,4 +73,4 @@ for t in ${BASH_TARGETS}; do fi done -echo -e "${color_green}Update scripts completed successfully${color_norm}" +echo -e "${color_green:?}Update scripts completed successfully${color_norm}" diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh index 7e92b4f8187..9418ce8cf7d 100755 --- a/hack/make-rules/verify.sh +++ b/hack/make-rules/verify.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. source "${KUBE_ROOT}/hack/lib/util.sh" # If KUBE_JUNIT_REPORT_DIR is unset, and ARTIFACTS is set, then have them match. @@ -80,13 +80,13 @@ QUICK_PATTERNS+=( "verify-test-owners.sh" ) -EXCLUDED_CHECKS=$(ls ${EXCLUDED_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true) -QUICK_CHECKS=$(ls ${QUICK_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true) +while IFS='' read -r line; do EXCLUDED_CHECKS+=("$line"); done < <(ls "${EXCLUDED_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/}" 2>/dev/null || true) +while IFS='' read -r line; do QUICK_CHECKS+=("$line"); done < <(ls "${QUICK_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/}" 2>/dev/null || true) TARGET_LIST=() IFS=" " read -r -a TARGET_LIST <<< "${WHAT:-}" function is-excluded { - for e in ${EXCLUDED_CHECKS[@]}; do + for e in "${EXCLUDED_CHECKS[@]}"; do if [[ $1 -ef "${e}" ]]; then return fi @@ -95,7 +95,7 @@ function is-excluded { } function is-quick { - for e in ${QUICK_CHECKS[@]}; do + for e in "${QUICK_CHECKS[@]}"; do if [[ $1 -ef "${e}" ]]; then return fi @@ -138,9 +138,9 @@ FAILED_TESTS=() function print-failed-tests { echo -e "========================" - echo -e "${color_red}FAILED TESTS${color_norm}" + echo -e "${color_red:?}FAILED TESTS${color_norm:?}" echo -e "========================" - for t in ${FAILED_TESTS[@]}; do + for t in "${FAILED_TESTS[@]}"; do echo -e "${color_red}${t}${color_norm}" done } @@ -150,10 +150,11 @@ function run-checks { local -r runner=$2 local t - for t in $(ls ${pattern}) + for t in ${pattern} do - local check_name="$(basename "${t}")" - if [[ ! -z ${WHAT:-} ]]; then + local check_name + check_name="$(basename "${t}")" + if [[ -n ${WHAT:-} ]]; then if ! is-explicitly-chosen "${check_name}"; then continue fi @@ -168,15 +169,16 @@ function run-checks { fi fi echo -e "Verifying ${check_name}" - local start=$(date +%s) + local start + start=$(date +%s) run-cmd "${runner}" "${t}" && tr=$? || tr=$? 
- local elapsed=$(($(date +%s) - ${start})) + local elapsed=$(($(date +%s) - start)) if [[ ${tr} -eq 0 ]]; then - echo -e "${color_green}SUCCESS${color_norm} ${check_name}\t${elapsed}s" + echo -e "${color_green:?}SUCCESS${color_norm} ${check_name}\t${elapsed}s" else echo -e "${color_red}FAILED${color_norm} ${check_name}\t${elapsed}s" ret=1 - FAILED_TESTS+=(${t}) + FAILED_TESTS+=("${t}") fi done } @@ -190,7 +192,7 @@ function missing-target-checks { do [[ -z "${v}" ]] && continue - FAILED_TESTS+=(${v}) + FAILED_TESTS+=("${v}") ret=1 done } From 9a8f07dd9fd71f15201563e93aba4f0190c0ae2d Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 6 May 2019 06:43:33 +0000 Subject: [PATCH 029/194] remove VM API call dep in azure disk WaitForAttach add comment add unit test for WaitForAttach fnc add unit test for WaitForAttach Func --- pkg/volume/azure_dd/BUILD | 1 + pkg/volume/azure_dd/attacher.go | 39 +++++------ pkg/volume/azure_dd/attacher_test.go | 73 +++++++++++++++++++++ pkg/volume/azure_dd/azure_common.go | 31 +++------ pkg/volume/azure_dd/azure_common_test.go | 47 +++++-------- pkg/volume/azure_dd/azure_common_windows.go | 2 +- 6 files changed, 119 insertions(+), 74 deletions(-) create mode 100644 pkg/volume/azure_dd/attacher_test.go diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index fb93b436e29..4833f5344cc 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -60,6 +60,7 @@ filegroup( go_test( name = "go_default_test", srcs = [ + "attacher_test.go", "azure_common_test.go", "azure_dd_block_test.go", "azure_dd_test.go", diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 001a8bae008..5b559078069 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -22,6 +22,7 @@ import ( "path/filepath" "runtime" "strconv" + "strings" "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" @@ -133,12 +134,14 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty } func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - return "", err + // devicePath could be a LUN number or + // "/dev/disk/azure/scsi1/lunx", "/dev/sdx" on Linux node + // "/dev/diskx" on Windows node + if strings.HasPrefix(devicePath, "/dev/") { + return devicePath, nil } - diskController, err := getDiskController(a.plugin.host) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return "", err } @@ -146,23 +149,9 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, nodeName := types.NodeName(a.plugin.host.GetHostName()) diskName := volumeSource.DiskName - lun := int32(-1) - if runtime.GOOS != "windows" { - // on Linux, usually devicePath is like "/dev/disk/azure/scsi1/lun2", get LUN directly - lun, err = getDiskLUN(devicePath) - if err != nil { - klog.V(2).Infof("azureDisk - WaitForAttach: getDiskLUN(%s) failed with error: %v", devicePath, err) - } - } - - if lun < 0 { - klog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)", - diskName, volumeSource.DataDiskURI, nodeName, devicePath) - lun, err = diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName) - if err != nil { - return "", err - } - klog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun) + lun, err := 
strconv.Atoi(devicePath) + if err != nil { + return "", fmt.Errorf("parse %s failed with error: %v, diskName: %s, nodeName: %s", devicePath, err, diskName, nodeName) } exec := a.plugin.host.GetExec(a.plugin.GetPluginName()) @@ -249,6 +238,14 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str if notMnt { diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host) mountOptions := util.MountOptionFromSpec(spec, options...) + if runtime.GOOS == "windows" { + // only parse devicePath on Windows node + diskNum, err := getDiskNum(devicePath) + if err != nil { + return err + } + devicePath = diskNum + } err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions) if err != nil { if cleanErr := os.Remove(deviceMountPath); cleanErr != nil { diff --git a/pkg/volume/azure_dd/attacher_test.go b/pkg/volume/azure_dd/attacher_test.go new file mode 100644 index 00000000000..50a89f6bfff --- /dev/null +++ b/pkg/volume/azure_dd/attacher_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/volume" +) + +func createVolSpec(name string, readOnly bool) *volume.Spec { + return &volume.Spec{ + Volume: &v1.Volume{ + VolumeSource: v1.VolumeSource{ + AzureDisk: &v1.AzureDiskVolumeSource{ + DiskName: name, + ReadOnly: &readOnly, + }, + }, + }, + } +} +func TestWaitForAttach(t *testing.T) { + tests := []struct { + devicePath string + expected string + expectError bool + }{ + { + devicePath: "/dev/disk/azure/scsi1/lun0", + expected: "/dev/disk/azure/scsi1/lun0", + expectError: false, + }, + { + devicePath: "/dev/sdc", + expected: "/dev/sdc", + expectError: false, + }, + { + devicePath: "/dev/disk0", + expected: "/dev/disk0", + expectError: false, + }, + } + + attacher := azureDiskAttacher{} + spec := createVolSpec("fakedisk", false) + + for _, test := range tests { + result, err := attacher.WaitForAttach(spec, test.devicePath, nil, 3000*time.Millisecond) + assert.Equal(t, result, test.expected) + assert.Equal(t, err != nil, test.expectError, fmt.Sprintf("error msg: %v", err)) + } +} diff --git a/pkg/volume/azure_dd/azure_common.go b/pkg/volume/azure_dd/azure_common.go index ae85a7f9596..a02506b2f00 100644 --- a/pkg/volume/azure_dd/azure_common.go +++ b/pkg/volume/azure_dd/azure_common.go @@ -22,7 +22,6 @@ import ( "os" "path/filepath" "regexp" - "strconv" libstrings "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" @@ -62,7 +61,9 @@ var ( string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk)) - lunPathRE = regexp.MustCompile(`/dev/disk/azure/scsi(?:.*)/lun(.+)`) + // only for Windows node + winDiskNumRE = regexp.MustCompile(`/dev/disk(.+)`) + winDiskNumFormat = "/dev/disk%d" ) func getPath(uid types.UID, volName string, host volume.VolumeHost) string { @@ -206,24 +207,12 @@ 
func strFirstLetterToUpper(str string) string { return libstrings.ToUpper(string(str[0])) + str[1:] } -// getDiskLUN : deviceInfo could be a LUN number or a device path, e.g. /dev/disk/azure/scsi1/lun2 -func getDiskLUN(deviceInfo string) (int32, error) { - var diskLUN string - if len(deviceInfo) <= 2 { - diskLUN = deviceInfo - } else { - // extract the LUN num from a device path - matches := lunPathRE.FindStringSubmatch(deviceInfo) - if len(matches) == 2 { - diskLUN = matches[1] - } else { - return -1, fmt.Errorf("cannot parse deviceInfo: %s", deviceInfo) - } +// getDiskNum : extract the disk num from a device path, +// deviceInfo format could be like this: e.g. /dev/disk2 +func getDiskNum(deviceInfo string) (string, error) { + matches := winDiskNumRE.FindStringSubmatch(deviceInfo) + if len(matches) == 2 { + return matches[1], nil } - - lun, err := strconv.Atoi(diskLUN) - if err != nil { - return -1, err - } - return int32(lun), nil + return "", fmt.Errorf("cannot parse deviceInfo: %s, correct format: /dev/disk?", deviceInfo) } diff --git a/pkg/volume/azure_dd/azure_common_test.go b/pkg/volume/azure_dd/azure_common_test.go index ad074923018..0d1ca64ccdb 100644 --- a/pkg/volume/azure_dd/azure_common_test.go +++ b/pkg/volume/azure_dd/azure_common_test.go @@ -183,57 +183,42 @@ func TestNormalizeStorageAccountType(t *testing.T) { } } -func TestGetDiskLUN(t *testing.T) { +func TestGetDiskNum(t *testing.T) { tests := []struct { deviceInfo string - expectedLUN int32 + expectedNum string expectError bool }{ { - deviceInfo: "0", - expectedLUN: 0, + deviceInfo: "/dev/disk0", + expectedNum: "0", expectError: false, }, { - deviceInfo: "10", - expectedLUN: 10, + deviceInfo: "/dev/disk99", + expectedNum: "99", expectError: false, }, { - deviceInfo: "11d", - expectedLUN: -1, + deviceInfo: "", + expectedNum: "", + expectError: true, + }, + { + deviceInfo: "/dev/disk", + expectedNum: "", expectError: true, }, { deviceInfo: "999", - expectedLUN: -1, - expectError: true, - }, - { - deviceInfo: "", - expectedLUN: -1, - expectError: true, - }, - { - deviceInfo: "/dev/disk/azure/scsi1/lun2", - expectedLUN: 2, - expectError: false, - }, - { - deviceInfo: "/dev/disk/azure/scsi0/lun12", - expectedLUN: 12, - expectError: false, - }, - { - deviceInfo: "/dev/disk/by-id/scsi1/lun2", - expectedLUN: -1, + expectedNum: "", expectError: true, }, } for _, test := range tests { - result, err := getDiskLUN(test.deviceInfo) - assert.Equal(t, result, test.expectedLUN) + result, err := getDiskNum(test.deviceInfo) + assert.Equal(t, result, test.expectedNum) assert.Equal(t, err != nil, test.expectError, fmt.Sprintf("error msg: %v", err)) } } diff --git a/pkg/volume/azure_dd/azure_common_windows.go b/pkg/volume/azure_dd/azure_common_windows.go index c48f191f309..c1c390999c1 100644 --- a/pkg/volume/azure_dd/azure_common_windows.go +++ b/pkg/volume/azure_dd/azure_common_windows.go @@ -84,7 +84,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error if d, ok := v["number"]; ok { if diskNum, ok := d.(float64); ok { klog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun) - return strconv.Itoa(int(diskNum)), nil + return fmt.Sprintf(winDiskNumFormat, int(diskNum)), nil } klog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location) } From 7ca1c832d21c7b6181dd445c453d3fd67061b07b Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 6 May 2019 15:58:04 +0800 Subject: [PATCH 030/194] Set ETAG when updating Azure loadbalancer, route and route 
table --- .../azure/azure_backoff.go | 59 +++++++++++++++---- .../azure/azure_fakes.go | 6 +- .../azure/azure_loadbalancer.go | 16 ++++- .../azure/azure_test.go | 16 ++--- 4 files changed, 73 insertions(+), 24 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go index d7d6c537ced..f7ff8922919 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go @@ -197,7 +197,7 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb) + resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb, to.String(lb.Etag)) klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) if err == nil { if isSuccessHTTPResponse(resp) { @@ -207,6 +207,11 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) return fmt.Errorf("HTTP response %q", resp.Status) } } + + // Invalidate the cache because ETAG precondition mismatch. + if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { + az.lbCache.Delete(*lb.Name) + } return err } @@ -219,14 +224,20 @@ func (az *Cloud) createOrUpdateLBWithRetry(service *v1.Service, lb network.LoadB ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb) + resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb, to.String(lb.Etag)) klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) - done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err) + done, retryError := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err) if done && err == nil { // Invalidate the cache right after updating az.lbCache.Delete(*lb.Name) } - return done, err + + // Invalidate the cache and abort backoff because ETAG precondition mismatch. 
+ if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { + az.nsgCache.Delete(*lb.Name) + return true, err + } + return done, retryError }) } @@ -441,7 +452,10 @@ func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error { ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable) + resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, to.String(routeTable.Etag)) + if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { + az.rtCache.Delete(*routeTable.Name) + } return az.processHTTPResponse(nil, "", resp, err) } @@ -454,8 +468,19 @@ func (az *Cloud) createOrUpdateRouteTableWithRetry(routeTable network.RouteTable ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable) - return az.processHTTPRetryResponse(nil, "", resp, err) + resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, to.String(routeTable.Etag)) + done, retryError := az.processHTTPRetryResponse(nil, "", resp, err) + if done && err == nil { + az.rtCache.Delete(*routeTable.Name) + return done, nil + } + + // Invalidate the cache and abort backoff because ETAG precondition mismatch. + if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { + az.rtCache.Delete(*routeTable.Name) + return true, err + } + return done, retryError }) } @@ -465,8 +490,11 @@ func (az *Cloud) CreateOrUpdateRoute(route network.Route) error { ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route) + resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, to.String(route.Etag)) klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) + if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { + az.rtCache.Delete(az.RouteTableName) + } return az.processHTTPResponse(nil, "", resp, err) } @@ -479,9 +507,20 @@ func (az *Cloud) createOrUpdateRouteWithRetry(route network.Route) error { ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route) + resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, to.String(route.Etag)) klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) - return az.processHTTPRetryResponse(nil, "", resp, err) + done, retryError := az.processHTTPRetryResponse(nil, "", resp, err) + if done && err == nil { + az.rtCache.Delete(az.RouteTableName) + return done, nil + } + + // Invalidate the cache and abort backoff because ETAG precondition mismatch. 
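+	// The etag sent with this request came from the route object passed in,
+	// so retrying with the same stale etag would keep hitting 412; drop the
+	// cached route table and return the error instead of continuing the backoff.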
+ if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { + az.rtCache.Delete(az.RouteTableName) + return true, err + } + return done, retryError }) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index 2ce921be1bb..e38f1d7683e 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -52,7 +52,7 @@ func newFakeAzureLBClient() *fakeAzureLBClient { return fLBC } -func (fLBC *fakeAzureLBClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) { +func (fLBC *fakeAzureLBClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) (resp *http.Response, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() @@ -642,7 +642,7 @@ func newFakeRoutesClient() *fakeRoutesClient { return fRC } -func (fRC *fakeRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) { +func (fRC *fakeRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) (resp *http.Response, err error) { fRC.mutex.Lock() defer fRC.mutex.Unlock() @@ -683,7 +683,7 @@ func newFakeRouteTablesClient() *fakeRouteTablesClient { return fRTC } -func (fRTC *fakeRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) { +func (fRTC *fakeRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) (resp *http.Response, err error) { fRTC.mutex.Lock() defer fRTC.mutex.Unlock() diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 58f3bb637ae..858cf3d13ed 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -147,7 +147,8 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser return nil, err } - if _, err := az.reconcilePublicIP(clusterName, updateService, lb, true /* wantLb */); err != nil { + // lb is not reused here because the ETAG may be changed in above operations, hence reconcilePublicIP() would get lb again from cache. + if _, err := az.reconcilePublicIP(clusterName, updateService, to.String(lb.Name), true /* wantLb */); err != nil { return nil, err } @@ -203,7 +204,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri } } - if _, err := az.reconcilePublicIP(clusterName, service, nil, false /* wantLb */); err != nil { + if _, err := az.reconcilePublicIP(clusterName, service, "", false /* wantLb */); err != nil { if ignoreErrors(err) != nil { return err } @@ -1323,9 +1324,10 @@ func deduplicate(collection *[]string) *[]string { } // This reconciles the PublicIP resources similar to how the LB is reconciled. 
-func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb *network.LoadBalancer, wantLb bool) (*network.PublicIPAddress, error) { +func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbName string, wantLb bool) (*network.PublicIPAddress, error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) + var lb *network.LoadBalancer var desiredPipName string var err error if !isInternal && wantLb { @@ -1335,6 +1337,14 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb * } } + if lbName != "" { + loadBalancer, _, err := az.getAzureLoadBalancer(lbName) + if err != nil { + return nil, err + } + lb = &loadBalancer + } + pipResourceGroup := az.getPublicIPAddressResourceGroup(service) pips, err := az.ListPIP(service, pipResourceGroup) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go index efcfbad5602..0e2d7d781b5 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go @@ -879,13 +879,13 @@ func TestReconcilePublicIPWithNewService(t *testing.T) { az := getTestCloud() svc := getTestService("servicea", v1.ProtocolTCP, 80, 443) - pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/) + pip, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) } validatePublicIP(t, pip, &svc, true) - pip2, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb */) + pip2, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb */) if err != nil { t.Errorf("Unexpected error: %q", err) } @@ -900,7 +900,7 @@ func TestReconcilePublicIPRemoveService(t *testing.T) { az := getTestCloud() svc := getTestService("servicea", v1.ProtocolTCP, 80, 443) - pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/) + pip, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) } @@ -908,7 +908,7 @@ func TestReconcilePublicIPRemoveService(t *testing.T) { validatePublicIP(t, pip, &svc, true) // Remove the service - pip, err = az.reconcilePublicIP(testClusterName, &svc, nil, false /* wantLb */) + pip, err = az.reconcilePublicIP(testClusterName, &svc, "", false /* wantLb */) if err != nil { t.Errorf("Unexpected error: %q", err) } @@ -920,7 +920,7 @@ func TestReconcilePublicIPWithInternalService(t *testing.T) { az := getTestCloud() svc := getInternalTestService("servicea", 80, 443) - pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/) + pip, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) } @@ -932,7 +932,7 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) { az := getTestCloud() svc := getInternalTestService("servicea", 80, 443) - pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/) + pip, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) } @@ -940,14 +940,14 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) { // Update to external service svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80) - pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, nil, true /* wantLb*/) + pip, 
err = az.reconcilePublicIP(testClusterName, &svcUpdated, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) } validatePublicIP(t, pip, &svcUpdated, true) // Update to internal service again - pip, err = az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/) + pip, err = az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) } From 017f57a6b0cc052cfb041bf653f1ba984c6ec26b Mon Sep 17 00:00:00 2001 From: Casey Callendrello Date: Wed, 13 Feb 2019 18:48:45 +0100 Subject: [PATCH 031/194] proxy: add some useful metrics This adds some useful metrics around pending changes and last successful sync time. The goal is for administrators to be able to alert on proxies that, for whatever reason, are quite stale. Signed-off-by: Casey Callendrello --- pkg/proxy/BUILD | 1 + pkg/proxy/endpoints.go | 5 ++++ pkg/proxy/iptables/proxier.go | 1 + pkg/proxy/ipvs/proxier.go | 1 + pkg/proxy/metrics/metrics.go | 55 ++++++++++++++++++++++++++++++++++ pkg/proxy/service.go | 4 +++ pkg/proxy/winkernel/metrics.go | 11 +++++++ pkg/proxy/winkernel/proxier.go | 1 + 8 files changed, 79 insertions(+) diff --git a/pkg/proxy/BUILD b/pkg/proxy/BUILD index b43afc322c3..8c69b958350 100644 --- a/pkg/proxy/BUILD +++ b/pkg/proxy/BUILD @@ -18,6 +18,7 @@ go_library( deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/proxy/config:go_default_library", + "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index 713f7e17ebe..d81b2cab9e2 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/proxy/metrics" utilproxy "k8s.io/kubernetes/pkg/proxy/util" utilnet "k8s.io/utils/net" ) @@ -127,6 +128,7 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool { if endpoints == nil { return false } + metrics.EndpointChangesTotal.Inc() namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} ect.lock.Lock() @@ -154,6 +156,8 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool { // should be exported. delete(ect.lastChangeTriggerTimes, namespacedName) } + + metrics.EndpointChangesPending.Set(float64(len(ect.items))) return len(ect.items) > 0 } @@ -295,6 +299,7 @@ func (em EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]S detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames) } changes.items = make(map[types.NamespacedName]*endpointsChange) + metrics.EndpointChangesPending.Set(0) for _, lastChangeTriggerTime := range changes.lastChangeTriggerTimes { *lastChangeTriggerTimes = append(*lastChangeTriggerTimes, lastChangeTriggerTime...) } diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index e4b13b17c9f..4d380f3f20f 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -1395,6 +1395,7 @@ func (proxier *Proxier) syncProxyRules() { if proxier.healthzServer != nil { proxier.healthzServer.UpdateTimestamp() } + metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() // Update healthchecks. 
The endpoints list might include services that are // not "OnlyLocal", but the services list will not, and the healthChecker diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 02452930bb1..433cc5d9f55 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -1272,6 +1272,7 @@ func (proxier *Proxier) syncProxyRules() { if proxier.healthzServer != nil { proxier.healthzServer.UpdateTimestamp() } + metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() // Update healthchecks. The endpoints list might include services that are // not "OnlyLocal", but the services list will not, and the healthChecker diff --git a/pkg/proxy/metrics/metrics.go b/pkg/proxy/metrics/metrics.go index 38924387ff8..54d7f0a4ca4 100644 --- a/pkg/proxy/metrics/metrics.go +++ b/pkg/proxy/metrics/metrics.go @@ -46,6 +46,16 @@ var ( }, ) + // SyncProxyRulesLastTimestamp is the timestamp proxy rules were last + // successfully synced. + SyncProxyRulesLastTimestamp = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_last_timestamp_seconds", + Help: "The last time proxy rules were successfully synced", + }, + ) + // NetworkProgrammingLatency is defined as the time it took to program the network - from the time // the service or pod has changed to the time the change was propagated and the proper kube-proxy // rules were synced. Exported for each endpoints object that were part of the rules sync. @@ -63,6 +73,46 @@ var ( Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), }, ) + + // EndpointChangesPending is the number of pending endpoint changes that + // have not yet been synced to the proxy. + EndpointChangesPending = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_endpoint_changes_pending", + Help: "Pending proxy rules Endpoint changes", + }, + ) + + // EndpointChangesTotal is the number of endpoint changes that the proxy + // has seen. + EndpointChangesTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_endpoint_changes_total", + Help: "Cumulative proxy rules Endpoint changes", + }, + ) + + // ServiceChangesPending is the number of pending service changes that + // have not yet been synced to the proxy. + ServiceChangesPending = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_service_changes_pending", + Help: "Pending proxy rules Service changes", + }, + ) + + // ServiceChangesTotal is the number of service changes that the proxy has + // seen. 
+ ServiceChangesTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_service_changes_total", + Help: "Cumulative proxy rules Service changes", + }, + ) ) var registerMetricsOnce sync.Once @@ -72,7 +122,12 @@ func RegisterMetrics() { registerMetricsOnce.Do(func() { prometheus.MustRegister(SyncProxyRulesLatency) prometheus.MustRegister(DeprecatedSyncProxyRulesLatency) + prometheus.MustRegister(SyncProxyRulesLastTimestamp) prometheus.MustRegister(NetworkProgrammingLatency) + prometheus.MustRegister(EndpointChangesPending) + prometheus.MustRegister(EndpointChangesTotal) + prometheus.MustRegister(ServiceChangesPending) + prometheus.MustRegister(ServiceChangesTotal) }) } diff --git a/pkg/proxy/service.go b/pkg/proxy/service.go index eab394a9258..eecc643adb2 100644 --- a/pkg/proxy/service.go +++ b/pkg/proxy/service.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" apiservice "k8s.io/kubernetes/pkg/api/v1/service" + "k8s.io/kubernetes/pkg/proxy/metrics" utilproxy "k8s.io/kubernetes/pkg/proxy/util" utilnet "k8s.io/utils/net" ) @@ -198,6 +199,7 @@ func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool { if svc == nil { return false } + metrics.ServiceChangesTotal.Inc() namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name} sct.lock.Lock() @@ -214,6 +216,7 @@ func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool { if reflect.DeepEqual(change.previous, change.current) { delete(sct.items, namespacedName) } + metrics.ServiceChangesPending.Set(float64(len(sct.items))) return len(sct.items) > 0 } @@ -296,6 +299,7 @@ func (sm *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP set } // clear changes after applying them to ServiceMap. changes.items = make(map[types.NamespacedName]*serviceChange) + metrics.ServiceChangesPending.Set(0) return } diff --git a/pkg/proxy/winkernel/metrics.go b/pkg/proxy/winkernel/metrics.go index 61cf962ee0e..729cc5e626f 100644 --- a/pkg/proxy/winkernel/metrics.go +++ b/pkg/proxy/winkernel/metrics.go @@ -43,6 +43,16 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + + // SyncProxyRulesLastTimestamp is the timestamp proxy rules were last + // successfully synced. + SyncProxyRulesLastTimestamp = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_last_timestamp_seconds", + Help: "The last time proxy rules were successfully synced", + }, + ) ) var registerMetricsOnce sync.Once @@ -51,6 +61,7 @@ func RegisterMetrics() { registerMetricsOnce.Do(func() { prometheus.MustRegister(SyncProxyRulesLatency) prometheus.MustRegister(DeprecatedSyncProxyRulesLatency) + prometheus.MustRegister(SyncProxyRulesLastTimestamp) }) } diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index 061b8922c0a..4817a4a261f 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -1197,6 +1197,7 @@ func (proxier *Proxier) syncProxyRules() { if proxier.healthzServer != nil { proxier.healthzServer.UpdateTimestamp() } + SyncProxyRulesLastTimestamp.SetToCurrentTime() // Update healthchecks. 
The endpoints list might include services that are // not "OnlyLocal", but the services list will not, and the healthChecker From e21ce73fba321bb0d547ae1ed92b43155a386354 Mon Sep 17 00:00:00 2001 From: yanghaichao12 Date: Tue, 7 May 2019 14:23:30 -0400 Subject: [PATCH 032/194] Refactor:move clean method to suitable place --- pkg/kubectl/cmd/cp/cp.go | 6 ------ pkg/kubectl/cmd/cp/cp_test.go | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/kubectl/cmd/cp/cp.go b/pkg/kubectl/cmd/cp/cp.go index 2679c6f7af1..02612e06717 100644 --- a/pkg/kubectl/cmd/cp/cp.go +++ b/pkg/kubectl/cmd/cp/cp.go @@ -413,12 +413,6 @@ func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) e return nil } -// clean prevents path traversals by stripping them out. -// This is adapted from https://golang.org/src/net/http/fs.go#L74 -func clean(fileName string) string { - return path.Clean(string(os.PathSeparator) + fileName) -} - func (o *CopyOptions) untarAll(reader io.Reader, destDir, prefix string) error { // TODO: use compression here? tarReader := tar.NewReader(reader) diff --git a/pkg/kubectl/cmd/cp/cp_test.go b/pkg/kubectl/cmd/cp/cp_test.go index c7d75573d0e..d648d826028 100644 --- a/pkg/kubectl/cmd/cp/cp_test.go +++ b/pkg/kubectl/cmd/cp/cp_test.go @@ -551,6 +551,12 @@ func TestBadTar(t *testing.T) { } } +// clean prevents path traversals by stripping them out. +// This is adapted from https://golang.org/src/net/http/fs.go#L74 +func clean(fileName string) string { + return path.Clean(string(os.PathSeparator) + fileName) +} + func TestClean(t *testing.T) { tests := []struct { input string From 1d60557c8720c3ae0a23ddf691f6b7c50e90e20b Mon Sep 17 00:00:00 2001 From: Jacob Tanenbaum Date: Thu, 4 Apr 2019 14:41:15 -0400 Subject: [PATCH 033/194] Merge() and Unmerge() needlessly exported --- pkg/proxy/endpoints.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index 713f7e17ebe..c203199e460 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -290,8 +290,8 @@ func (em EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]S changes.lock.Lock() defer changes.lock.Unlock() for _, change := range changes.items { - em.Unmerge(change.previous) - em.Merge(change.current) + em.unmerge(change.previous) + em.merge(change.current) detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames) } changes.items = make(map[types.NamespacedName]*endpointsChange) @@ -302,14 +302,14 @@ func (em EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]S } // Merge ensures that the current EndpointsMap contains all pairs from the EndpointsMap passed in. -func (em EndpointsMap) Merge(other EndpointsMap) { +func (em EndpointsMap) merge(other EndpointsMap) { for svcPortName := range other { em[svcPortName] = other[svcPortName] } } // Unmerge removes the pairs from the current EndpointsMap which are contained in the EndpointsMap passed in. 
-func (em EndpointsMap) Unmerge(other EndpointsMap) { +func (em EndpointsMap) unmerge(other EndpointsMap) { for svcPortName := range other { delete(em, svcPortName) } From 5201cc994c0eff7298e8ebfbcdfaad2c3ea78af8 Mon Sep 17 00:00:00 2001 From: Jacob Tanenbaum Date: Thu, 4 Apr 2019 14:53:11 -0400 Subject: [PATCH 034/194] Cleanup of GetLocalEndpointIPs unexported GetLocalEndpointIPs and made it a endpointsMap struct method --- pkg/proxy/endpoints.go | 6 +++--- pkg/proxy/endpoints_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index c203199e460..b849f532d97 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -208,7 +208,7 @@ func UpdateEndpointsMap(endpointsMap EndpointsMap, changes *EndpointChangeTracke // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. result.HCEndpointsLocalIPSize = make(map[types.NamespacedName]int) - localIPs := GetLocalEndpointIPs(endpointsMap) + localIPs := endpointsMap.getLocalEndpointIPs() for nsn, ips := range localIPs { result.HCEndpointsLocalIPSize[nsn] = len(ips) } @@ -316,9 +316,9 @@ func (em EndpointsMap) unmerge(other EndpointsMap) { } // GetLocalEndpointIPs returns endpoints IPs if given endpoint is local - local means the endpoint is running in same host as kube-proxy. -func GetLocalEndpointIPs(endpointsMap EndpointsMap) map[types.NamespacedName]sets.String { +func (em EndpointsMap) getLocalEndpointIPs() map[types.NamespacedName]sets.String { localIPs := make(map[types.NamespacedName]sets.String) - for svcPortName, epList := range endpointsMap { + for svcPortName, epList := range em { for _, ep := range epList { if ep.GetIsLocal() { nsn := svcPortName.NamespacedName diff --git a/pkg/proxy/endpoints_test.go b/pkg/proxy/endpoints_test.go index 650aa1da994..9f464985149 100644 --- a/pkg/proxy/endpoints_test.go +++ b/pkg/proxy/endpoints_test.go @@ -112,7 +112,7 @@ func TestGetLocalEndpointIPs(t *testing.T) { for tci, tc := range testCases { // outputs - localIPs := GetLocalEndpointIPs(tc.endpointsMap) + localIPs := tc.endpointsMap.getLocalEndpointIPs() if !reflect.DeepEqual(localIPs, tc.expected) { t.Errorf("[%d] expected %#v, got %#v", tci, tc.expected, localIPs) From 9d4693a70f759780816c6174f98ff651dc725b88 Mon Sep 17 00:00:00 2001 From: Jacob Tanenbaum Date: Mon, 29 Apr 2019 15:28:47 -0400 Subject: [PATCH 035/194] changing UpdateEndpointsMap to Update changing UpdateEndpointsMap to be a function of the EndpointsMap object --- pkg/proxy/endpoints.go | 6 +++--- pkg/proxy/endpoints_test.go | 6 +++--- pkg/proxy/iptables/proxier.go | 2 +- pkg/proxy/iptables/proxier_test.go | 4 ++-- pkg/proxy/ipvs/proxier.go | 2 +- pkg/proxy/ipvs/proxier_test.go | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index b849f532d97..1bd993f498a 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -197,18 +197,18 @@ type UpdateEndpointMapResult struct { } // UpdateEndpointsMap updates endpointsMap base on the given changes. 
-func UpdateEndpointsMap(endpointsMap EndpointsMap, changes *EndpointChangeTracker) (result UpdateEndpointMapResult) { +func (em EndpointsMap) Update(changes *EndpointChangeTracker) (result UpdateEndpointMapResult) { result.StaleEndpoints = make([]ServiceEndpoint, 0) result.StaleServiceNames = make([]ServicePortName, 0) result.LastChangeTriggerTimes = make([]time.Time, 0) - endpointsMap.apply( + em.apply( changes, &result.StaleEndpoints, &result.StaleServiceNames, &result.LastChangeTriggerTimes) // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. result.HCEndpointsLocalIPSize = make(map[types.NamespacedName]int) - localIPs := endpointsMap.getLocalEndpointIPs() + localIPs := em.getLocalEndpointIPs() for nsn, ips := range localIPs { result.HCEndpointsLocalIPSize[nsn] = len(ips) } diff --git a/pkg/proxy/endpoints_test.go b/pkg/proxy/endpoints_test.go index 9f464985149..68227425fef 100644 --- a/pkg/proxy/endpoints_test.go +++ b/pkg/proxy/endpoints_test.go @@ -1213,7 +1213,7 @@ func TestUpdateEndpointsMap(t *testing.T) { fp.addEndpoints(tc.previousEndpoints[i]) } } - UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + fp.endpointsMap.Update(fp.endpointsChanges) compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we want to be. @@ -1233,7 +1233,7 @@ func TestUpdateEndpointsMap(t *testing.T) { fp.updateEndpoints(prev, curr) } } - result := UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + result := fp.endpointsMap.Update(fp.endpointsChanges) newMap := fp.endpointsMap compareEndpointsMaps(t, tci, newMap, tc.expectedResult) if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) { @@ -1373,7 +1373,7 @@ func TestLastChangeTriggerTime(t *testing.T) { tc.scenario(fp) - result := UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + result := fp.endpointsMap.Update(fp.endpointsChanges) got := result.LastChangeTriggerTimes sortTimeSlice(got) sortTimeSlice(tc.expected) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 5911f0e623e..c601a0470f9 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -666,7 +666,7 @@ func (proxier *Proxier) syncProxyRules() { // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. serviceUpdateResult := proxy.UpdateServiceMap(proxier.serviceMap, proxier.serviceChanges) - endpointUpdateResult := proxy.UpdateEndpointsMap(proxier.endpointsMap, proxier.endpointsChanges) + endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) staleServices := serviceUpdateResult.UDPStaleClusterIP // merge stale services gathered from updateEndpointsMap diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index c096753d0d9..a23cc247f20 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -2185,7 +2185,7 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsAdd(tc.previousEndpoints[i]) } } - proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + fp.endpointsMap.Update(fp.endpointsChanges) compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we want to be. 
@@ -2205,7 +2205,7 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsUpdate(prev, curr) } } - result := proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + result := fp.endpointsMap.Update(fp.endpointsChanges) newMap := fp.endpointsMap compareEndpointsMaps(t, tci, newMap, tc.expectedResult) if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) { diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index d1ca4fb0014..82887f9c7fa 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -753,7 +753,7 @@ func (proxier *Proxier) syncProxyRules() { // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. serviceUpdateResult := proxy.UpdateServiceMap(proxier.serviceMap, proxier.serviceChanges) - endpointUpdateResult := proxy.UpdateEndpointsMap(proxier.endpointsMap, proxier.endpointsChanges) + endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) staleServices := serviceUpdateResult.UDPStaleClusterIP // merge stale services gathered from updateEndpointsMap diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 9b599241ea2..288dadc5476 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -2486,7 +2486,7 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsAdd(tc.previousEndpoints[i]) } } - proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + fp.endpointsMap.Update(fp.endpointsChanges) compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we want to be. @@ -2506,7 +2506,7 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsUpdate(prev, curr) } } - result := proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + result := fp.endpointsMap.Update(fp.endpointsChanges) newMap := fp.endpointsMap compareEndpointsMaps(t, tci, newMap, tc.expectedResult) if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) { From c1cd89f5cd287ada32317d591cafaa736dbe7d7b Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 7 May 2019 21:47:03 +0200 Subject: [PATCH 036/194] Update kubectl exec use to put flags in the right place --- pkg/kubectl/cmd/exec/exec.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/exec/exec.go b/pkg/kubectl/cmd/exec/exec.go index 9dc19344d1c..ee1b8da9d1b 100644 --- a/pkg/kubectl/cmd/exec/exec.go +++ b/pkg/kubectl/cmd/exec/exec.go @@ -82,7 +82,7 @@ func NewCmdExec(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C Executor: &DefaultRemoteExecutor{}, } cmd := &cobra.Command{ - Use: "exec (POD | TYPE/NAME) [-c CONTAINER] -- COMMAND [args...]", + Use: "exec (POD | TYPE/NAME) [-c CONTAINER] [flags] -- COMMAND [args...]", DisableFlagsInUseLine: true, Short: i18n.T("Execute a command in a container"), Long: "Execute a command in a container.", From 1ea5a692fea037e23053b000e143669d12637d44 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 8 May 2019 19:31:36 +0800 Subject: [PATCH 037/194] Add support of shared resource group for Azure public IP To support this, a new tag "kubernetes-cluster-name" is added to public IP which indicates the kubernetes cluster name (set in kube-controller-manager). 
--- .../azure/azure_loadbalancer.go | 41 ++++++++-- .../azure/azure_loadbalancer_test.go | 79 +++++++++++++++++++ .../azure/azure_test.go | 17 +++- 3 files changed, 126 insertions(+), 11 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 58f3bb637ae..03601209734 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -83,6 +83,11 @@ const ( // ServiceAnnotationLoadBalancerMixedProtocols is the annotation used on the service // to create both TCP and UDP protocols when creating load balancer rules. ServiceAnnotationLoadBalancerMixedProtocols = "service.beta.kubernetes.io/azure-load-balancer-mixed-protocols" + + // serviceTagKey is the service key applied for public IP tags. + serviceTagKey = "service" + // clusterNameKey is the cluster name key applied for public IP tags. + clusterNameKey = "kubernetes-cluster-name" ) var ( @@ -465,7 +470,7 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s return lbStatus.Ingress[0].IP, nil } -func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel string) (*network.PublicIPAddress, error) { +func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel, clusterName string) (*network.PublicIPAddress, error) { pipResourceGroup := az.getPublicIPAddressResourceGroup(service) pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName) if err != nil { @@ -486,7 +491,10 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai DomainNameLabel: &domainNameLabel, } } - pip.Tags = map[string]*string{"service": &serviceName} + pip.Tags = map[string]*string{ + serviceTagKey: &serviceName, + clusterNameKey: &clusterName, + } if az.useStandardLoadBalancer() { pip.Sku = &network.PublicIPAddressSku{ Name: network.PublicIPAddressSkuNameStandard, @@ -711,7 +719,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, return nil, err } domainNameLabel := getPublicIPDomainNameLabel(service) - pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel) + pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel, clusterName) if err != nil { return nil, err } @@ -1344,9 +1352,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb * for i := range pips { pip := pips[i] - if pip.Tags != nil && - (pip.Tags)["service"] != nil && - *(pip.Tags)["service"] == serviceName { + if serviceOwnsPublicIP(&pip, clusterName, serviceName) { // We need to process for pips belong to this service pipName := *pip.Name if wantLb && !isInternal && pipName == desiredPipName { @@ -1369,7 +1375,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb * // Confirm desired public ip resource exists var pip *network.PublicIPAddress domainNameLabel := getPublicIPDomainNameLabel(service) - if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel); err != nil { + if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel, clusterName); err != nil { return nil, err } return pip, nil @@ -1612,3 +1618,24 @@ func getServiceTags(service *v1.Service) ([]string, error) { return nil, nil } + +func serviceOwnsPublicIP(pip *network.PublicIPAddress, clusterName, serviceName string) bool { + if pip != nil && pip.Tags 
!= nil { + serviceTag := pip.Tags[serviceTagKey] + clusterTag := pip.Tags[clusterNameKey] + + if serviceTag != nil && *serviceTag == serviceName { + // Backward compatible for clusters upgraded from old releases. + // In such case, only "service" tag is set. + if clusterTag == nil { + return true + } + + // If cluster name tag is set, then return true if it matches. + if *clusterTag == clusterName { + return true + } + } + } + return false +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go index 06c26291c22..0efb7ee2974 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go @@ -368,3 +368,82 @@ func TestEnsureLoadBalancerDeleted(t *testing.T) { assert.Equal(t, len(result), 0, "TestCase[%d]: %s", i, c.desc) } } + +func TestServiceOwnsPublicIP(t *testing.T) { + tests := []struct { + desc string + pip *network.PublicIPAddress + clusterName string + serviceName string + expected bool + }{ + { + desc: "false should be returned when pip is nil", + clusterName: "kubernetes", + serviceName: "nginx", + expected: false, + }, + { + desc: "false should be returned when service name tag doesn't match", + pip: &network.PublicIPAddress{ + Tags: map[string]*string{ + serviceTagKey: to.StringPtr("nginx"), + }, + }, + serviceName: "web", + expected: false, + }, + { + desc: "true should be returned when service name tag matches and cluster name tag is not set", + pip: &network.PublicIPAddress{ + Tags: map[string]*string{ + serviceTagKey: to.StringPtr("nginx"), + }, + }, + clusterName: "kubernetes", + serviceName: "nginx", + expected: true, + }, + { + desc: "false should be returned when cluster name doesn't match", + pip: &network.PublicIPAddress{ + Tags: map[string]*string{ + serviceTagKey: to.StringPtr("nginx"), + clusterNameKey: to.StringPtr("kubernetes"), + }, + }, + clusterName: "k8s", + serviceName: "nginx", + expected: false, + }, + { + desc: "false should be returned when cluster name matches while service name doesn't match", + pip: &network.PublicIPAddress{ + Tags: map[string]*string{ + serviceTagKey: to.StringPtr("web"), + clusterNameKey: to.StringPtr("kubernetes"), + }, + }, + clusterName: "kubernetes", + serviceName: "nginx", + expected: false, + }, + { + desc: "true should be returned when both service name tag and cluster name match", + pip: &network.PublicIPAddress{ + Tags: map[string]*string{ + serviceTagKey: to.StringPtr("nginx"), + clusterNameKey: to.StringPtr("kubernetes"), + }, + }, + clusterName: "kubernetes", + serviceName: "nginx", + expected: true, + }, + } + + for i, c := range tests { + owns := serviceOwnsPublicIP(c.pip, c.clusterName, c.serviceName) + assert.Equal(t, owns, c.expected, "TestCase[%d]: %s", i, c.desc) + } +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go index efcfbad5602..9a8ec28b1eb 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go @@ -1367,14 +1367,23 @@ func validatePublicIP(t *testing.T, publicIP *network.PublicIPAddress, service * t.Errorf("Expected publicIP resource exists, when it is not an internal service") } - if publicIP.Tags == nil || publicIP.Tags["service"] == nil { - t.Errorf("Expected publicIP resource has tags[service]") + if publicIP.Tags 
== nil || publicIP.Tags[serviceTagKey] == nil { + t.Errorf("Expected publicIP resource has tags[%s]", serviceTagKey) } serviceName := getServiceName(service) - if serviceName != *(publicIP.Tags["service"]) { - t.Errorf("Expected publicIP resource has matching tags[service]") + if serviceName != *(publicIP.Tags[serviceTagKey]) { + t.Errorf("Expected publicIP resource has matching tags[%s]", serviceTagKey) } + + if publicIP.Tags[clusterNameKey] == nil { + t.Errorf("Expected publicIP resource has tags[%s]", clusterNameKey) + } + + if *(publicIP.Tags[clusterNameKey]) != testClusterName { + t.Errorf("Expected publicIP resource has matching tags[%s]", clusterNameKey) + } + // We cannot use service.Spec.LoadBalancerIP to compare with // Public IP's IPAddress // Because service properties are updated outside of cloudprovider code From 62c5c6345e1fea1bdf7d24e0b28cdb1498f2fc48 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 8 May 2019 17:28:13 +0200 Subject: [PATCH 038/194] Revert "Make external driver storage class name generation contain a more random suffix in case of double generation in the same framework context (twice in the same test)" This reverts commit c50e7fd30155d05fdde4174169945a43e26cf867 because it included API changes that shouldn't have been in that PR and fixing the storage class conflict inside the framework is probably the wrong place. --- test/e2e/framework/BUILD | 1 - test/e2e/framework/create.go | 52 +++++++++++++++--------------------- 2 files changed, 21 insertions(+), 32 deletions(-) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index fe67cc58134..6d317ac5f7b 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -92,7 +92,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library", diff --git a/test/e2e/framework/create.go b/test/e2e/framework/create.go index c250bc1b3dc..bd22e0f646c 100644 --- a/test/e2e/framework/create.go +++ b/test/e2e/framework/create.go @@ -31,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/storage/names" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/test/e2e/framework/testfiles" @@ -101,9 +100,9 @@ func visitManifests(cb func([]byte) error, files ...string) error { return nil } -// PatchItems modifies the given items in place such that each -// test gets its own instances, to avoid conflicts between different tests and -// between tests and normal deployments. +// PatchItems modifies the given items in place such that each test +// gets its own instances, to avoid conflicts between different tests +// and between tests and normal deployments. // // This is done by: // - creating namespaced items inside the test's namespace @@ -288,27 +287,18 @@ var factories = map[What]ItemFactory{ {"StorageClass"}: &storageClassFactory{}, } -// uniquifyName makes the name of some item unique per namespace by appending the -// generated unique name of the test namespace. 
-func (f *Framework) uniquifyName(item *string) { +// PatchName makes the name of some item unique by appending the +// generated unique name. +func (f *Framework) PatchName(item *string) { if *item != "" { *item = *item + "-" + f.UniqueName } } -// randomizeStorageClassName makes the name of the storage class unique per call -// by appending the generated unique name of the test namespace and a random 5 -// character string -func (f *Framework) randomizeStorageClassName(item *string) { - if *item != "" { - *item = names.SimpleNameGenerator.GenerateName(*item + "-" + f.UniqueName + "-") - } -} - -// patchNamespace moves the item into the test's namespace. Not +// PatchNamespace moves the item into the test's namespace. Not // all items can be namespaced. For those, the name also needs to be // patched. -func (f *Framework) patchNamespace(item *string) { +func (f *Framework) PatchNamespace(item *string) { if f.Namespace != nil { *item = f.Namespace.GetName() } @@ -317,31 +307,31 @@ func (f *Framework) patchNamespace(item *string) { func (f *Framework) patchItemRecursively(item interface{}) error { switch item := item.(type) { case *rbac.Subject: - f.patchNamespace(&item.Namespace) + f.PatchNamespace(&item.Namespace) case *rbac.RoleRef: // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles // which contains all role names that are defined cluster-wide before the test starts? // All those names are excempt from renaming. That list could be populated by querying // and get extended by tests. if item.Name != "e2e-test-privileged-psp" { - f.uniquifyName(&item.Name) + f.PatchName(&item.Name) } case *rbac.ClusterRole: - f.uniquifyName(&item.Name) + f.PatchName(&item.Name) case *rbac.Role: - f.patchNamespace(&item.Namespace) + f.PatchNamespace(&item.Namespace) // Roles are namespaced, but because for RoleRef above we don't // know whether the referenced role is a ClusterRole or Role // and therefore always renames, we have to do the same here. 
- f.uniquifyName(&item.Name) + f.PatchName(&item.Name) case *storage.StorageClass: - f.randomizeStorageClassName(&item.Name) + f.PatchName(&item.Name) case *v1.ServiceAccount: - f.patchNamespace(&item.ObjectMeta.Namespace) + f.PatchNamespace(&item.ObjectMeta.Namespace) case *v1.Secret: - f.patchNamespace(&item.ObjectMeta.Namespace) + f.PatchNamespace(&item.ObjectMeta.Namespace) case *rbac.ClusterRoleBinding: - f.uniquifyName(&item.Name) + f.PatchName(&item.Name) for i := range item.Subjects { if err := f.patchItemRecursively(&item.Subjects[i]); err != nil { return errors.Wrapf(err, "%T", f) @@ -351,7 +341,7 @@ func (f *Framework) patchItemRecursively(item interface{}) error { return errors.Wrapf(err, "%T", f) } case *rbac.RoleBinding: - f.patchNamespace(&item.Namespace) + f.PatchNamespace(&item.Namespace) for i := range item.Subjects { if err := f.patchItemRecursively(&item.Subjects[i]); err != nil { return errors.Wrapf(err, "%T", f) @@ -361,11 +351,11 @@ func (f *Framework) patchItemRecursively(item interface{}) error { return errors.Wrapf(err, "%T", f) } case *v1.Service: - f.patchNamespace(&item.ObjectMeta.Namespace) + f.PatchNamespace(&item.ObjectMeta.Namespace) case *apps.StatefulSet: - f.patchNamespace(&item.ObjectMeta.Namespace) + f.PatchNamespace(&item.ObjectMeta.Namespace) case *apps.DaemonSet: - f.patchNamespace(&item.ObjectMeta.Namespace) + f.PatchNamespace(&item.ObjectMeta.Namespace) default: return errors.Errorf("missing support for patching item of type %T", item) } From 9546fd540eebbc6e93c74c4a1bccce9b87b88ca1 Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Wed, 8 May 2019 10:14:16 -0700 Subject: [PATCH 039/194] Impose length limit when concatenating revision history --- pkg/controller/deployment/sync.go | 8 ++++++-- .../deployment/util/deployment_util.go | 19 +++++++++++++++---- .../deployment/util/deployment_util_test.go | 12 +++++++++--- 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index b59aec417db..a92904dd3d5 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -124,6 +124,10 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deploym return newRS, allOldRSs, nil } +const ( + maxRevHistoryLengthInChars = 2000 +) + // Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet. // 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's). // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes. 
@@ -145,7 +149,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old rsCopy := existingNewRS.DeepCopy() // Set existing new replica set's annotation - annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(d, rsCopy, newRevision, true) + annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(d, rsCopy, newRevision, true, maxRevHistoryLengthInChars) minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds @@ -209,7 +213,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old *(newRS.Spec.Replicas) = newReplicasCount // Set new replica set's annotation - deploymentutil.SetNewReplicaSetAnnotations(d, &newRS, newRevision, false) + deploymentutil.SetNewReplicaSetAnnotations(d, &newRS, newRevision, false, maxRevHistoryLengthInChars) // Create the new ReplicaSet. If it already exists, then we need to check for possible // hash collisions. If there is any other error, we need to report it in the status of // the Deployment. diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index a731cc7bb5e..36e83f28aef 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -227,7 +227,7 @@ func Revision(obj runtime.Object) (int64, error) { // SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if replica set's annotation is changed. -func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool { +func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool { // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS) // Then, update replica set's revision annotation @@ -261,14 +261,25 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic // If a revision annotation already existed and this replica set was updated with a new revision // then that means we are rolling back to this replica set. We need to preserve the old revisions // for historical information. 
- if ok && annotationChanged { + if ok && oldRevisionInt < newRevisionInt { revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation] oldRevisions := strings.Split(revisionHistoryAnnotation, ",") if len(oldRevisions[0]) == 0 { newRS.Annotations[RevisionHistoryAnnotation] = oldRevision } else { - oldRevisions = append(oldRevisions, oldRevision) - newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",") + totalLen := len(revisionHistoryAnnotation) + len(oldRevision) + 1 + // index for the starting position in oldRevisions + start := 0 + for totalLen > revHistoryLimitInChars && start < len(oldRevisions) { + totalLen = totalLen - len(oldRevisions[start]) - 1 + start++ + } + if totalLen <= revHistoryLimitInChars { + oldRevisions = append(oldRevisions[start:], oldRevision) + newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",") + } else { + klog.Warningf("Not appending revision due to length limit of %v reached", revHistoryLimitInChars) + } } } // If the new replica set is about to be created, we need to add replica annotations to it. diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go index 5e21f501f91..38fed0e4fbc 100644 --- a/pkg/controller/deployment/util/deployment_util_test.go +++ b/pkg/controller/deployment/util/deployment_util_test.go @@ -1274,13 +1274,19 @@ func TestAnnotationUtils(t *testing.T) { //Test Case 1: Check if anotations are copied properly from deployment to RS t.Run("SetNewReplicaSetAnnotations", func(t *testing.T) { - //Try to set the increment revision from 1 through 20 - for i := 0; i < 20; i++ { + //Try to set the increment revision from 11 through 20 + for i := 10; i < 20; i++ { nextRevision := fmt.Sprintf("%d", i+1) - SetNewReplicaSetAnnotations(&tDeployment, &tRS, nextRevision, true) + SetNewReplicaSetAnnotations(&tDeployment, &tRS, nextRevision, true, 5) //Now the ReplicaSets Revision Annotation should be i+1 + if i >= 12 { + expectedHistoryAnnotation := fmt.Sprintf("%d,%d", i-1, i) + if tRS.Annotations[RevisionHistoryAnnotation] != expectedHistoryAnnotation { + t.Errorf("Revision History Expected=%s Obtained=%s", expectedHistoryAnnotation, tRS.Annotations[RevisionHistoryAnnotation]) + } + } if tRS.Annotations[RevisionAnnotation] != nextRevision { t.Errorf("Revision Expected=%s Obtained=%s", nextRevision, tRS.Annotations[RevisionAnnotation]) } From 436544488b865ef801b1875b40895e1ee30b0fea Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Wed, 20 Feb 2019 19:22:16 +0000 Subject: [PATCH 040/194] Add ExpectError() to e2e test framework There is a lot of gomega.Expect(err).To(gomega.HaveOccurred()) callers which expect an error happens in e2e tests. However these test code seems confusing because the code readers need to take care of To() or NotTo() on each test scenario. This adds ExpectError() for more readable test code. In addition, this applies ExpectError() to e2e provisioning.go as a sample. 
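As a hedged illustration of the readability difference this helper is after (the pod-deletion scenario, helper name, and namespace below are invented for this example; only framework.ExpectError itself comes from this patch):

    package example // hypothetical test helper package

    import (
        "github.com/onsi/gomega"

        clientset "k8s.io/client-go/kubernetes"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // expectDeleteOfMissingPodFails writes the same assertion both ways.
    func expectDeleteOfMissingPodFails(c clientset.Interface, ns string) {
        err := c.CoreV1().Pods(ns).Delete("no-such-pod", nil)

        // Before: the reader has to spot whether the chain says To or NotTo.
        gomega.Expect(err).To(gomega.HaveOccurred(), "deleting a missing pod should fail")

        // After: the expectation is named directly.
        framework.ExpectError(err, "deleting a missing pod should fail")
    }

Either form fails the test when err is nil; the helper only makes the intent explicit at the call site.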
--- test/e2e/framework/util.go | 5 +++++ test/e2e/storage/testsuites/provisioning.go | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index fd264d662c1..36c64c45733 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -2038,6 +2038,11 @@ func RandomSuffix() string { return strconv.Itoa(r.Int() % 10000) } +// ExpectError expects an error happens, otherwise an exception raises +func ExpectError(err error, explain ...interface{}) { + gomega.Expect(err).To(gomega.HaveOccurred(), explain...) +} + // ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. func ExpectNoError(err error, explain ...interface{}) { ExpectNoErrorWithOffset(1, err, explain...) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 61e5d53415e..9d76110f6bf 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -484,7 +484,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out By("checking the claims are in pending state") err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) - Expect(err).To(HaveOccurred()) + framework.ExpectError(err) verifyPVCsPending(t.Client, createdClaims) By("creating a pod referring to the claims") From 85fc0898556d5c0c4b80920bac62842bfc97d763 Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Wed, 8 May 2019 14:31:47 -0700 Subject: [PATCH 041/194] Use map to check whether stack trace is needed Signed-off-by: Ted Yu --- pkg/kubelet/server/server.go | 22 +++++++++---------- .../apiserver/pkg/server/httplog/httplog.go | 12 +++++----- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 4a9d9080d4c..7e5740d03a0 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -835,19 +835,19 @@ func isLongRunningRequest(path string) bool { return false } +var statusesNoTracePred = httplog.StatusIsNot( + http.StatusOK, + http.StatusFound, + http.StatusMovedPermanently, + http.StatusTemporaryRedirect, + http.StatusBadRequest, + http.StatusNotFound, + http.StatusSwitchingProtocols, +) + // ServeHTTP responds to HTTP requests on the Kubelet. func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { - defer httplog.NewLogged(req, &w).StacktraceWhen( - httplog.StatusIsNot( - http.StatusOK, - http.StatusFound, - http.StatusMovedPermanently, - http.StatusTemporaryRedirect, - http.StatusBadRequest, - http.StatusNotFound, - http.StatusSwitchingProtocols, - ), - ).Log() + defer httplog.NewLogged(req, &w).StacktraceWhen(statusesNoTracePred).Log() // monitor http requests var serverType string diff --git a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go index dcdba69225d..8b7f1fd0474 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -125,13 +125,13 @@ func (rl *respLogger) StacktraceWhen(pred StacktracePred) *respLogger { // StatusIsNot returns a StacktracePred which will cause stacktraces to be logged // for any status *not* in the given list. 
func StatusIsNot(statuses ...int) StacktracePred { + statusesNoTrace := map[int]bool{} + for _, s := range statuses { + statusesNoTrace[s] = true + } return func(status int) bool { - for _, s := range statuses { - if status == s { - return false - } - } - return true + _, ok := statusesNoTrace[status] + return !ok } } From 31ffa88a14340a587fb0544c60fcf422bd64f57c Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Wed, 8 May 2019 15:43:01 -0700 Subject: [PATCH 042/194] Update to go 1.12.5 --- build/build-image/cross/Dockerfile | 2 +- build/build-image/cross/VERSION | 2 +- build/root/WORKSPACE | 6 +++--- test/images/Makefile | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/build/build-image/cross/Dockerfile b/build/build-image/cross/Dockerfile index f0d0cd1f482..952c00bcb2c 100644 --- a/build/build-image/cross/Dockerfile +++ b/build/build-image/cross/Dockerfile @@ -15,7 +15,7 @@ # This file creates a standard build environment for building cross # platform go binary for the architecture kubernetes cares about. -FROM golang:1.12.4 +FROM golang:1.12.5 ENV GOARM 7 ENV KUBE_DYNAMIC_CROSSPLATFORMS \ diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index 3ff2cc2b66f..ffe90318adf 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.12.4-1 +v1.12.5-1 diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index e103e3b6da9..45f1ff382b6 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -42,8 +42,8 @@ http_archive( http_archive( name = "io_bazel_rules_go", - sha256 = "91b79f4758fd16f2c6426279ce00c1d2d8577d61c519db39675ed84657e1a95e", - urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.4/rules_go-0.17.4.tar.gz"), + sha256 = "87a089eabf919de29eb4711daa52ffbc4b22b2c045949c20503357a3cadf1037", + urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.5/rules_go-0.17.5.tar.gz"), ) load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") @@ -51,7 +51,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe go_rules_dependencies() go_register_toolchains( - go_version = "1.12.4", + go_version = "1.12.5", ) http_archive( diff --git a/test/images/Makefile b/test/images/Makefile index c5a1797e01f..f664b5fa261 100644 --- a/test/images/Makefile +++ b/test/images/Makefile @@ -17,7 +17,7 @@ include ../../hack/make-rules/Makefile.manifest REGISTRY ?= gcr.io/kubernetes-e2e-test-images GOARM=7 QEMUVERSION=v2.9.1 -GOLANG_VERSION=1.12.4 +GOLANG_VERSION=1.12.5 export ifndef WHAT From 4f3653f026ae5b0ce6e3fc606754743695857f59 Mon Sep 17 00:00:00 2001 From: John Belamaric Date: Wed, 8 May 2019 16:37:25 -0700 Subject: [PATCH 043/194] Add johnbelamaric as conformance test reviewer --- test/conformance/testdata/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/test/conformance/testdata/OWNERS b/test/conformance/testdata/OWNERS index b75318b1fb7..befcd9fa4da 100644 --- a/test/conformance/testdata/OWNERS +++ b/test/conformance/testdata/OWNERS @@ -9,6 +9,7 @@ reviewers: - spiffxp - timothysc - dims + - johnbelamaric approvers: - bgrant0607 - smarterclayton From cf17869b61b54b6d9150fa31f635782334e4a119 Mon Sep 17 00:00:00 2001 From: Han Kang Date: Wed, 8 May 2019 16:38:39 -0700 Subject: [PATCH 044/194] add sig-instrumentation to owners in component-base/metrics --- staging/src/k8s.io/component-base/metrics/OWNERS | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 
staging/src/k8s.io/component-base/metrics/OWNERS diff --git a/staging/src/k8s.io/component-base/metrics/OWNERS b/staging/src/k8s.io/component-base/metrics/OWNERS new file mode 100644 index 00000000000..44a1c2003fa --- /dev/null +++ b/staging/src/k8s.io/component-base/metrics/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- piosz +- brancz +reviewers: +- sig-instrumentation-pr-reviews +labels: +- sig/instrumentation From 4d4e4f4056af718b674ff7bc3dc7d0f8dee7c86b Mon Sep 17 00:00:00 2001 From: hui luo Date: Fri, 3 May 2019 15:12:20 -0700 Subject: [PATCH 045/194] Modify apimachinery,apps,instrumentation tests to import framework/log This is part of the transition to using framework/log instead of the Logf inside the framework package. This will help with import size/cycles when importing the framework or subpackages --- test/e2e/apimachinery/BUILD | 1 + test/e2e/apimachinery/aggregator.go | 11 +- test/e2e/apimachinery/chunking.go | 15 +- .../apimachinery/crd_conversion_webhook.go | 3 +- test/e2e/apimachinery/etcd_failure.go | 5 +- test/e2e/apimachinery/garbage_collector.go | 51 +++--- test/e2e/apimachinery/namespace.go | 3 +- test/e2e/apimachinery/resource_quota.go | 3 +- test/e2e/apimachinery/table_conversion.go | 13 +- test/e2e/apimachinery/watch.go | 3 +- test/e2e/apimachinery/webhook.go | 3 +- test/e2e/apps/BUILD | 1 + test/e2e/apps/cronjob.go | 3 +- test/e2e/apps/daemon_restart.go | 15 +- test/e2e/apps/daemon_set.go | 45 ++--- test/e2e/apps/deployment.go | 165 +++++++++--------- test/e2e/apps/disruption.go | 5 +- test/e2e/apps/network_partition.go | 29 +-- test/e2e/apps/rc.go | 9 +- test/e2e/apps/replica_set.go | 9 +- test/e2e/apps/statefulset.go | 27 +-- test/e2e/instrumentation/logging/BUILD | 1 + .../logging/elasticsearch/BUILD | 1 + .../logging/elasticsearch/kibana.go | 7 +- .../logging/elasticsearch/utils.go | 43 ++--- .../instrumentation/logging/generic_soak.go | 7 +- .../instrumentation/logging/stackdriver/BUILD | 1 + .../logging/stackdriver/basic.go | 3 +- .../logging/stackdriver/soak.go | 3 +- .../logging/stackdriver/utils.go | 33 ++-- test/e2e/instrumentation/logging/utils/BUILD | 1 + .../logging/utils/logging_agent.go | 5 +- .../logging/utils/logging_pod.go | 5 +- .../e2e/instrumentation/logging/utils/wait.go | 6 +- test/e2e/instrumentation/monitoring/BUILD | 1 + .../instrumentation/monitoring/accelerator.go | 7 +- .../instrumentation/monitoring/cadvisor.go | 3 +- .../monitoring/custom_metrics_deployments.go | 23 +-- .../monitoring/custom_metrics_stackdriver.go | 5 +- .../monitoring/metrics_grabber.go | 5 +- .../instrumentation/monitoring/prometheus.go | 9 +- .../instrumentation/monitoring/stackdriver.go | 13 +- 42 files changed, 321 insertions(+), 280 deletions(-) diff --git a/test/e2e/apimachinery/BUILD b/test/e2e/apimachinery/BUILD index d0482d0c57b..d2c037f795d 100644 --- a/test/e2e/apimachinery/BUILD +++ b/test/e2e/apimachinery/BUILD @@ -81,6 +81,7 @@ go_library( "//test/e2e/apps:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/deployment:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 48c4f7ce9f6..396254897ff 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -41,6 +41,7 @@ import ( rbacv1beta1helpers 
"k8s.io/kubernetes/pkg/apis/rbac/v1beta1" "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" "k8s.io/utils/pointer" @@ -373,16 +374,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }, "Waited %s for the sample-apiserver to be ready to handle requests.") if err != nil { currentAPIServiceJSON, _ := json.Marshal(currentAPIService) - framework.Logf("current APIService: %s", string(currentAPIServiceJSON)) + e2elog.Logf("current APIService: %s", string(currentAPIServiceJSON)) currentPodsJSON, _ := json.Marshal(currentPods) - framework.Logf("current pods: %s", string(currentPodsJSON)) + e2elog.Logf("current pods: %s", string(currentPodsJSON)) if currentPods != nil { for _, pod := range currentPods.Items { for _, container := range pod.Spec.Containers { logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name) - framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs) + e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs) } } } @@ -485,12 +486,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl } // pollTimed will call Poll but time how long Poll actually took. -// It will then framework.logf the msg with the duration of the Poll. +// It will then e2elog.Logf the msg with the duration of the Poll. // It is assumed that msg will contain one %s for the elapsed time. func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error { defer func(start time.Time, msg string) { elapsed := time.Since(start) - framework.Logf(msg, elapsed) + e2elog.Logf(msg, elapsed) }(time.Now(), msg) return wait.Poll(interval, timeout, condition) } diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index 47ac30f0a78..4a5b817abea 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -33,6 +33,7 @@ import ( "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) const numberOfTotalResources = 400 @@ -62,7 +63,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { if err == nil { return } - framework.Logf("Got an error creating template %d: %v", i, err) + e2elog.Logf("Got an error creating template %d: %v", i, err) } ginkgo.Fail("Unable to create template %d, exiting", i) }) @@ -81,7 +82,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1) list, err := client.List(opts) gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) - framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue) + e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue) gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit)) if len(lastRV) == 0 { @@ -122,7 +123,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { firstToken := list.Continue firstRV := list.ResourceVersion gomega.Expect(int(list.RemainingItemCount) + 
len(list.Items)).To(gomega.BeNumerically("==", numberOfTotalResources)) - framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken) + e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken) ginkgo.By("retrieving the second page until the token expires") opts.Continue = firstToken @@ -130,13 +131,13 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) { _, err := client.List(opts) if err == nil { - framework.Logf("Token %s has not expired yet", firstToken) + e2elog.Logf("Token %s has not expired yet", firstToken) return false, nil } if err != nil && !errors.IsResourceExpired(err) { return false, err } - framework.Logf("got error %s", err) + e2elog.Logf("got error %s", err) status, ok := err.(errors.APIStatus) if !ok { return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err)) @@ -145,7 +146,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { if len(inconsistentToken) == 0 { return false, fmt.Errorf("expect non empty continue token") } - framework.Logf("Retrieved inconsistent continue %s", inconsistentToken) + e2elog.Logf("Retrieved inconsistent continue %s", inconsistentToken) return true, nil }) @@ -169,7 +170,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { list, err := client.List(opts) gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) gomega.Expect(int(list.RemainingItemCount) + len(list.Items) + found).To(gomega.BeNumerically("==", numberOfTotalResources)) - framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue) + e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue) gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit)) gomega.Expect(list.ResourceVersion).To(gomega.Equal(lastRV)) for _, item := range list.Items { diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 6d0755671b7..3389e84f4c8 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -33,6 +33,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/utils/pointer" @@ -169,7 +170,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa }, }) if err != nil && errors.IsAlreadyExists(err) { - framework.Logf("role binding %s already exists", roleBindingCRDName) + e2elog.Logf("role binding %s already exists", roleBindingCRDName) } else { framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace) } diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index 760cca7e845..47f1bff7112 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -25,6 +25,7 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" 
"k8s.io/kubernetes/test/e2e/apps" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -114,7 +115,7 @@ func checkExistingRCRecovers(f *framework.Framework) { options := metav1.ListOptions{LabelSelector: rcSelector.String()} pods, err := podClient.List(options) if err != nil { - framework.Logf("apiserver returned error, as expected before recovery: %v", err) + e2elog.Logf("apiserver returned error, as expected before recovery: %v", err) return false, nil } if len(pods.Items) == 0 { @@ -124,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) { err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name) } - framework.Logf("apiserver has recovered") + e2elog.Logf("apiserver has recovered") return true, nil })) diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 14e242865e8..4893dbf9e36 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -38,6 +38,7 @@ import ( "k8s.io/apiserver/pkg/storage/names" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/metrics" "github.com/onsi/ginkgo" @@ -244,14 +245,14 @@ func gatherMetrics(f *framework.Framework) { var summary framework.TestDataSummary grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false) if err != nil { - framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") + e2elog.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { received, err := grabber.Grab() if err != nil { - framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.") + e2elog.Logf("MetricsGrabber failed grab metrics. 
Skipping metrics gathering.") } else { summary = (*framework.MetricsForE2E)(&received) - framework.Logf(summary.PrintHumanReadable()) + e2elog.Logf(summary.PrintHumanReadable()) } } } @@ -653,15 +654,15 @@ var _ = SIGDescribe("Garbage collector", func() { _, err := rcClient.Get(rc.Name, metav1.GetOptions{}) if err == nil { pods, _ := podClient.List(metav1.ListOptions{}) - framework.Logf("%d pods remaining", len(pods.Items)) + e2elog.Logf("%d pods remaining", len(pods.Items)) count := 0 for _, pod := range pods.Items { if pod.ObjectMeta.DeletionTimestamp == nil { count++ } } - framework.Logf("%d pods has nil DeletionTimestamp", count) - framework.Logf("") + e2elog.Logf("%d pods has nil DeletionTimestamp", count) + e2elog.Logf("") return false, nil } if errors.IsNotFound(err) { @@ -673,10 +674,10 @@ var _ = SIGDescribe("Garbage collector", func() { if err2 != nil { framework.Failf("%v", err2) } - framework.Logf("%d remaining pods are:", len(pods.Items)) - framework.Logf("The ObjectMeta of the remaining pods are:") + e2elog.Logf("%d remaining pods are:", len(pods.Items)) + e2elog.Logf("The ObjectMeta of the remaining pods are:") for _, pod := range pods.Items { - framework.Logf("%#v", pod.ObjectMeta) + e2elog.Logf("%#v", pod.ObjectMeta) } framework.Failf("failed to delete the rc: %v", err) } @@ -755,15 +756,15 @@ var _ = SIGDescribe("Garbage collector", func() { _, err := rcClient.Get(rc1.Name, metav1.GetOptions{}) if err == nil { pods, _ := podClient.List(metav1.ListOptions{}) - framework.Logf("%d pods remaining", len(pods.Items)) + e2elog.Logf("%d pods remaining", len(pods.Items)) count := 0 for _, pod := range pods.Items { if pod.ObjectMeta.DeletionTimestamp == nil { count++ } } - framework.Logf("%d pods has nil DeletionTimestamp", count) - framework.Logf("") + e2elog.Logf("%d pods has nil DeletionTimestamp", count) + e2elog.Logf("") return false, nil } if errors.IsNotFound(err) { @@ -775,10 +776,10 @@ var _ = SIGDescribe("Garbage collector", func() { if err2 != nil { framework.Failf("%v", err2) } - framework.Logf("%d remaining pods are:", len(pods.Items)) - framework.Logf("ObjectMeta of remaining pods are:") + e2elog.Logf("%d remaining pods are:", len(pods.Items)) + e2elog.Logf("ObjectMeta of remaining pods are:") for _, pod := range pods.Items { - framework.Logf("%#v", pod.ObjectMeta) + e2elog.Logf("%#v", pod.ObjectMeta) } framework.Failf("failed to delete rc %s, err: %v", rc1Name, err) } @@ -830,15 +831,15 @@ var _ = SIGDescribe("Garbage collector", func() { patch1 := addRefPatch(pod3.Name, pod3.UID) pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) - framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) + e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) patch2 := addRefPatch(pod1.Name, pod1.UID) pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) - framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) + e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) patch3 := addRefPatch(pod2.Name, pod2.UID) pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3) 
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) - framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) + e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) // delete one pod, should result in the deletion of all pods deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID)) @@ -858,7 +859,7 @@ var _ = SIGDescribe("Garbage collector", func() { } return false, nil }); err != nil { - framework.Logf("pods are %#v", pods.Items) + e2elog.Logf("pods are %#v", pods.Items) framework.Failf("failed to wait for all pods to be deleted: %v", err) } }) @@ -909,7 +910,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err != nil { framework.Failf("failed to create owner resource %q: %v", ownerName, err) } - framework.Logf("created owner resource %q", ownerName) + e2elog.Logf("created owner resource %q", ownerName) // Create a custom dependent resource. dependentName := names.SimpleNameGenerator.GenerateName("dependent") @@ -934,7 +935,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err != nil { framework.Failf("failed to create dependent resource %q: %v", dependentName, err) } - framework.Logf("created dependent resource %q", dependentName) + e2elog.Logf("created dependent resource %q", dependentName) // Delete the owner. background := metav1.DeletePropagationBackground @@ -948,8 +949,8 @@ var _ = SIGDescribe("Garbage collector", func() { _, err := resourceClient.Get(dependentName, metav1.GetOptions{}) return errors.IsNotFound(err), nil }); err != nil { - framework.Logf("owner: %#v", persistedOwner) - framework.Logf("dependent: %#v", persistedDependent) + e2elog.Logf("owner: %#v", persistedOwner) + e2elog.Logf("dependent: %#v", persistedDependent) framework.Failf("failed waiting for dependent resource %q to be deleted", dependentName) } @@ -1010,7 +1011,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err != nil { framework.Failf("failed to create owner resource %q: %v", ownerName, err) } - framework.Logf("created owner resource %q", ownerName) + e2elog.Logf("created owner resource %q", ownerName) // Create a custom dependent resource. dependentName := names.SimpleNameGenerator.GenerateName("dependent") @@ -1035,7 +1036,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err != nil { framework.Failf("failed to create dependent resource %q: %v", dependentName, err) } - framework.Logf("created dependent resource %q", dependentName) + e2elog.Logf("created dependent resource %q", dependentName) // Delete the owner and orphan the dependent. 
err = resourceClient.Delete(ownerName, getOrphanOptions()) diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index fccc7bce7af..98ebba51738 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -74,7 +75,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max } } if cnt > maxAllowedAfterDel { - framework.Logf("Remaining namespaces : %v", cnt) + e2elog.Logf("Remaining namespaces : %v", cnt) return false, nil } return true, nil diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index 101b7134cd6..c05d32ffa3a 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -33,6 +33,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" @@ -1591,7 +1592,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R // verify that the quota shows the expected used resource values for k, v := range used { if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) { - framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String()) + e2elog.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String()) return false, nil } } diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 61d6756d994..a8076eba414 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -35,6 +35,7 @@ import ( utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/printers" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -52,7 +53,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { c := f.ClientSet podName := "pod-1" - framework.Logf("Creating pod %s", podName) + e2elog.Logf("Creating pod %s", podName) _, err := c.CoreV1().Pods(ns).Create(newTablePod(podName)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns) @@ -60,7 +61,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { table := &metav1beta1.Table{} err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns) - framework.Logf("Table: %#v", table) + e2elog.Logf("Table: %#v", table) gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2)) gomega.Expect(len(table.Rows)).To(gomega.Equal(1)) @@ -71,7 +72,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { out := printTable(table) gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s")) gomega.Expect(out).To(gomega.MatchRegexp("\npod-1\\s")) - framework.Logf("Table:\n%s", out) + 
e2elog.Logf("Table:\n%s", out) }) ginkgo.It("should return chunks of table results for list calls", func() { @@ -97,7 +98,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { if err == nil { return } - framework.Logf("Got an error creating template %d: %v", i, err) + e2elog.Logf("Got an error creating template %d: %v", i, err) } ginkgo.Fail("Unable to create template %d, exiting", i) }) @@ -130,7 +131,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { table := &metav1beta1.Table{} err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get nodes in Table form across all namespaces") - framework.Logf("Table: %#v", table) + e2elog.Logf("Table: %#v", table) gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2)) gomega.Expect(len(table.Rows)).To(gomega.BeNumerically(">=", 1)) @@ -141,7 +142,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { out := printTable(table) gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s")) - framework.Logf("Table:\n%s", out) + e2elog.Logf("Table:\n%s", out) }) ginkgo.It("should return a 406 for a backend which does not implement metadata", func() { diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index 42e5bd6722f..43b569495e3 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -413,7 +414,7 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru select { case actual, ok := <-w.ResultChan(): if ok { - framework.Logf("Got : %v %v", actual.Type, actual.Object) + e2elog.Logf("Got : %v %v", actual.Type, actual.Object) } else { framework.Failf("Watch closed unexpectedly") } diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 952a3963e28..299d0a3df33 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -39,6 +39,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/utils/pointer" @@ -267,7 +268,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { }, }) if err != nil && errors.IsAlreadyExists(err) { - framework.Logf("role binding %s already exists", roleBindingName) + e2elog.Logf("role binding %s already exists", roleBindingName) } else { framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace) } diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 351da0bdbff..2b1d22ce6ca 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -63,6 +63,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/job:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/replicaset:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/utils:go_default_library", diff --git 
a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 5e0d4202975..93e8cb620d0 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/controller/job" "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -423,7 +424,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error if len(aliveJobs) > 1 { return false, fmt.Errorf("More than one job is running %+v", jobs.Items) } else if len(aliveJobs) == 0 { - framework.Logf("Warning: Found 0 jobs in namespace %v", ns) + e2elog.Logf("Warning: Found 0 jobs in namespace %v", ns) return false, nil } return aliveJobs[0].Name != previousJobName, nil diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 94c498c968a..1fcbda516db 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -72,7 +73,7 @@ type RestartDaemonConfig struct { // NewRestartConfig creates a RestartDaemonConfig for the given node and daemon. func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig { if !framework.ProviderIs("gce") { - framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider) + e2elog.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider) } return &RestartDaemonConfig{ nodeName: nodeName, @@ -89,7 +90,7 @@ func (r *RestartDaemonConfig) String() string { // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout func (r *RestartDaemonConfig) waitUp() { - framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) + e2elog.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) healthzCheck := fmt.Sprintf( "curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort) @@ -99,12 +100,12 @@ func (r *RestartDaemonConfig) waitUp() { if result.Code == 0 { httpCode, err := strconv.Atoi(result.Stdout) if err != nil { - framework.Logf("Unable to parse healthz http return code: %v", err) + e2elog.Logf("Unable to parse healthz http return code: %v", err) } else if httpCode == 200 { return true, nil } } - framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v", + e2elog.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v", r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr) return false, nil }) @@ -113,7 +114,7 @@ func (r *RestartDaemonConfig) waitUp() { // kill sends a SIGTERM to the daemon func (r *RestartDaemonConfig) kill() { - framework.Logf("Killing %v", r) + e2elog.Logf("Killing %v", r) _, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider) framework.ExpectNoError(err) } @@ -301,7 +302,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { framework.ExpectNoError(err) preRestarts, badNodes := 
getContainerRestarts(f.ClientSet, ns, labelSelector) if preRestarts != 0 { - framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) + e2elog.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) } for _, ip := range nodeIPs { restarter := NewRestartConfig( @@ -310,7 +311,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { } postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) if postRestarts != preRestarts { - framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf) + framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf) framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) } }) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 1b2be266e3b..70da1216c90 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/controller/daemon" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -78,14 +79,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { } } if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { - framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) + e2elog.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) } else { - framework.Logf("unable to dump daemonsets: %v", err) + e2elog.Logf("unable to dump daemonsets: %v", err) } if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { - framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) + e2elog.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) } else { - framework.Logf("unable to dump pods: %v", err) + e2elog.Logf("unable to dump pods: %v", err) } err = clearDaemonSetNodeLabels(f.ClientSet) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -148,7 +149,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ConformanceIt("should run and stop complex daemon", func() { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} - framework.Logf("Creating daemon %q with a node selector", dsName) + e2elog.Logf("Creating daemon %q with a node selector", dsName) ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector ds, err := c.AppsV1().DaemonSets(ns).Create(ds) @@ -195,7 +196,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.It("should run and stop complex daemon with node affinity", func() { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} - framework.Logf("Creating daemon %q with a node affinity", dsName) + e2elog.Logf("Creating daemon %q with a node affinity", dsName) ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.Affinity = &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ @@ -277,7 +278,7 @@ var _ = 
SIGDescribe("Daemon set [Serial]", func() { ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() { label := map[string]string{daemonsetNameLabel: dsName} - framework.Logf("Creating simple daemon set %s", dsName) + e2elog.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) @@ -326,7 +327,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() { label := map[string]string{daemonsetNameLabel: dsName} - framework.Logf("Creating simple daemon set %s", dsName) + e2elog.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) @@ -383,18 +384,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ConformanceIt("should rollback without unnecessary restarts", func() { schedulableNodes := framework.GetReadySchedulableNodesOrDie(c) gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") - framework.Logf("Create a RollingUpdate DaemonSet") + e2elog.Logf("Create a RollingUpdate DaemonSet") label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Check that daemon pods launch on every node of the cluster") + e2elog.Logf("Check that daemon pods launch on every node of the cluster") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") - framework.Logf("Update the DaemonSet to trigger a rollout") + e2elog.Logf("Update the DaemonSet to trigger a rollout") // We use a nonexistent image here, so that we make sure it won't finish newImage := "foo:non-existent" newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { @@ -428,13 +429,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { } gomega.Expect(len(newPods)).NotTo(gomega.Equal(0)) - framework.Logf("Roll back the DaemonSet before rollout is complete") + e2elog.Logf("Roll back the DaemonSet before rollout is complete") rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = image }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Make sure DaemonSet rollback is complete") + e2elog.Logf("Make sure DaemonSet rollback is complete") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -561,7 +562,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s return true, err } if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict { - framework.Logf("failed to update node due to resource version conflict") + e2elog.Logf("failed to update node due to 
resource version conflict") return false, nil } return false, err @@ -579,7 +580,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames return func() (bool, error) { podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { - framework.Logf("could not get the pod list: %v", err) + e2elog.Logf("could not get the pod list: %v", err) return false, nil } pods := podList.Items @@ -596,17 +597,17 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames nodesToPodCount[pod.Spec.NodeName]++ } } - framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount)) + e2elog.Logf("Number of nodes with available pods: %d", len(nodesToPodCount)) // Ensure that exactly 1 pod is running on all nodes in nodeNames. for _, nodeName := range nodeNames { if nodesToPodCount[nodeName] != 1 { - framework.Logf("Node %s is running more than one daemon pod", nodeName) + e2elog.Logf("Node %s is running more than one daemon pod", nodeName) return false, nil } } - framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount)) + e2elog.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount)) // Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in // nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any // other nodes. @@ -627,7 +628,7 @@ func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string { nodeNames := make([]string, 0) for _, node := range nodeList.Items { if !canScheduleOnNode(node, ds) { - framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints) + e2elog.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints) continue } nodeNames = append(nodeNames, node.Name) @@ -692,12 +693,12 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS } podImage := pod.Spec.Containers[0].Image if podImage != image { - framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage) + e2elog.Logf("Wrong image for pod: %s. 
Expected: %s, got: %s.", pod.Name, image, podImage) } else { nodesToUpdatedPodCount[pod.Spec.NodeName]++ } if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { - framework.Logf("Pod %s is not available", pod.Name) + e2elog.Logf("Pod %s is not available", pod.Name) unavailablePods++ } } @@ -736,7 +737,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st if len(historyList.Items) == numHistory { return true, nil } - framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory) + e2elog.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory) return false, nil } err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn) diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index ee963427347..5209a6930d1 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -40,6 +40,7 @@ import ( deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/replicaset" testutil "k8s.io/kubernetes/test/utils" utilpointer "k8s.io/utils/pointer" @@ -128,49 +129,49 @@ var _ = SIGDescribe("Deployment", func() { func failureTrap(c clientset.Interface, ns string) { deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { - framework.Logf("Could not list Deployments in namespace %q: %v", ns, err) + e2elog.Logf("Could not list Deployments in namespace %q: %v", ns, err) return } for i := range deployments.Items { d := deployments.Items[i] - framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d)) + e2elog.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d)) _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1()) if err != nil { - framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err) + e2elog.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err) return } - testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf) + testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, e2elog.Logf) rsList := allOldRSs if newRS != nil { rsList = append(rsList, newRS) } - testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf) + testutil.LogPodsOfDeployment(c, &d, rsList, e2elog.Logf) } // We need print all the ReplicaSets if there are no Deployment object created if len(deployments.Items) != 0 { return } - framework.Logf("Log out all the ReplicaSets if there is no deployment created") + e2elog.Logf("Log out all the ReplicaSets if there is no deployment created") rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { - framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) + e2elog.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) return } for _, rs := range rss.Items { - framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs)) + e2elog.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs)) selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) if err != nil { - framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) + e2elog.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := 
c.CoreV1().Pods(rs.Namespace).List(options) if err != nil { - framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err) + e2elog.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err) continue } for _, pod := range podList.Items { - framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod)) + e2elog.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod)) } } } @@ -192,21 +193,21 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Deleting deployment %s", deploymentName) + e2elog.Logf("Deleting deployment %s", deploymentName) framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)) - framework.Logf("Ensuring deployment %s was deleted", deploymentName) + e2elog.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) + e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) gomega.Expect(err).NotTo(gomega.HaveOccurred()) options := metav1.ListOptions{LabelSelector: selector.String()} rss, err := c.AppsV1().ReplicaSets(ns).List(options) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(rss.Items).Should(gomega.HaveLen(0)) - framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) + e2elog.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) var pods *v1.PodList if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { pods, err = c.CoreV1().Pods(ns).List(options) @@ -230,7 +231,7 @@ func testDeleteDeployment(f *framework.Framework) { deploymentName := "test-new-deployment" podLabels := map[string]string{"name": NginxImageName} replicas := int32(1) - framework.Logf("Creating simple deployment %s", deploymentName) + e2elog.Logf("Creating simple deployment %s", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} deploy, err := c.AppsV1().Deployments(ns).Create(d) @@ -268,7 +269,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { annotations[deploymentutil.RevisionAnnotation] = rsRevision rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage) rs.Annotations = annotations - framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) + e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name) _, err := c.AppsV1().ReplicaSets(ns).Create(rs) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. @@ -277,22 +278,22 @@ func testRollingUpdateDeployment(f *framework.Framework) { // Create a deployment to delete nginx pods and instead bring up redis pods. 
deploymentName := "test-rolling-update-deployment" - framework.Logf("Creating deployment %q", deploymentName) + e2elog.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(d) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 3546343826724305833. - framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name) + e2elog.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name) err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name) + e2elog.Logf("Ensuring status for deployment %q is the expected", deploy.Name) err = e2edeploy.WaitForDeploymentComplete(c, deploy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // There should be 1 old RS (nginx-controller, which is adopted) - framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) + e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1()) @@ -306,28 +307,28 @@ func testRecreateDeployment(f *framework.Framework) { // Create a deployment that brings up redis pods. deploymentName := "test-recreate-deployment" - framework.Logf("Creating deployment %q", deploymentName) + e2elog.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType) deployment, err := c.AppsV1().Deployments(ns).Create(d) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 1 - framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName) + e2elog.Logf("Waiting deployment %q to be updated to revision 1", deploymentName) err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Waiting deployment %q to complete", deploymentName) + e2elog.Logf("Waiting deployment %q to complete", deploymentName) gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) // Update deployment to delete redis pods and bring up nginx pods. 
- framework.Logf("Triggering a new rollout for deployment %q", deploymentName) + e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = NginxImageName update.Spec.Template.Spec.Containers[0].Image = NginxImage }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName) + e2elog.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName) gomega.Expect(e2edeploy.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred()) } @@ -353,7 +354,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-cleanup-deployment" - framework.Logf("Creating deployment %s", deploymentName) + e2elog.Logf("Creating deployment %s", deploymentName) pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err) @@ -422,7 +423,7 @@ func testRolloverDeployment(f *framework.Framework) { gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) // Wait for replica set to become ready before adopting it. - framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName) + e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName) gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred()) // Create a deployment to delete nginx pods and instead bring up redis-slave pods. @@ -431,7 +432,7 @@ func testRolloverDeployment(f *framework.Framework) { deploymentReplicas := int32(1) deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent" deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType - framework.Logf("Creating deployment %q", deploymentName) + e2elog.Logf("Creating deployment %q", deploymentName) newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType) newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{ MaxUnavailable: intOrStrP(0), @@ -444,15 +445,15 @@ func testRolloverDeployment(f *framework.Framework) { // Verify that the pods were scaled up and down as expected. 
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Make sure deployment %q performs scaling operations", deploymentName) + e2elog.Logf("Make sure deployment %q performs scaling operations", deploymentName) // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation) // Check if it's updated to revision 1 correctly - framework.Logf("Check revision of new replica set for deployment %q", deploymentName) + e2elog.Logf("Check revision of new replica set for deployment %q", deploymentName) err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Ensure that both replica sets have 1 created replica") + e2elog.Logf("Ensure that both replica sets have 1 created replica") oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ensureReplicas(oldRS, int32(1)) @@ -461,7 +462,7 @@ func testRolloverDeployment(f *framework.Framework) { ensureReplicas(newRS, int32(1)) // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods. - framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName) + e2elog.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName) updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName @@ -470,20 +471,20 @@ func testRolloverDeployment(f *framework.Framework) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pod template update. 
- framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName) + e2elog.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName) err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 2 - framework.Logf("Wait for revision update of deployment %q to 2", deploymentName) + e2elog.Logf("Wait for revision update of deployment %q to 2", deploymentName) err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Make sure deployment %q is complete", deploymentName) + e2elog.Logf("Make sure deployment %q is complete", deploymentName) err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Ensure that both old replica sets have no replicas") + e2elog.Logf("Ensure that both old replica sets have no replicas") oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ensureReplicas(oldRS, int32(0)) @@ -514,7 +515,7 @@ func testRollbackDeployment(f *framework.Framework) { deploymentReplicas := int32(1) deploymentImage := NginxImage deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType - framework.Logf("Creating deployment %s", deploymentName) + e2elog.Logf("Creating deployment %s", deploymentName) d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType) createAnnotation := map[string]string{"action": "create", "author": "node"} d.Annotations = createAnnotation @@ -560,7 +561,7 @@ func testRollbackDeployment(f *framework.Framework) { // 3. Update the deploymentRollback to rollback to revision 1 revision := int64(1) - framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) + e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback := newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -583,7 +584,7 @@ func testRollbackDeployment(f *framework.Framework) { // 4. Update the deploymentRollback to rollback to last revision revision = 0 - framework.Logf("rolling back deployment %s to last revision", deploymentName) + e2elog.Logf("rolling back deployment %s to last revision", deploymentName) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -605,7 +606,7 @@ func testRollbackDeployment(f *framework.Framework) { // 5. Update the deploymentRollback to rollback to revision 10 // Since there's no revision 10 in history, it should stay as revision 4 revision = 10 - framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) + e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -623,7 +624,7 @@ func testRollbackDeployment(f *framework.Framework) { // 6. 
Update the deploymentRollback to rollback to revision 4 // Since it's already revision 4, it should be no-op revision = 4 - framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) + e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -642,11 +643,11 @@ func testRollbackDeployment(f *framework.Framework) { func randomScale(d *apps.Deployment, i int) { switch r := rand.Float32(); { case r < 0.3: - framework.Logf("%02d: scaling up", i) + e2elog.Logf("%02d: scaling up", i) *(d.Spec.Replicas)++ case r < 0.6: if *(d.Spec.Replicas) > 1 { - framework.Logf("%02d: scaling down", i) + e2elog.Logf("%02d: scaling down", i) *(d.Spec.Replicas)-- } } @@ -668,7 +669,7 @@ func testIterativeDeployments(f *framework.Framework) { d.Spec.ProgressDeadlineSeconds = &thirty d.Spec.RevisionHistoryLimit = &two d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero - framework.Logf("Creating deployment %q", deploymentName) + e2elog.Logf("Creating deployment %q", deploymentName) deployment, err := c.AppsV1().Deployments(ns).Create(d) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -681,7 +682,7 @@ func testIterativeDeployments(f *framework.Framework) { switch n := rand.Float32(); { case n < 0.2: // trigger a new deployment - framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name) + e2elog.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)} update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv) @@ -691,7 +692,7 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.4: // rollback to the previous version - framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name) + e2elog.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { if update.Annotations == nil { update.Annotations = make(map[string]string) @@ -702,7 +703,7 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.6: // just scaling - framework.Logf("%02d: scaling deployment %q", i, deployment.Name) + e2elog.Logf("%02d: scaling deployment %q", i, deployment.Name) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { randomScale(update, i) }) @@ -711,14 +712,14 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.8: // toggling the deployment if deployment.Spec.Paused { - framework.Logf("%02d: pausing deployment %q", i, deployment.Name) + e2elog.Logf("%02d: pausing deployment %q", i, deployment.Name) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = true randomScale(update, i) }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - framework.Logf("%02d: resuming deployment %q", i, deployment.Name) + e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false 
randomScale(update, i) @@ -728,14 +729,14 @@ func testIterativeDeployments(f *framework.Framework) { default: // arbitrarily delete deployment pods - framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name) + e2elog.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) gomega.Expect(err).NotTo(gomega.HaveOccurred()) opts := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.CoreV1().Pods(ns).List(opts) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(podList.Items) == 0 { - framework.Logf("%02d: no deployment pods to delete", i) + e2elog.Logf("%02d: no deployment pods to delete", i) continue } for p := range podList.Items { @@ -743,7 +744,7 @@ func testIterativeDeployments(f *framework.Framework) { continue } name := podList.Items[p].Name - framework.Logf("%02d: deleting deployment pod %q", i, name) + e2elog.Logf("%02d: deleting deployment pod %q", i, name) err := c.CoreV1().Pods(ns).Delete(name, nil) if err != nil && !errors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -761,13 +762,13 @@ func testIterativeDeployments(f *framework.Framework) { }) } - framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName) + e2elog.Logf("Waiting for deployment %q to be observed by the controller", deploymentName) gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) - framework.Logf("Waiting for deployment %q status", deploymentName) + e2elog.Logf("Waiting for deployment %q status", deploymentName) gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) - framework.Logf("Checking deployment %q for a complete condition", deploymentName) + e2elog.Logf("Checking deployment %q for a complete condition", deploymentName) gomega.Expect(e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred()) } @@ -776,7 +777,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { c := f.ClientSet deploymentName := "test-orphan-deployment" - framework.Logf("Creating Deployment %q", deploymentName) + e2elog.Logf("Creating Deployment %q", deploymentName) podLabels := map[string]string{"name": NginxImageName} replicas := int32(1) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) @@ -785,18 +786,18 @@ func testDeploymentsControllerRef(f *framework.Framework) { err = e2edeploy.WaitForDeploymentComplete(c, deploy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) + e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) rsList := listDeploymentReplicaSets(c, ns, podLabels) gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1)) - framework.Logf("Obtaining the ReplicaSet's UID") + e2elog.Logf("Obtaining the ReplicaSet's UID") orphanedRSUID := rsList.Items[0].UID - framework.Logf("Checking the ReplicaSet has the right controllerRef") + e2elog.Logf("Checking the ReplicaSet has the right controllerRef") err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Deleting Deployment %q and orphaning 
its ReplicaSet", deploymentName) + e2elog.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName) err = orphanDeploymentReplicaSets(c, deploy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -805,22 +806,22 @@ func testDeploymentsControllerRef(f *framework.Framework) { gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned") deploymentName = "test-adopt-deployment" - framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) + e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) deploy, err = c.AppsV1().Deployments(ns).Create(d) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2edeploy.WaitForDeploymentComplete(c, deploy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Waiting for the ReplicaSet to have the right controllerRef") + e2elog.Logf("Waiting for the ReplicaSet to have the right controllerRef") err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) + e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) rsList = listDeploymentReplicaSets(c, ns, podLabels) gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1)) - framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") + e2elog.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID)) } @@ -841,19 +842,19 @@ func testProportionalScalingDeployment(f *framework.Framework) { d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3) d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) - framework.Logf("Creating deployment %q", deploymentName) + e2elog.Logf("Creating deployment %q", deploymentName) deployment, err := c.AppsV1().Deployments(ns).Create(d) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Waiting for observed generation %d", deployment.Generation) + e2elog.Logf("Waiting for observed generation %d", deployment.Generation) gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. - framework.Logf("Waiting for all required pods to come up") + e2elog.Logf("Waiting for all required pods to come up") err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) - framework.Logf("Waiting for deployment %q to complete", deployment.Name) + e2elog.Logf("Waiting for deployment %q to complete", deployment.Name) gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) @@ -861,13 +862,13 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Update the deployment with a non-existent image so that the new replica set // will be blocked to simulate a partial rollout. 
- framework.Logf("Updating deployment %q with a non-existent image", deploymentName) + e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = "nginx:404" }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Waiting for observed generation %d", deployment.Generation) + e2elog.Logf("Waiting for observed generation %d", deployment.Generation) gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) // Checking state of first rollout's replicaset. @@ -876,15 +877,15 @@ func testProportionalScalingDeployment(f *framework.Framework) { // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas. minAvailableReplicas := replicas - int32(maxUnavailable) - framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) + e2elog.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) // First rollout's replicaset should have .spec.replicas = 8 too. - framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) + e2elog.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) // The desired replicas wait makes sure that the RS controller has created expected number of pods. - framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) + e2elog.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) @@ -898,36 +899,36 @@ func testProportionalScalingDeployment(f *framework.Framework) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Second rollout's replicaset should have 0 available replicas. - framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0") + e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0") gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0))) // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. newReplicas := replicas + int32(maxSurge) - minAvailableReplicas - framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) + e2elog.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred()) // The desired replicas wait makes sure that the RS controller has created expected number of pods. 
- framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) + e2elog.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Check the deployment's minimum availability. - framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName) + e2elog.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName) if deployment.Status.AvailableReplicas < minAvailableReplicas { gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) } // Scale the deployment to 30 replicas. newReplicas = int32(30) - framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas) + e2elog.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) + e2elog.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) @@ -935,12 +936,12 @@ func testProportionalScalingDeployment(f *framework.Framework) { // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. // Note that 12 comes from rounding (30-10)*(8/13) to nearest integer. - framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") + e2elog.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred()) // Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas. // Note that 8 comes from rounding (30-10)*(5/13) to nearest integer. 
- framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") + e2elog.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred()) } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 4615207aaf7..645b0b87cd5 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -281,7 +282,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { return false, fmt.Errorf("pods is nil") } if len(pods.Items) < n { - framework.Logf("pods: %v < %v", len(pods.Items), n) + e2elog.Logf("pods: %v < %v", len(pods.Items), n) return false, nil } ready := 0 @@ -291,7 +292,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { } } if ready < n { - framework.Logf("running pods: %v < %v", ready, n) + e2elog.Logf("running pods: %v < %v", ready, n) return false, nil } return true, nil diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 256ea093387..371c39397b6 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -60,7 +61,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) { if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) { expected = true } else { - framework.Logf("Observed node ready status is NOT %v as expected", isReady) + e2elog.Logf("Observed node ready status is NOT %v as expected", isReady) } case <-timer: timeout = true @@ -96,9 +97,9 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod { func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error { pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage)) if err == nil { - framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) + e2elog.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) } else { - framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err) + e2elog.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err) } return err } @@ -262,7 +263,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // pods on another node and that now the number of replicas is equal 'replicas'. 
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { - framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) + e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name) framework.ExpectNoError(err) @@ -271,7 +272,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) }) - framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) + e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } @@ -293,7 +294,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Spec.NodeName != node.Name { - framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) + e2elog.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) } } }) @@ -329,7 +330,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // pods on another node and that now the number of replicas is equal 'replicas + 1'. ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { - framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) + e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name) gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") @@ -338,7 +339,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) }) - framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) + e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } @@ -367,7 +368,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DumpDebugInfo(c, ns) } - framework.Logf("Deleting all stateful set in ns %v", ns) + e2elog.Logf("Deleting all stateful set in ns %v", ns) framework.DeleteAllStatefulSets(c, ns) }) @@ -406,12 +407,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver. // The grace period on the stateful pods is set to a value > 0. 
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { - framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name) + e2elog.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name) err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute) gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") }) - framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) + e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } @@ -450,7 +451,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // running pods after the node-controller detects node unreachable. ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { - framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) + e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute) gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") @@ -459,7 +460,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) }) - framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) + e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } @@ -536,7 +537,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } } } - framework.Logf( + e2elog.Logf( "Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v", neverEvictedPods, maxTolerationTime, diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 0e6511f8082..41725c433f4 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -133,7 +134,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. 
- framework.Logf("Ensuring all pods for ReplicationController %q are running", name) + e2elog.Logf("Ensuring all pods for ReplicationController %q are running", name) running := int32(0) for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { @@ -149,7 +150,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri } } framework.ExpectNoError(err) - framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) + e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) running++ } @@ -159,7 +160,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri } // Verify that something is listening. - framework.Logf("Trying to dial the pod") + e2elog.Logf("Trying to dial the pod") retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) @@ -178,7 +179,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { namespace := f.Namespace.Name name := "condition-test" - framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) + e2elog.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) quota := newPodQuota(name, "2") _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota) framework.ExpectNoError(err) diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index a97fb13b291..a892a2a04a9 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset" "github.com/onsi/ginkgo" @@ -122,7 +123,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s // Create a ReplicaSet for a service that serves its hostname. // The source for the Docker containter kubernetes/serve_hostname is // in contrib/for-demos/serve_hostname - framework.Logf("Creating ReplicaSet %s", name) + e2elog.Logf("Creating ReplicaSet %s", name) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image) newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS) @@ -135,7 +136,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. - framework.Logf("Ensuring a pod for ReplicaSet %q is running", name) + e2elog.Logf("Ensuring a pod for ReplicaSet %q is running", name) running := int32(0) for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { @@ -151,7 +152,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s } } framework.ExpectNoError(err) - framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) + e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) running++ } @@ -161,7 +162,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s } // Verify that something is listening. 
- framework.Logf("Trying to dial the pod") + e2elog.Logf("Trying to dial the pod") retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 4ea141d0697..eb7f5e9ef9c 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -34,6 +34,7 @@ import ( clientset "k8s.io/client-go/kubernetes" watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -86,7 +87,7 @@ var _ = SIGDescribe("StatefulSet", func() { if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DumpDebugInfo(c, ns) } - framework.Logf("Deleting all statefulset in ns %v", ns) + e2elog.Logf("Deleting all statefulset in ns %v", ns) framework.DeleteAllStatefulSets(c, ns) }) @@ -753,13 +754,13 @@ var _ = SIGDescribe("StatefulSet", func() { pod := event.Object.(*v1.Pod) switch event.Type { case watch.Deleted: - framework.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace) + e2elog.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace) if initialStatefulPodUID == "" { return false, nil } return true, nil } - framework.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.", + e2elog.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.", pod.Namespace, pod.Name, pod.UID, pod.Status.Phase) initialStatefulPodUID = pod.UID return false, nil @@ -836,7 +837,7 @@ var _ = SIGDescribe("StatefulSet", func() { if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DumpDebugInfo(c, ns) } - framework.Logf("Deleting all statefulset in ns %v", ns) + e2elog.Logf("Deleting all statefulset in ns %v", ns) framework.DeleteAllStatefulSets(c, ns) }) @@ -876,7 +877,7 @@ func kubectlExecWithRetries(args ...string) (out string) { if out, err = framework.RunKubectl(args...); err == nil { return } - framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out) + e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out) } framework.Failf("Failed to execute \"%v\" with retries: %v", args, err) return @@ -938,7 +939,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) { ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace) for k, v := range kv { cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v) - framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd)) + e2elog.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd)) } } @@ -969,12 +970,12 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string { func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet { m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns) - framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name) + e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name) for _, cmd := range []string{ "create database statefulset;", "use statefulset; create table foo (k varchar(20), v varchar(20));", } { - framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name))) + e2elog.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name))) } return m.ss } @@ -983,7 +984,7 @@ func (m 
*mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) { name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex) for k, v := range kv { cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v) - framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name)) + e2elog.Logf(m.mysqlExec(cmd, m.ss.Namespace, name)) } } @@ -1014,7 +1015,7 @@ func (m *redisTester) deploy(ns string) *apps.StatefulSet { func (m *redisTester) write(statefulPodIndex int, kv map[string]string) { name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex) for k, v := range kv { - framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name)) + e2elog.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name)) } } @@ -1039,12 +1040,12 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string { func (c *cockroachDBTester) deploy(ns string) *apps.StatefulSet { c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns) - framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name) + e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name) for _, cmd := range []string{ "CREATE DATABASE IF NOT EXISTS foo;", "CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);", } { - framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name))) + e2elog.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name))) } return c.ss } @@ -1053,7 +1054,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) { name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex) for k, v := range kv { cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v) - framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name)) + e2elog.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name)) } } func (c *cockroachDBTester) read(statefulPodIndex int, key string) string { diff --git a/test/e2e/instrumentation/logging/BUILD b/test/e2e/instrumentation/logging/BUILD index fed5defe4cd..5dd82e427fb 100644 --- a/test/e2e/instrumentation/logging/BUILD +++ b/test/e2e/instrumentation/logging/BUILD @@ -16,6 +16,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/config:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/elasticsearch:go_default_library", "//test/e2e/instrumentation/logging/stackdriver:go_default_library", diff --git a/test/e2e/instrumentation/logging/elasticsearch/BUILD b/test/e2e/instrumentation/logging/elasticsearch/BUILD index 57d971b0b93..c18b3639fd1 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/BUILD +++ b/test/e2e/instrumentation/logging/elasticsearch/BUILD @@ -20,6 +20,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go index c32665aee12..135b02782a6 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go +++ 
b/test/e2e/instrumentation/logging/elasticsearch/kibana.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "github.com/onsi/ginkgo" @@ -61,7 +62,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { // being run as the first e2e test just after the e2e cluster has been created. err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { if _, err := s.Get("kibana-logging", metav1.GetOptions{}); err != nil { - framework.Logf("Kibana is unreachable: %v", err) + e2elog.Logf("Kibana is unreachable: %v", err) return false, nil } return true, nil @@ -83,7 +84,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if err != nil { - framework.Logf("Failed to get services proxy request: %v", err) + e2elog.Logf("Failed to get services proxy request: %v", err) return false, nil } @@ -95,7 +96,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { Name("kibana-logging"). DoRaw() if err != nil { - framework.Logf("Proxy call to kibana-logging failed: %v", err) + e2elog.Logf("Proxy call to kibana-logging failed: %v", err) return false, nil } return true, nil diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go index ecc30cda3d5..107369e112a 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/utils.go +++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/fields" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" ) @@ -54,7 +55,7 @@ func newEsLogProvider(f *framework.Framework) (*esLogProvider, error) { func (p *esLogProvider) Init() error { f := p.Framework // Check for the existence of the Elasticsearch service. - framework.Logf("Checking the Elasticsearch service exists.") + e2elog.Logf("Checking the Elasticsearch service exists.") s := f.ClientSet.CoreV1().Services(api.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. @@ -63,14 +64,14 @@ func (p *esLogProvider) Init() error { if _, err = s.Get("elasticsearch-logging", meta_v1.GetOptions{}); err == nil { break } - framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) + e2elog.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) } if err != nil { return err } // Wait for the Elasticsearch pods to enter the running state. 
- framework.Logf("Checking to make sure the Elasticsearch pods are running") + e2elog.Logf("Checking to make sure the Elasticsearch pods are running") labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String() options := meta_v1.ListOptions{LabelSelector: labelSelector} pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options) @@ -84,7 +85,7 @@ func (p *esLogProvider) Init() error { } } - framework.Logf("Checking to make sure we are talking to an Elasticsearch service.") + e2elog.Logf("Checking to make sure we are talking to an Elasticsearch service.") // Perform a few checks to make sure this looks like an Elasticsearch cluster. var statusCode int err = nil @@ -92,7 +93,7 @@ func (p *esLogProvider) Init() error { for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { - framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) + e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue } // Query against the root URL for Elasticsearch. @@ -103,11 +104,11 @@ func (p *esLogProvider) Init() error { response.StatusCode(&statusCode) if err != nil { - framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err) + e2elog.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err) continue } if int(statusCode) != 200 { - framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode) + e2elog.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode) continue } break @@ -121,12 +122,12 @@ func (p *esLogProvider) Init() error { // Now assume we really are talking to an Elasticsearch instance. // Check the cluster health. - framework.Logf("Checking health of Elasticsearch service.") + e2elog.Logf("Checking health of Elasticsearch service.") healthy := false for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { - framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) + e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue } body, err = proxyRequest.Namespace(api.NamespaceSystem). 
@@ -140,17 +141,17 @@ func (p *esLogProvider) Init() error { health := make(map[string]interface{}) err := json.Unmarshal(body, &health) if err != nil { - framework.Logf("Bad json response from elasticsearch: %v", err) + e2elog.Logf("Bad json response from elasticsearch: %v", err) continue } statusIntf, ok := health["status"] if !ok { - framework.Logf("No status field found in cluster health response: %v", health) + e2elog.Logf("No status field found in cluster health response: %v", health) continue } status := statusIntf.(string) if status != "green" && status != "yellow" { - framework.Logf("Cluster health has bad status: %v", health) + e2elog.Logf("Cluster health has bad status: %v", health) continue } if err == nil && ok { @@ -174,12 +175,12 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry { proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { - framework.Logf("Failed to get services proxy request: %v", errProxy) + e2elog.Logf("Failed to get services proxy request: %v", errProxy) return nil } query := fmt.Sprintf("kubernetes.pod_name:%s AND kubernetes.namespace_name:%s", name, f.Namespace.Name) - framework.Logf("Sending a search request to Elasticsearch with the following query: %s", query) + e2elog.Logf("Sending a search request to Elasticsearch with the following query: %s", query) // Ask Elasticsearch to return all the log lines that were tagged with the // pod name. Ask for ten times as many log lines because duplication is possible. @@ -191,26 +192,26 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry { Param("size", strconv.Itoa(searchPageSize)). DoRaw() if err != nil { - framework.Logf("Failed to make proxy call to elasticsearch-logging: %v", err) + e2elog.Logf("Failed to make proxy call to elasticsearch-logging: %v", err) return nil } var response map[string]interface{} err = json.Unmarshal(body, &response) if err != nil { - framework.Logf("Failed to unmarshal response: %v", err) + e2elog.Logf("Failed to unmarshal response: %v", err) return nil } hits, ok := response["hits"].(map[string]interface{}) if !ok { - framework.Logf("response[hits] not of the expected type: %T", response["hits"]) + e2elog.Logf("response[hits] not of the expected type: %T", response["hits"]) return nil } h, ok := hits["hits"].([]interface{}) if !ok { - framework.Logf("Hits not of the expected type: %T", hits["hits"]) + e2elog.Logf("Hits not of the expected type: %T", hits["hits"]) return nil } @@ -219,13 +220,13 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry { for _, e := range h { l, ok := e.(map[string]interface{}) if !ok { - framework.Logf("Element of hit not of expected type: %T", e) + e2elog.Logf("Element of hit not of expected type: %T", e) continue } source, ok := l["_source"].(map[string]interface{}) if !ok { - framework.Logf("_source not of the expected type: %T", l["_source"]) + e2elog.Logf("_source not of the expected type: %T", l["_source"]) continue } @@ -241,7 +242,7 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry { continue } - framework.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source) + e2elog.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source) } return entries diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index a028ff9268c..c2262640e72 100644 --- 
a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -28,6 +28,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -61,14 +62,14 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti defer wg.Done() defer ginkgo.GinkgoRecover() wave := fmt.Sprintf("wave%v", strconv.Itoa(i)) - framework.Logf("Starting logging soak, wave = %v", wave) + e2elog.Logf("Starting logging soak, wave = %v", wave) RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime) - framework.Logf("Completed logging soak, wave %v", i) + e2elog.Logf("Completed logging soak, wave %v", i) }() // Niceness. time.Sleep(loggingSoak.TimeBetweenWaves) } - framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale) + e2elog.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale) wg.Wait() }) }) diff --git a/test/e2e/instrumentation/logging/stackdriver/BUILD b/test/e2e/instrumentation/logging/stackdriver/BUILD index effafb418da..d1bff9cd3ea 100644 --- a/test/e2e/instrumentation/logging/stackdriver/BUILD +++ b/test/e2e/instrumentation/logging/stackdriver/BUILD @@ -18,6 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/instrumentation/logging/stackdriver/basic.go b/test/e2e/instrumentation/logging/stackdriver/basic.go index 3524444e7eb..e98ab9ebe40 100644 --- a/test/e2e/instrumentation/logging/stackdriver/basic.go +++ b/test/e2e/instrumentation/logging/stackdriver/basic.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" @@ -155,7 +156,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd podName := fmt.Sprintf("synthlogger-%s", string(uuid.NewUUID())) err := utils.NewLoadLoggingPod(podName, "", 1, 1*time.Second).Start(f) if err != nil { - framework.Logf("Failed to create a logging pod: %v", err) + e2elog.Logf("Failed to create a logging pod: %v", err) } return false, nil }, stopCh) diff --git a/test/e2e/instrumentation/logging/stackdriver/soak.go b/test/e2e/instrumentation/logging/stackdriver/soak.go index 14df92bdcc1..d9dd44c2d9a 100644 --- a/test/e2e/instrumentation/logging/stackdriver/soak.go +++ b/test/e2e/instrumentation/logging/stackdriver/soak.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" @@ -85,7 +86,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd // Starting one pod on each node. 
for _, pod := range podsByRun[runIdx] { if err := pod.Start(f); err != nil { - framework.Logf("Failed to start pod: %v", err) + e2elog.Logf("Failed to start pod: %v", err) } } <-t.C diff --git a/test/e2e/instrumentation/logging/stackdriver/utils.go b/test/e2e/instrumentation/logging/stackdriver/utils.go index 27360474a83..98b203d928a 100644 --- a/test/e2e/instrumentation/logging/stackdriver/utils.go +++ b/test/e2e/instrumentation/logging/stackdriver/utils.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" "golang.org/x/oauth2/google" @@ -117,7 +118,7 @@ func ensureProjectHasSinkCapacity(sinksService *sd.ProjectsSinksService, project return err } if len(listResponse.Sinks) >= stackdriverSinkCountLimit { - framework.Logf("Reached Stackdriver sink limit. Deleting all sinks") + e2elog.Logf("Reached Stackdriver sink limit. Deleting all sinks") deleteSinks(sinksService, projectID, listResponse.Sinks) } return nil @@ -136,7 +137,7 @@ func deleteSinks(sinksService *sd.ProjectsSinksService, projectID string, sinks for _, sink := range sinks { sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name) if _, err := sinksService.Delete(sinkNameID).Do(); err != nil { - framework.Logf("Failed to delete LogSink: %v", err) + e2elog.Logf("Failed to delete LogSink: %v", err) } } } @@ -185,21 +186,21 @@ func (p *sdLogProvider) Cleanup() { sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, p.logSink.Name) sinksService := p.sdService.Projects.Sinks if _, err := sinksService.Delete(sinkNameID).Do(); err != nil { - framework.Logf("Failed to delete LogSink: %v", err) + e2elog.Logf("Failed to delete LogSink: %v", err) } } if p.subscription != nil { subsService := p.pubsubService.Projects.Subscriptions if _, err := subsService.Delete(p.subscription.Name).Do(); err != nil { - framework.Logf("Failed to delete PubSub subscription: %v", err) + e2elog.Logf("Failed to delete PubSub subscription: %v", err) } } if p.topic != nil { topicsService := p.pubsubService.Projects.Topics if _, err := topicsService.Delete(p.topic.Name).Do(); err != nil { - framework.Logf("Failed to delete PubSub topic: %v", err) + e2elog.Logf("Failed to delete PubSub topic: %v", err) } } } @@ -234,7 +235,7 @@ func (p *sdLogProvider) createSink(projectID, sinkName, topicName string) (*sd.L if err != nil { return nil, err } - framework.Logf("Using the following filter for log entries: %s", filter) + e2elog.Logf("Using the following filter for log entries: %s", filter) sink := &sd.LogSink{ Name: sinkName, Destination: fmt.Sprintf("pubsub.googleapis.com/%s", topicName), @@ -280,20 +281,20 @@ func (p *sdLogProvider) authorizeSink() error { } func (p *sdLogProvider) waitSinkInit() error { - framework.Logf("Waiting for log sink to become operational") + e2elog.Logf("Waiting for log sink to become operational") return wait.Poll(1*time.Second, sinkStartupTimeout, func() (bool, error) { err := publish(p.pubsubService, p.topic, "embrace eternity") if err != nil { - framework.Logf("Failed to push message to PubSub due to %v", err) + e2elog.Logf("Failed to push message to PubSub due to %v", err) } messages, err := pullAndAck(p.pubsubService, p.subscription) if err != nil { - framework.Logf("Failed to pull messages from PubSub due to %v", err) + e2elog.Logf("Failed to pull messages from PubSub due to %v", err) return false, nil } if len(messages) > 0 { - framework.Logf("Sink %s 
is operational", p.logSink.Name) + e2elog.Logf("Sink %s is operational", p.logSink.Name) return true, nil } @@ -318,32 +319,32 @@ func (p *sdLogProvider) startPollingLogs() { func (p *sdLogProvider) pollLogsOnce() { messages, err := pullAndAck(p.pubsubService, p.subscription) if err != nil { - framework.Logf("Failed to pull messages from PubSub due to %v", err) + e2elog.Logf("Failed to pull messages from PubSub due to %v", err) return } for _, msg := range messages { logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data) if err != nil { - framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data) + e2elog.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data) continue } var sdLogEntry sd.LogEntry if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil { - framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err) + e2elog.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err) continue } name, ok := p.tryGetName(sdLogEntry) if !ok { - framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type) + e2elog.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type) continue } logEntry, err := convertLogEntry(sdLogEntry) if err != nil { - framework.Logf("Failed to parse Stackdriver LogEntry: %v", err) + e2elog.Logf("Failed to parse Stackdriver LogEntry: %v", err) continue } @@ -407,7 +408,7 @@ func pullAndAck(service *pubsub.Service, subs *pubsub.Subscription) ([]*pubsub.R if len(ids) > 0 { ackReq := &pubsub.AcknowledgeRequest{AckIds: ids} if _, err = subsService.Acknowledge(subs.Name, ackReq).Do(); err != nil { - framework.Logf("Failed to ack poll: %v", err) + e2elog.Logf("Failed to ack poll: %v", err) } } diff --git a/test/e2e/instrumentation/logging/utils/BUILD b/test/e2e/instrumentation/logging/utils/BUILD index b8c5ab7771e..8f81561673b 100644 --- a/test/e2e/instrumentation/logging/utils/BUILD +++ b/test/e2e/instrumentation/logging/utils/BUILD @@ -25,6 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/utils/image:go_default_library", "//vendor/k8s.io/utils/integer:go_default_library", ], diff --git a/test/e2e/instrumentation/logging/utils/logging_agent.go b/test/e2e/instrumentation/logging/utils/logging_agent.go index 0673fed737b..7e7707d1bc4 100644 --- a/test/e2e/instrumentation/logging/utils/logging_agent.go +++ b/test/e2e/instrumentation/logging/utils/logging_agent.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/utils/integer" ) @@ -67,13 +68,13 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max for _, pod := range agentPods.Items { contStatuses := pod.Status.ContainerStatuses if len(contStatuses) == 0 { - framework.Logf("There are no container statuses for pod %s", pod.Name) + e2elog.Logf("There are no container statuses for pod %s", pod.Name) continue } restartCount := int(contStatuses[0].RestartCount) maxRestartCount = integer.IntMax(maxRestartCount, restartCount) - framework.Logf("Logging agent %s on node %s was restarted %d times", + e2elog.Logf("Logging agent %s on node %s was restarted %d times", pod.Name, 
pod.Spec.NodeName, restartCount) } diff --git a/test/e2e/instrumentation/logging/utils/logging_pod.go b/test/e2e/instrumentation/logging/utils/logging_pod.go index e13d0f75976..92d7638b909 100644 --- a/test/e2e/instrumentation/logging/utils/logging_pod.go +++ b/test/e2e/instrumentation/logging/utils/logging_pod.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -93,7 +94,7 @@ func (p *loadLoggingPod) Name() string { } func (p *loadLoggingPod) Start(f *framework.Framework) error { - framework.Logf("Starting load logging pod %s", p.name) + e2elog.Logf("Starting load logging pod %s", p.name) f.PodClient().Create(&api_v1.Pod{ ObjectMeta: meta_v1.ObjectMeta{ Name: p.name, @@ -168,7 +169,7 @@ func (p *execLoggingPod) Name() string { } func (p *execLoggingPod) Start(f *framework.Framework) error { - framework.Logf("Starting repeating logging pod %s", p.name) + e2elog.Logf("Starting repeating logging pod %s", p.name) f.PodClient().Create(&api_v1.Pod{ ObjectMeta: meta_v1.ObjectMeta{ Name: p.name, diff --git a/test/e2e/instrumentation/logging/utils/wait.go b/test/e2e/instrumentation/logging/utils/wait.go index ae4b1608bb8..391d8151266 100644 --- a/test/e2e/instrumentation/logging/utils/wait.go +++ b/test/e2e/instrumentation/logging/utils/wait.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) // LogChecker is an interface for an entity that can check whether logging @@ -195,14 +195,14 @@ func getFullIngestionTimeout(podsMap map[string]FiniteLoggingPod, slack float64) totalWant += want } if len(lossMsgs) > 0 { - framework.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n")) + e2elog.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n")) } lostFrac := 1 - float64(totalGot)/float64(totalWant) if lostFrac > slack { return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable", lostFrac*100, slack*100) } - framework.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%", + e2elog.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%", lostFrac*100, slack*100) return nil } diff --git a/test/e2e/instrumentation/monitoring/BUILD b/test/e2e/instrumentation/monitoring/BUILD index 0add13cd5cb..de6c7be921f 100644 --- a/test/e2e/instrumentation/monitoring/BUILD +++ b/test/e2e/instrumentation/monitoring/BUILD @@ -38,6 +38,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/config:go_default_library", "//test/e2e/framework/gpu:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/scheduling:go_default_library", diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go index 502fb2d5603..ac588073657 100644 --- a/test/e2e/instrumentation/monitoring/accelerator.go +++ b/test/e2e/instrumentation/monitoring/accelerator.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/gpu" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" 
"k8s.io/kubernetes/test/e2e/scheduling" "k8s.io/kubernetes/test/utils/image" @@ -101,7 +102,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) { pollingFunction := checkForAcceleratorMetrics(projectID, gcmService, time.Now(), metricsMap) err = wait.Poll(pollFrequency, pollTimeout, pollingFunction) if err != nil { - framework.Logf("Missing metrics: %+v", metricsMap) + e2elog.Logf("Missing metrics: %+v", metricsMap) } framework.ExpectNoError(err) } @@ -119,9 +120,9 @@ func checkForAcceleratorMetrics(projectID string, gcmService *gcm.Service, start if len(ts) > 0 { counter = counter + 1 metricsMap[metric] = true - framework.Logf("Received %v timeseries for metric %v", len(ts), metric) + e2elog.Logf("Received %v timeseries for metric %v", len(ts), metric) } else { - framework.Logf("No timeseries for metric %v", metric) + e2elog.Logf("No timeseries for metric %v", metric) } } if counter < 3 { diff --git a/test/e2e/instrumentation/monitoring/cadvisor.go b/test/e2e/instrumentation/monitoring/cadvisor.go index f2079efcfd9..1af5e4b2a62 100644 --- a/test/e2e/instrumentation/monitoring/cadvisor.go +++ b/test/e2e/instrumentation/monitoring/cadvisor.go @@ -24,6 +24,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "github.com/onsi/ginkgo" @@ -71,7 +72,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) if maxRetries--; maxRetries <= 0 { break } - framework.Logf("failed to retrieve kubelet stats -\n %v", errors) + e2elog.Logf("failed to retrieve kubelet stats -\n %v", errors) time.Sleep(cadvisor.SleepDuration) } framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. 
Errors:\n%v", maxRetries, errors) diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index f30b3fef412..66c1f15566d 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -27,6 +27,7 @@ import ( rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) var ( @@ -264,20 +265,20 @@ func CreateAdapter(adapterDeploymentFile string) error { return err } stat, err := framework.RunKubectl("create", "-f", adapterURL) - framework.Logf(stat) + e2elog.Logf(stat) return err } func createClusterAdminBinding() error { stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account") if err != nil { - framework.Logf(stderr) + e2elog.Logf(stderr) return err } serviceAccount := strings.TrimSpace(stdout) - framework.Logf("current service account: %q", serviceAccount) + e2elog.Logf("current service account: %q", serviceAccount) stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount) - framework.Logf(stat) + e2elog.Logf(stat) return err } @@ -306,32 +307,32 @@ func CreateDescriptors(service *gcm.Service, projectID string) error { func CleanupDescriptors(service *gcm.Service, projectID string) { _, err := service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, CustomMetricName)).Do() if err != nil { - framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err) + e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err) } _, err = service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, UnusedMetricName)).Do() if err != nil { - framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err) + e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err) } } // CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments. 
func CleanupAdapter(adapterDeploymentFile string) { stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile) - framework.Logf(stat) + e2elog.Logf(stat) if err != nil { - framework.Logf("Failed to delete adapter deployments: %s", err) + e2elog.Logf("Failed to delete adapter deployments: %s", err) } err = exec.Command("rm", adapterDeploymentFile).Run() if err != nil { - framework.Logf("Failed to delete adapter deployment file: %s", err) + e2elog.Logf("Failed to delete adapter deployment file: %s", err) } cleanupClusterAdminBinding() } func cleanupClusterAdminBinding() { stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding) - framework.Logf(stat) + e2elog.Logf(stat) if err != nil { - framework.Logf("Failed to delete cluster admin binding: %s", err) + e2elog.Logf("Failed to delete cluster admin binding: %s", err) } } diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index fe056e90f46..ef658765a27 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -36,6 +36,7 @@ import ( cacheddiscovery "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/restmapper" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" customclient "k8s.io/metrics/pkg/client/custom_metrics" externalclient "k8s.io/metrics/pkg/client/external_metrics" ) @@ -257,11 +258,11 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) { err := cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{}) if err != nil { - framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err) + e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err) } err = cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{}) if err != nil { - framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err) + e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err) } } diff --git a/test/e2e/instrumentation/monitoring/metrics_grabber.go b/test/e2e/instrumentation/monitoring/metrics_grabber.go index afcc9039811..3f851264b9a 100644 --- a/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ b/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/metrics" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" @@ -71,7 +72,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { } } if !masterRegistered { - framework.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.") + e2elog.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.") return } response, err := grabber.GrabFromScheduler() @@ -92,7 +93,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { } } if !masterRegistered { - framework.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.") + e2elog.Logf("Master is node api.Registry. 
Skipping testing ControllerManager metrics.") return } response, err := grabber.GrabFromControllerManager() diff --git a/test/e2e/instrumentation/monitoring/prometheus.go b/test/e2e/instrumentation/monitoring/prometheus.go index 6ecd493da8d..2c46ef719ae 100644 --- a/test/e2e/instrumentation/monitoring/prometheus.go +++ b/test/e2e/instrumentation/monitoring/prometheus.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" ) @@ -171,7 +172,7 @@ func validateQueryReturnsCorrectValues(c clientset.Interface, query string, expe if len(samples) < minSamplesCount { return fmt.Errorf("Not enough samples for query '%v', got %v", query, samples) } - framework.Logf("Executed query '%v' returned %v", query, samples) + e2elog.Logf("Executed query '%v' returned %v", query, samples) for _, value := range samples { error := math.Abs(value-expectedValue) / expectedValue if error >= errorTolerance { @@ -238,7 +239,7 @@ func fetchPrometheusTargetDiscovery(c clientset.Interface) (TargetDiscovery, err Raw() var qres promTargetsResponse if err != nil { - framework.Logf(string(response)) + e2elog.Logf(string(response)) return qres.Data, err } err = json.Unmarshal(response, &qres) @@ -303,7 +304,7 @@ func queryPrometheus(c clientset.Interface, query string, start, end time.Time, Do(). Raw() if err != nil { - framework.Logf(string(response)) + e2elog.Logf(string(response)) return nil, err } var qres promQueryResponse @@ -369,7 +370,7 @@ func retryUntilSucceeds(validator func() error, timeout time.Duration) { if time.Since(startTime) >= timeout { break } - framework.Logf(err.Error()) + e2elog.Logf(err.Error()) time.Sleep(prometheusSleepBetweenAttempts) } framework.Failf(err.Error()) diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go index 1d7a7cd1129..01805628e67 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver.go +++ b/test/e2e/instrumentation/monitoring/stackdriver.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" gcm "google.golang.org/api/monitoring/v3" @@ -83,7 +84,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per // and uncomment following lines (comment out the two lines above): (DON'T set the env var below) /* ts, err := google.DefaultTokenSource(oauth2.NoContext) - framework.Logf("Couldn't get application default credentials, %v", err) + e2elog.Logf("Couldn't get application default credentials, %v", err) if err != nil { framework.Failf("Error accessing application default credentials, %v", err) } @@ -110,7 +111,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU) err = wait.Poll(pollFrequency, pollTimeout, pollingFunction) if err != nil { - framework.Logf("Missing metrics: %+v\n", metricsMap) + e2elog.Logf("Missing metrics: %+v\n", metricsMap) } framework.ExpectNoError(err) } @@ -129,9 +130,9 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time, if len(ts) > 0 { counter = counter + 1 metricsMap[metric] = true - 
framework.Logf("Received %v timeseries for metric %v\n", len(ts), metric) + e2elog.Logf("Received %v timeseries for metric %v\n", len(ts), metric) } else { - framework.Logf("No timeseries for metric %v\n", metric) + e2elog.Logf("No timeseries for metric %v\n", metric) } var sum float64 @@ -148,10 +149,10 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time, } } sum = sum + *max.Value.DoubleValue - framework.Logf("Received %v points for metric %v\n", + e2elog.Logf("Received %v points for metric %v\n", len(t.Points), metric) } - framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit)) + e2elog.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit)) if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) { return false, nil } From e6287c61e6ad7f355a34c64d04538e13262902db Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Fri, 3 May 2019 15:51:03 -0700 Subject: [PATCH 046/194] Add OWNERS file for gce/manifests --- cluster/gce/manifests/OWNERS | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 cluster/gce/manifests/OWNERS diff --git a/cluster/gce/manifests/OWNERS b/cluster/gce/manifests/OWNERS new file mode 100644 index 00000000000..1cb12b5faa7 --- /dev/null +++ b/cluster/gce/manifests/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- tallclair +- MrHohn +approvers: +- tallclair +- MrHohn +labels: +- sig/gcp From c4df3a2c446f43b21d3dc27cebd892ab2a3a3129 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Wed, 8 May 2019 18:03:58 -0700 Subject: [PATCH 047/194] prevent `predicatesOrdering` from escaping from UT - sets `predicatesOrdering` back to original value in UT --- pkg/scheduler/algorithm/predicates/predicates.go | 5 ----- pkg/scheduler/algorithm/predicates/utils.go | 10 ++++++++++ pkg/scheduler/core/generic_scheduler_test.go | 10 ++++++---- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index c886b8fb259..be61a41902c 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -173,11 +173,6 @@ func Ordering() []string { return predicatesOrdering } -// SetPredicatesOrdering sets the ordering of predicates. -func SetPredicatesOrdering(names []string) { - predicatesOrdering = names -} - // GetPersistentVolumeInfo returns a persistent volume object by PV ID. func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { return c.Get(pvID) diff --git a/pkg/scheduler/algorithm/predicates/utils.go b/pkg/scheduler/algorithm/predicates/utils.go index 4080baf91ee..6bbbe0f6bdc 100644 --- a/pkg/scheduler/algorithm/predicates/utils.go +++ b/pkg/scheduler/algorithm/predicates/utils.go @@ -77,3 +77,13 @@ func portsConflict(existingPorts schedulernodeinfo.HostPortInfo, wantPorts []*v1 return false } + +// SetPredicatesOrderingDuringTest sets the predicatesOrdering to the specified +// value, and returns a function that restores the original value. 
+func SetPredicatesOrderingDuringTest(value []string) func() { + origVal := predicatesOrdering + predicatesOrdering = value + return func() { + predicatesOrdering = origVal + } +} diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 86edcbceb23..1877f2b52df 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -217,7 +217,7 @@ func TestSelectHost(t *testing.T) { } func TestGenericScheduler(t *testing.T) { - algorithmpredicates.SetPredicatesOrdering(order) + defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)() tests := []struct { name string predicates map[string]algorithmpredicates.FitPredicate @@ -479,7 +479,6 @@ func TestGenericScheduler(t *testing.T) { // makeScheduler makes a simple genericScheduler for testing. func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes []*v1.Node) *genericScheduler { - algorithmpredicates.SetPredicatesOrdering(order) cache := internalcache.New(time.Duration(0), wait.NeverStop) fwk, _ := framework.NewFramework(EmptyPluginRegistry, nil) for _, n := range nodes { @@ -503,6 +502,7 @@ func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes } func TestFindFitAllError(t *testing.T) { + defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)() predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate} nodes := makeNodeList([]string{"3", "2", "1"}) scheduler := makeScheduler(predicates, nodes) @@ -531,6 +531,7 @@ func TestFindFitAllError(t *testing.T) { } func TestFindFitSomeError(t *testing.T) { + defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)() predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate} nodes := makeNodeList([]string{"3", "2", "1"}) scheduler := makeScheduler(predicates, nodes) @@ -846,7 +847,7 @@ var startTime20190107 = metav1.Date(2019, 1, 7, 1, 1, 1, 0, time.UTC) // TestSelectNodesForPreemption tests selectNodesForPreemption. This test assumes // that podsFitsOnNode works correctly and is tested separately. func TestSelectNodesForPreemption(t *testing.T) { - algorithmpredicates.SetPredicatesOrdering(order) + defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)() tests := []struct { name string predicates map[string]algorithmpredicates.FitPredicate @@ -1005,7 +1006,7 @@ func TestSelectNodesForPreemption(t *testing.T) { // TestPickOneNodeForPreemption tests pickOneNodeForPreemption. func TestPickOneNodeForPreemption(t *testing.T) { - algorithmpredicates.SetPredicatesOrdering(order) + defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)() tests := []struct { name string predicates map[string]algorithmpredicates.FitPredicate @@ -1321,6 +1322,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) { } func TestPreempt(t *testing.T) { + defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)() failedPredMap := FailedPredicateMap{ "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrDiskConflict}, From 7b127311a4115994dda17204af89eff2683550e7 Mon Sep 17 00:00:00 2001 From: Abdullah Gharaibeh Date: Wed, 8 May 2019 22:19:21 -0400 Subject: [PATCH 048/194] Make thread-safe the prebind callback of stateful plugin in scheduler framework. 
--- .../framework/plugins/examples/stateful/stateful.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/scheduler/framework/plugins/examples/stateful/stateful.go b/pkg/scheduler/framework/plugins/examples/stateful/stateful.go index e42022a48bb..30b2104a473 100644 --- a/pkg/scheduler/framework/plugins/examples/stateful/stateful.go +++ b/pkg/scheduler/framework/plugins/examples/stateful/stateful.go @@ -18,6 +18,7 @@ package stateful import ( "fmt" + "sync" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -31,6 +32,7 @@ import ( type MultipointExample struct { mpState map[int]string numRuns int + mu sync.RWMutex } var _ = framework.ReservePlugin(&MultipointExample{}) @@ -46,12 +48,16 @@ func (mp *MultipointExample) Name() string { // Reserve is the functions invoked by the framework at "reserve" extension point. func (mp *MultipointExample) Reserve(pc *framework.PluginContext, pod *v1.Pod, nodeName string) *framework.Status { + // Reserve is not called concurrently, and so we don't need to lock. mp.numRuns++ return nil } // Prebind is the functions invoked by the framework at "prebind" extension point. func (mp *MultipointExample) Prebind(pc *framework.PluginContext, pod *v1.Pod, nodeName string) *framework.Status { + // Prebind could be called concurrently for different pods. + mp.mu.Lock() + defer mp.mu.Unlock() mp.numRuns++ if pod == nil { return framework.NewStatus(framework.Error, "pod must not be nil") From 359d0e31ea020920478b397ff66d33fad491c1c9 Mon Sep 17 00:00:00 2001 From: Youbing Li Date: Tue, 7 May 2019 10:39:41 -0400 Subject: [PATCH 049/194] Fixes 77527 - Update Makefile about the usage of building unstripped binaries --- build/root/Makefile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/build/root/Makefile b/build/root/Makefile index c079444b589..f40fec8abc9 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -78,10 +78,11 @@ define ALL_HELP_INFO # make # make all # make all WHAT=cmd/kubelet GOFLAGS=-v -# make all GOGCFLAGS="-N -l" -# Note: Use the -N -l options to disable compiler optimizations an inlining. -# Using these build options allows you to subsequently use source -# debugging tools like delve. +# make all GOLDFLAGS="" +# Note: Specify GOLDFLAGS as an empty string for building unstripped binaries, which allows +# you to use code debugging tools like delve. When GOLDFLAGS is unspecified, it defaults +# to "-s -w" which strips debug information. Other flags that can be used for GOLDFLAGS +# are documented at https://golang.org/cmd/link/ endef .PHONY: all ifeq ($(PRINT_HELP),y) From 093027c8914b19754af3ece5a83899376efae4b8 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 8 May 2019 17:58:58 +0200 Subject: [PATCH 050/194] e2e/storage: ensure that also external storage classes are unique One previously undocumented expectation is that GetDynamicProvisionStorageClass can be called more than once per test and then each time returns a new, unique storage class. The in-memory implementation in driveroperations.go:GetStorageClass ensured that, but loading from a .yaml file didn't. This caused the multivolume tests to fail when applied to an already installed GCE driver with the -storage.testdriver parameter. 
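For illustration only, a minimal sketch of the uniqueness pattern relied on here, using names.SimpleNameGenerator from k8s.io/apiserver/pkg/storage/names (the uniqueCopy helper and the example names below are hypothetical and not part of the test driver API; only the GenerateName(base + "-") pattern comes from this change):

  package main

  import (
      "fmt"

      storagev1 "k8s.io/api/storage/v1"
      metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
      "k8s.io/apiserver/pkg/storage/names"
  )

  // uniqueCopy returns a copy of the given StorageClass whose name carries a
  // fresh random suffix, so repeated calls within one test never collide.
  func uniqueCopy(base *storagev1.StorageClass) *storagev1.StorageClass {
      sc := base.DeepCopy()
      // GenerateName appends a short random suffix, e.g. "example-sc-x7k2q".
      sc.Name = names.SimpleNameGenerator.GenerateName(sc.Name + "-")
      return sc
  }

  func main() {
      base := &storagev1.StorageClass{
          ObjectMeta:  metav1.ObjectMeta{Name: "example-sc"},       // hypothetical name
          Provisioner: "example.com/provisioner",                   // placeholder provisioner
      }
      // Two calls yield two distinct class names.
      fmt.Println(uniqueCopy(base).Name, uniqueCopy(base).Name)
  }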
--- test/e2e/storage/external/BUILD | 1 + test/e2e/storage/external/external.go | 4 ++++ test/e2e/storage/testsuites/testdriver.go | 3 +++ 3 files changed, 8 insertions(+) diff --git a/test/e2e/storage/external/BUILD b/test/e2e/storage/external/BUILD index 603442833be..3112e6c98e4 100644 --- a/test/e2e/storage/external/BUILD +++ b/test/e2e/storage/external/BUILD @@ -11,6 +11,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/storage/testpatterns:go_default_library", diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index d06c0ccf7a4..a539a54f347 100644 --- a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/storage/names" "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" @@ -242,6 +243,9 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.Pe sc, ok := items[0].(*storagev1.StorageClass) gomega.Expect(ok).To(gomega.BeTrue(), "storage class from %s", d.StorageClass.FromFile) + // Ensure that we can load more than once as required for + // GetDynamicProvisionStorageClass by adding a random suffix. + sc.Name = names.SimpleNameGenerator.GenerateName(sc.Name + "-") if fsType != "" { if sc.Parameters == nil { sc.Parameters = map[string]string{} diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go index 3736390b747..b7dc23b2b19 100644 --- a/test/e2e/storage/testsuites/testdriver.go +++ b/test/e2e/storage/testsuites/testdriver.go @@ -88,6 +88,9 @@ type PreprovisionedPVTestDriver interface { type DynamicPVTestDriver interface { TestDriver // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume. + // The StorageClass must be created in the current test's namespace and have + // a unique name inside that namespace because GetDynamicProvisionStorageClass might + // be called more than once per test. // It will set fsType to the StorageClass, if TestDriver supports it. // It will return nil, if the TestDriver doesn't support it. GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass From b4a8beb0374dabe0a155717860364b3be8ef82c3 Mon Sep 17 00:00:00 2001 From: Matt Matejczyk Date: Thu, 9 May 2019 08:53:32 +0200 Subject: [PATCH 051/194] Revert #77552 now the #77580 has been merged The https://github.com/kubernetes/kubernetes/pull/77552 was a temporary workaround that is no longer needed now as https://github.com/kubernetes/kubernetes/pull/77580 got in. --- cluster/validate-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index 4c0788568b3..a2848149e73 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -104,7 +104,7 @@ while true; do # which are important for line counting. # Use trick from https://unix.stackexchange.com/a/383411 to avoid # newline truncation. 
- node=$(kubectl_retry get nodes --chunk-size=0 --no-headers; ret=$?; echo .; exit "$ret") && res="$?" || res="$?" + node=$(kubectl_retry get nodes --no-headers; ret=$?; echo .; exit "$ret") && res="$?" || res="$?" node="${node%.}" if [ "${res}" -ne "0" ]; then if [[ "${attempt}" -gt "${last_run:-$MAX_ATTEMPTS}" ]]; then From d46bd0dc7affaf3fcf91deb33a45211095aab7ad Mon Sep 17 00:00:00 2001 From: SataQiu Date: Thu, 9 May 2019 17:16:59 +0800 Subject: [PATCH 052/194] make KubernetesDir a true constant --- cmd/kubeadm/app/cmd/init.go | 2 +- cmd/kubeadm/app/cmd/upgrade/BUILD | 1 - cmd/kubeadm/app/cmd/upgrade/apply.go | 8 ++--- cmd/kubeadm/app/cmd/upgrade/apply_test.go | 6 +--- cmd/kubeadm/app/constants/constants.go | 18 ++++++++---- cmd/kubeadm/app/phases/upgrade/postupgrade.go | 2 +- cmd/kubeadm/app/phases/upgrade/staticpods.go | 29 +++++++++++++------ .../app/phases/upgrade/staticpods_test.go | 8 ++--- 8 files changed, 43 insertions(+), 31 deletions(-) diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 3bf7ef874f6..9f4e3f1fdfc 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -334,7 +334,7 @@ func newInitData(cmd *cobra.Command, args []string, options *initOptions, out io // if dry running creates a temporary folder for saving kubeadm generated files dryRunDir := "" if options.dryRun { - if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm("kubeadm-init-dryrun"); err != nil { + if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-init-dryrun"); err != nil { return nil, errors.Wrap(err, "couldn't create a temporary directory") } } diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index c1b47c849a3..682b7683a33 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -58,7 +58,6 @@ go_test( embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/upgrade:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 7c5f7876429..e56797686b5 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -238,14 +238,14 @@ func PerformControlPlaneUpgrade(flags *applyFlags, client clientset.Interface, w } // GetPathManagerForUpgrade returns a path manager properly configured for the given InitConfiguration. 
-func GetPathManagerForUpgrade(internalcfg *kubeadmapi.InitConfiguration, etcdUpgrade bool) (upgrade.StaticPodPathManager, error) { +func GetPathManagerForUpgrade(kubernetesDir string, internalcfg *kubeadmapi.InitConfiguration, etcdUpgrade bool) (upgrade.StaticPodPathManager, error) { isHAEtcd := etcdutil.CheckConfigurationIsHA(&internalcfg.Etcd) - return upgrade.NewKubeStaticPodPathManagerUsingTempDirs(constants.GetStaticPodDirectory(), true, etcdUpgrade && !isHAEtcd) + return upgrade.NewKubeStaticPodPathManagerUsingTempDirs(kubernetesDir, true, etcdUpgrade && !isHAEtcd) } // PerformStaticPodUpgrade performs the upgrade of the control plane components for a static pod hosted cluster func PerformStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter, internalcfg *kubeadmapi.InitConfiguration, etcdUpgrade, renewCerts bool) error { - pathManager, err := GetPathManagerForUpgrade(internalcfg, etcdUpgrade) + pathManager, err := GetPathManagerForUpgrade(constants.KubernetesDir, internalcfg, etcdUpgrade) if err != nil { return err } @@ -257,7 +257,7 @@ func PerformStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter // DryRunStaticPodUpgrade fakes an upgrade of the control plane func DryRunStaticPodUpgrade(internalcfg *kubeadmapi.InitConfiguration) error { - dryRunManifestDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgrade-dryrun") + dryRunManifestDir, err := constants.CreateTempDirForKubeadm("", "kubeadm-upgrade-dryrun") if err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/upgrade/apply_test.go b/cmd/kubeadm/app/cmd/upgrade/apply_test.go index 5df2204252c..4f957651327 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply_test.go @@ -22,7 +22,6 @@ import ( "testing" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) func TestSessionIsInteractive(t *testing.T) { @@ -114,14 +113,11 @@ func TestGetPathManagerForUpgrade(t *testing.T) { if err != nil { t.Fatalf("unexpected error making temporary directory: %v", err) } - oldK8sDir := constants.KubernetesDir - constants.KubernetesDir = tmpdir defer func() { - constants.KubernetesDir = oldK8sDir os.RemoveAll(tmpdir) }() - pathmgr, err := GetPathManagerForUpgrade(test.cfg, test.etcdUpgrade) + pathmgr, err := GetPathManagerForUpgrade(tmpdir, test.cfg, test.etcdUpgrade) if err != nil { t.Fatalf("unexpected error creating path manager: %v", err) } diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 0870de5ce62..96500595173 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -33,11 +33,9 @@ import ( "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" ) -// KubernetesDir is the directory Kubernetes owns for storing various configuration files -// This semi-constant MUST NOT be modified during runtime. It's a variable solely for use in unit testing. 
-var KubernetesDir = "/etc/kubernetes" - const ( + // KubernetesDir is the directory Kubernetes owns for storing various configuration files + KubernetesDir = "/etc/kubernetes" // ManifestsSubDirName defines directory name to store manifests ManifestsSubDirName = "manifests" // TempDirForKubeadm defines temporary directory for kubeadm @@ -448,8 +446,12 @@ func AddSelfHostedPrefix(componentName string) string { } // CreateTempDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp (not using /tmp as that would potentially be dangerous) -func CreateTempDirForKubeadm(dirName string) (string, error) { +func CreateTempDirForKubeadm(kubernetesDir, dirName string) (string, error) { tempDir := path.Join(KubernetesDir, TempDirForKubeadm) + if len(kubernetesDir) != 0 { + tempDir = path.Join(kubernetesDir, TempDirForKubeadm) + } + // creates target folder if not already exists if err := os.MkdirAll(tempDir, 0700); err != nil { return "", errors.Wrapf(err, "failed to create directory %q", tempDir) @@ -463,8 +465,12 @@ func CreateTempDirForKubeadm(dirName string) (string, error) { } // CreateTimestampDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp formatted with the current date -func CreateTimestampDirForKubeadm(dirName string) (string, error) { +func CreateTimestampDirForKubeadm(kubernetesDir, dirName string) (string, error) { tempDir := path.Join(KubernetesDir, TempDirForKubeadm) + if len(kubernetesDir) != 0 { + tempDir = path.Join(kubernetesDir, TempDirForKubeadm) + } + // creates target folder if not already exists if err := os.MkdirAll(tempDir, 0700); err != nil { return "", errors.Wrapf(err, "failed to create directory %q", tempDir) diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 09fd9e5fb0e..ae9a0483f47 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -180,7 +180,7 @@ func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitCon // GetKubeletDir gets the kubelet directory based on whether the user is dry-running this command or not. 
func GetKubeletDir(dryRun bool) (string, error) { if dryRun { - return kubeadmconstants.CreateTempDirForKubeadm("kubeadm-upgrade-dryrun") + return kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-upgrade-dryrun") } return kubeadmconstants.KubeletRunDirectory, nil } diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index db9537655fa..754ee78ca95 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -19,6 +19,7 @@ package upgrade import ( "fmt" "os" + "path/filepath" "strings" "time" @@ -47,6 +48,8 @@ const ( type StaticPodPathManager interface { // MoveFile should move a file from oldPath to newPath MoveFile(oldPath, newPath string) error + // KubernetesDir is the directory Kubernetes owns for storing various configuration files + KubernetesDir() string // RealManifestPath gets the file path for the component in the "real" static pod manifest directory used by the kubelet RealManifestPath(component string) string // RealManifestDir should point to the static pod manifest directory used by the kubelet @@ -67,6 +70,7 @@ type StaticPodPathManager interface { // KubeStaticPodPathManager is a real implementation of StaticPodPathManager that is used when upgrading a static pod cluster type KubeStaticPodPathManager struct { + kubernetesDir string realManifestDir string tempManifestDir string backupManifestDir string @@ -77,9 +81,10 @@ type KubeStaticPodPathManager struct { } // NewKubeStaticPodPathManager creates a new instance of KubeStaticPodPathManager -func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string, keepManifestDir, keepEtcdDir bool) StaticPodPathManager { +func NewKubeStaticPodPathManager(kubernetesDir, tempDir, backupDir, backupEtcdDir string, keepManifestDir, keepEtcdDir bool) StaticPodPathManager { return &KubeStaticPodPathManager{ - realManifestDir: realDir, + kubernetesDir: kubernetesDir, + realManifestDir: filepath.Join(kubernetesDir, constants.ManifestsSubDirName), tempManifestDir: tempDir, backupManifestDir: backupDir, backupEtcdDir: backupEtcdDir, @@ -89,21 +94,22 @@ func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir stri } // NewKubeStaticPodPathManagerUsingTempDirs creates a new instance of KubeStaticPodPathManager with temporary directories backing it -func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string, saveManifestsDir, saveEtcdDir bool) (StaticPodPathManager, error) { - upgradedManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgraded-manifests") +func NewKubeStaticPodPathManagerUsingTempDirs(kubernetesDir string, saveManifestsDir, saveEtcdDir bool) (StaticPodPathManager, error) { + + upgradedManifestsDir, err := constants.CreateTempDirForKubeadm(kubernetesDir, "kubeadm-upgraded-manifests") if err != nil { return nil, err } - backupManifestsDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-manifests") + backupManifestsDir, err := constants.CreateTimestampDirForKubeadm(kubernetesDir, "kubeadm-backup-manifests") if err != nil { return nil, err } - backupEtcdDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-etcd") + backupEtcdDir, err := constants.CreateTimestampDirForKubeadm(kubernetesDir, "kubeadm-backup-etcd") if err != nil { return nil, err } - return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir, saveManifestsDir, saveEtcdDir), nil + return 
NewKubeStaticPodPathManager(kubernetesDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir, saveManifestsDir, saveEtcdDir), nil } // MoveFile should move a file from oldPath to newPath @@ -111,6 +117,11 @@ func (spm *KubeStaticPodPathManager) MoveFile(oldPath, newPath string) error { return os.Rename(oldPath, newPath) } +// KubernetesDir should point to the directory Kubernetes owns for storing various configuration files +func (spm *KubeStaticPodPathManager) KubernetesDir() string { + return spm.kubernetesDir +} + // RealManifestPath gets the file path for the component in the "real" static pod manifest directory used by the kubelet func (spm *KubeStaticPodPathManager) RealManifestPath(component string) string { return constants.GetStaticPodFilepath(component, spm.realManifestDir) @@ -202,7 +213,7 @@ func upgradeComponent(component string, renewCerts bool, waiter apiclient.Waiter // if certificate renewal should be performed if renewCerts { // renew all the certificates used by the current component - if err := renewCertsByComponent(cfg, constants.KubernetesDir, component); err != nil { + if err := renewCertsByComponent(cfg, pathMgr.KubernetesDir(), component); err != nil { return rollbackOldManifests(recoverManifests, errors.Wrapf(err, "failed to renew certificates for component %q", component), pathMgr, recoverEtcd) } } @@ -452,7 +463,7 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, if renewCerts { // renew the certificate embedded in the admin.conf file - err := renewEmbeddedCertsByName(cfg, constants.KubernetesDir, constants.AdminKubeConfigFileName) + err := renewEmbeddedCertsByName(cfg, pathMgr.KubernetesDir(), constants.AdminKubeConfigFileName) if err != nil { return rollbackOldManifests(recoverManifests, errors.Wrapf(err, "failed to upgrade the %s certificates", constants.AdminKubeConfigFileName), pathMgr, false) } diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 64e6e4fcf3a..33406c0450c 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -468,7 +468,7 @@ func TestStaticPodControlPlane(t *testing.T) { t.Fatalf("couldn't run NewFakeStaticPodPathManager: %v", err) } defer os.RemoveAll(pathMgr.(*fakeStaticPodPathManager).KubernetesDir()) - constants.KubernetesDir = pathMgr.(*fakeStaticPodPathManager).KubernetesDir() + tmpKubernetesDir := pathMgr.(*fakeStaticPodPathManager).KubernetesDir() tempCertsDir, err := ioutil.TempDir("", "kubeadm-certs") if err != nil { @@ -505,7 +505,7 @@ func TestStaticPodControlPlane(t *testing.T) { if rt.skipKubeConfig == kubeConfig { continue } - if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, constants.KubernetesDir, oldcfg); err != nil { + if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpKubernetesDir, oldcfg); err != nil { t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err) } } @@ -639,7 +639,7 @@ func TestCleanupDirs(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - realManifestDir, cleanup := getTempDir(t, "realManifestDir") + realKubernetesDir, cleanup := getTempDir(t, "realKubernetesDir") defer cleanup() tempManifestDir, cleanup := getTempDir(t, "tempManifestDir") @@ -651,7 +651,7 @@ func TestCleanupDirs(t *testing.T) { backupEtcdDir, cleanup := getTempDir(t, "backupEtcdDir") defer cleanup() - mgr := NewKubeStaticPodPathManager(realManifestDir, tempManifestDir, backupManifestDir, backupEtcdDir, 
test.keepManifest, test.keepEtcd) + mgr := NewKubeStaticPodPathManager(realKubernetesDir, tempManifestDir, backupManifestDir, backupEtcdDir, test.keepManifest, test.keepEtcd) err := mgr.CleanupDirs() if err != nil { t.Errorf("unexpected error cleaning up: %v", err) From d8a9dfacbf80b797451248c4d9f9916b08238011 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 29 Apr 2019 12:07:56 +0200 Subject: [PATCH 053/194] apiextensions: add structural x-kubernetes-* fields to validation schemas --- api/api-rules/violation_exceptions.list | 3 ++ .../apis/apiextensions/types_jsonschema.go | 30 ++++++++++++++++++ .../apiextensions/v1beta1/types_jsonschema.go | 31 +++++++++++++++++++ .../apiextensions/validation/validation.go | 2 +- .../pkg/apiserver/validation/validation.go | 10 ++++++ 5 files changed, 75 insertions(+), 1 deletion(-) diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index 4fae72c7a94..70e1fa40f6f 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -30,6 +30,9 @@ API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiexten API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSON,Raw API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,Ref API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,Schema +API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XEmbeddedResource +API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XIntOrString +API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XPreserveUnknownFields API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrArray,JSONSchemas API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrArray,Schema API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrBool,Allows diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index af78c34fb6e..e0cba964731 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -55,6 +55,36 @@ type JSONSchemaProps struct { Definitions JSONSchemaDefinitions ExternalDocs *ExternalDocumentation Example *JSON + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + XPreserveUnknownFields bool + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. Both ObjectMeta and TypeMeta + // are validated automatically. x-kubernetes-preserve-unknown-fields + // must be true. 
+ XEmbeddedResource bool + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool } // JSON represents any valid JSON value. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 54c0a4ae13f..84f26e600af 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -55,6 +55,37 @@ type JSONSchemaProps struct { ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + XPreserveUnknownFields bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` } // JSON represents any valid JSON value. 
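For illustration only (this snippet is not part of the patch): a minimal Go sketch of how the new vendor-extension fields added above might be set on a v1beta1 JSONSchemaProps, following the int-or-string pattern described in the doc comments. The variable names are hypothetical and the surrounding CRD wiring is omitted.

```go
package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// Pattern 1 from the doc comment above: anyOf with an integer branch and a
	// string branch, marked with x-kubernetes-int-or-string.
	portSchema := apiextensionsv1beta1.JSONSchemaProps{
		XIntOrString: true,
		AnyOf: []apiextensionsv1beta1.JSONSchemaProps{
			{Type: "integer"},
			{Type: "string"},
		},
	}

	// x-kubernetes-embedded-resource requires type: object; preserve-unknown-fields
	// is commonly set alongside it (see the doc comments above for the exact rules).
	embedded := apiextensionsv1beta1.JSONSchemaProps{
		Type:                   "object",
		XEmbeddedResource:      true,
		XPreserveUnknownFields: true,
	}

	fmt.Println(portSchema.XIntOrString, embedded.XEmbeddedResource)
}
```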
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index 6e4c17a7cc9..da0bcfacff6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -786,7 +786,7 @@ func validateSimpleJSONPath(s string, fldPath *field.Path) field.ErrorList { return allErrs } -var allowedFieldsAtRootSchema = []string{"Description", "Type", "Format", "Title", "Maximum", "ExclusiveMaximum", "Minimum", "ExclusiveMinimum", "MaxLength", "MinLength", "Pattern", "MaxItems", "MinItems", "UniqueItems", "MultipleOf", "Required", "Items", "Properties", "ExternalDocs", "Example"} +var allowedFieldsAtRootSchema = []string{"Description", "Type", "Format", "Title", "Maximum", "ExclusiveMaximum", "Minimum", "ExclusiveMinimum", "MaxLength", "MinLength", "Pattern", "MaxItems", "MinItems", "UniqueItems", "MultipleOf", "Required", "Items", "Properties", "ExternalDocs", "Example", "XPreserveUnknownFields"} func allowedAtRootSchema(field string) bool { for _, v := range allowedFieldsAtRootSchema { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go index 8e2b4ee9ea0..6557d88317d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go @@ -194,6 +194,16 @@ func ConvertJSONSchemaPropsWithPostProcess(in *apiextensions.JSONSchemaProps, ou } } + if in.XPreserveUnknownFields { + out.VendorExtensible.AddExtension("x-kubernetes-preserve-unknown-fields", true) + } + if in.XEmbeddedResource { + out.VendorExtensible.AddExtension("x-kubernetes-embedded-resource", true) + } + if in.XIntOrString { + out.VendorExtensible.AddExtension("x-kubernetes-int-or-string", true) + } + return nil } From 2b19a4db9e5587fced16a0f33b90cba4f41ea4da Mon Sep 17 00:00:00 2001 From: xiaojingchen Date: Sun, 28 Apr 2019 21:00:51 +0800 Subject: [PATCH 054/194] fix cmd/kube-controller-manager's golint failures --- .../app/controllermanager.go | 13 +++++++++++-- .../app/import_known_versions.go | 3 ++- cmd/kube-controller-manager/app/plugins.go | 1 + hack/.golint_failures | 1 - .../admissionwebhook/broken_webhook_test.go | 2 +- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 447b216b7b3..9978079f41b 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -72,16 +72,19 @@ import ( ) const ( - // Jitter used when starting controller managers + // ControllerStartJitter is the Jitter used when starting controller managers ControllerStartJitter = 1.0 // ConfigzName is the name used for register kube-controller manager /configz, same with GroupName. 
ConfigzName = "kubecontrollermanager.config.k8s.io" ) +// ControllerLoopMode is the kube-controller-manager's mode of running controller loops that are cloud provider dependent type ControllerLoopMode int const ( + // IncludeCloudLoops means the kube-controller-manager include the controller loops that are cloud provider dependent IncludeCloudLoops ControllerLoopMode = iota + // ExternalLoops means the kube-controller-manager exclude the controller loops that are cloud provider dependent ExternalLoops ) @@ -284,6 +287,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error { panic("unreachable") } +// ControllerContext defines the context object for controller type ControllerContext struct { // ClientBuilder will provide a client for this controller to use ClientBuilder controller.ControllerClientBuilder @@ -328,6 +332,7 @@ type ControllerContext struct { ResyncPeriod func() time.Duration } +// IsControllerEnabled checks if the context's controllers enabled or not func (c ControllerContext) IsControllerEnabled(name string) bool { return genericcontrollermanager.IsControllerEnabled(name, ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers) } @@ -337,6 +342,7 @@ func (c ControllerContext) IsControllerEnabled(name string) bool { // The bool indicates whether the controller was enabled. type InitFunc func(ctx ControllerContext) (debuggingHandler http.Handler, enabled bool, err error) +// KnownControllers returns all known controllers's name func KnownControllers() []string { ret := sets.StringKeySet(NewControllerInitializers(IncludeCloudLoops)) @@ -351,6 +357,7 @@ func KnownControllers() []string { return ret.List() } +// ControllersDisabledByDefault is the set of controllers which is disabled by default var ControllersDisabledByDefault = sets.NewString( "bootstrapsigner", "tokencleaner", @@ -405,8 +412,9 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc return controllers } +// GetAvailableResources gets the map which contains all available resources of the apiserver // TODO: In general, any controller checking this needs to be dynamic so -// users don't have to restart their controller manager if they change the apiserver. +// users don't have to restart their controller manager if they change the apiserver. // Until we get there, the structure here needs to be exposed for the construction of a proper ControllerContext. func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) { client := clientBuilder.ClientOrDie("controller-discovery") @@ -484,6 +492,7 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien return ctx, nil } +// StartControllers starts a set of controllers with a specified ControllerContext func StartControllers(ctx ControllerContext, startSATokenController InitFunc, controllers map[string]InitFunc, unsecuredMux *mux.PathRecorderMux) error { // Always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest // If this fails, just return here and fail since other controllers won't be able to get credentials. 
diff --git a/cmd/kube-controller-manager/app/import_known_versions.go b/cmd/kube-controller-manager/app/import_known_versions.go index f7f3cb894ec..e92bd0dbe31 100644 --- a/cmd/kube-controller-manager/app/import_known_versions.go +++ b/cmd/kube-controller-manager/app/import_known_versions.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package app imports the API groups that the client will support // TODO: Remove this file when namespace controller and garbage collector // stops using legacyscheme.Registry.RESTMapper() package app -// These imports are the API groups the client will support. import ( + // These imports are the API groups the client will support. _ "k8s.io/kubernetes/pkg/apis/apps/install" _ "k8s.io/kubernetes/pkg/apis/authentication/install" _ "k8s.io/kubernetes/pkg/apis/authorization/install" diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go index 0c9f8b3cf81..ff66bfed58c 100644 --- a/cmd/kube-controller-manager/app/plugins.go +++ b/cmd/kube-controller-manager/app/plugins.go @@ -27,6 +27,7 @@ import ( // Cloud providers cloudprovider "k8s.io/cloud-provider" + // ensure the cloud providers are installed _ "k8s.io/kubernetes/pkg/cloudprovider/providers" // Volume plugins "k8s.io/kubernetes/pkg/volume" diff --git a/hack/.golint_failures b/hack/.golint_failures index c42cebb1350..9ebf50f9bf4 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -1,6 +1,5 @@ cmd/cloud-controller-manager/app/apis/config/v1alpha1 cmd/kube-apiserver/app -cmd/kube-controller-manager/app cmd/kubeadm/app/apis/kubeadm/v1beta1 cmd/kubeadm/app/apis/kubeadm/v1beta2 pkg/apis/abac/latest diff --git a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go index 7a78ca2b293..7a08d4cd4da 100644 --- a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go +++ b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go @@ -150,7 +150,7 @@ func exampleDeployment(name string) *appsv1.Deployment { func brokenWebhookConfig(name string) *admissionregistrationv1beta1.ValidatingWebhookConfiguration { var path string - var failurePolicy = admissionregistrationv1beta1.Fail + failurePolicy := admissionregistrationv1beta1.Fail return &admissionregistrationv1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: name, From f6087365e9e1c6d9426e39dfc8a0d5be5a584d28 Mon Sep 17 00:00:00 2001 From: Yassine TIJANI Date: Thu, 9 May 2019 14:53:38 +0200 Subject: [PATCH 055/194] check if Memory is not nil for container stats --- pkg/kubelet/stats/helper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/stats/helper.go b/pkg/kubelet/stats/helper.go index 273205338bc..454facc4e98 100644 --- a/pkg/kubelet/stats/helper.go +++ b/pkg/kubelet/stats/helper.go @@ -51,7 +51,7 @@ func cadvisorInfoToCPUandMemoryStats(info *cadvisorapiv2.ContainerInfo) (*statsa cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total } } - if info.Spec.HasMemory { + if info.Spec.HasMemory && cstat.Memory != nil { pageFaults := cstat.Memory.ContainerData.Pgfault majorPageFaults := cstat.Memory.ContainerData.Pgmajfault memoryStats = &statsapi.MemoryStats{ From 27cccad82208a7c8c0305585244b2f8214d2df6d Mon Sep 17 00:00:00 2001 From: JulienBalestra Date: Thu, 4 Jan 2018 11:16:56 +0100 Subject: [PATCH 056/194] Kubelet provides an updated and complete status of local-static 
Pods --- pkg/kubelet/kubelet_getters.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index 5331f6418f9..ef74e46e72d 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -162,7 +162,15 @@ func (kl *Kubelet) getPodResourcesDir() string { // GetPods returns all pods bound to the kubelet and their spec, and the mirror // pods. func (kl *Kubelet) GetPods() []*v1.Pod { - return kl.podManager.GetPods() + pods := kl.podManager.GetPods() + // a kubelet running without apiserver requires an additional + // update of the static pod status. See #57106 + for _, p := range pods { + if status, ok := kl.statusManager.GetPodStatus(p.UID); ok { + p.Status = status + } + } + return pods } // GetRunningPods returns all pods running on kubelet from looking at the From 25dec55d61190ad1916de61bf7a06d26327bac76 Mon Sep 17 00:00:00 2001 From: Shovan Maity Date: Thu, 9 May 2019 19:30:43 +0530 Subject: [PATCH 057/194] Update client-go example README.md (fix typo) Signed-off-by: Shovan Maity --- staging/src/k8s.io/client-go/examples/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/examples/README.md b/staging/src/k8s.io/client-go/examples/README.md index 63a85778c76..b24964119d3 100644 --- a/staging/src/k8s.io/client-go/examples/README.md +++ b/staging/src/k8s.io/client-go/examples/README.md @@ -11,7 +11,7 @@ To enable these plugins in your program, import them in your main package. You can load all auth plugins: ```go -import _ "k8s.io/client-go/plugin/pkg/client/auth +import _ "k8s.io/client-go/plugin/pkg/client/auth" ``` Or you can load specific auth plugins: @@ -48,4 +48,4 @@ import _ "k8s.io/client-go/plugin/pkg/client/auth/openstack" ### Testing -- [**Fake Client**](./fake-client): Use a fake client in tests. \ No newline at end of file +- [**Fake Client**](./fake-client): Use a fake client in tests. From 109f1c479eea275274094ae38ec50c5dc55fd447 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=80b=C3=A9j=C3=ADd=C3=A9=20=C3=80yod=C3=A9l=C3=A9?= Date: Wed, 1 May 2019 18:00:10 +0000 Subject: [PATCH 058/194] Clean up kube-proxy. These are based on recommendation from [staticcheck](http://staticcheck.io/). - Removes dead type/function along with the import that the function introduced. - Removes unused struct fields. - Removes select nested in a tight for loop, the select does not have a default, so it will be blocking. 
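To illustrate the last point above (a sketch, not code from the patch): a select statement with a single case and no default blocks until that case is ready, so it behaves exactly like a plain channel receive, which is why the loop in server.go below can be simplified.

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	errCh := make(chan error, 1)
	errCh <- errors.New("boom")

	// With no default case, this select blocks until errCh has a value,
	// so it is equivalent to a direct receive.
	select {
	case err := <-errCh:
		fmt.Println("via select:", err)
	}

	errCh <- errors.New("boom again")

	// The simplified form the patch switches to.
	err := <-errCh
	fmt.Println("via receive:", err)
}
```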
--- cmd/kube-proxy/app/BUILD | 1 - cmd/kube-proxy/app/server.go | 8 +++----- cmd/kube-proxy/app/server_others_test.go | 2 -- cmd/kube-proxy/app/server_test.go | 9 --------- 4 files changed, 3 insertions(+), 17 deletions(-) diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 667faf2f26b..552a6cae68f 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -163,7 +163,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis/core:go_default_library", "//pkg/proxy/apis/config:go_default_library", "//pkg/util/configz:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 5000e42e80e..2c693ae7d71 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -332,11 +332,9 @@ func (o *Options) runLoop() error { }() for { - select { - case err := <-o.errCh: - if err != nil { - return err - } + err := <-o.errCh + if err != nil { + return err } } } diff --git a/cmd/kube-proxy/app/server_others_test.go b/cmd/kube-proxy/app/server_others_test.go index ff74280f38c..50eb6ff3817 100644 --- a/cmd/kube-proxy/app/server_others_test.go +++ b/cmd/kube-proxy/app/server_others_test.go @@ -29,8 +29,6 @@ import ( func Test_getProxyMode(t *testing.T) { var cases = []struct { flag string - annotationKey string - annotationVal string iptablesVersion string ipsetVersion string kmods []string diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index aac90f57afb..371e48c6bb2 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -33,20 +33,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" componentbaseconfig "k8s.io/component-base/config" - api "k8s.io/kubernetes/pkg/apis/core" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" "k8s.io/kubernetes/pkg/util/configz" utilpointer "k8s.io/utils/pointer" ) -type fakeNodeInterface struct { - node api.Node -} - -func (fake *fakeNodeInterface) Get(hostname string, options metav1.GetOptions) (*api.Node, error) { - return &fake.node, nil -} - type fakeIPTablesVersioner struct { version string // what to return err error // what to return From 04be2c4162f5fe680fbdc47703fa6979c12ef674 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=80b=C3=A9j=C3=ADd=C3=A9=20=C3=80yod=C3=A9l=C3=A9?= Date: Thu, 9 May 2019 15:23:41 +0000 Subject: [PATCH 059/194] Clean up pkg/api. These are based on recommendation from [staticcheck](http://staticcheck.io/). 
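For reference (an illustrative sketch, not taken from the patch): the pkg/api cleanups below apply two common staticcheck-style simplifications, comparing byte slices with bytes.Equal rather than checking bytes.Compare against zero, and collapsing declare-then-assign into a short variable declaration.

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	a, b := []byte("kubernetes"), []byte("kubernetes")

	// Prefer bytes.Equal(a, b) over bytes.Compare(a, b) == 0.
	if bytes.Equal(a, b) {
		fmt.Println("slices are equal")
	}

	// Prefer a short variable declaration over declaring and assigning separately.
	matched := bytes.HasPrefix(a, []byte("kube"))
	fmt.Println(matched)
}
```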
--- pkg/api/service/util_test.go | 4 ++-- pkg/api/testing/copy_test.go | 2 +- pkg/api/testing/serialization_test.go | 2 +- pkg/api/v1/pod/util_test.go | 3 +-- pkg/api/v1/service/util_test.go | 4 ++-- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pkg/api/service/util_test.go b/pkg/api/service/util_test.go index 81207046ac5..7006310e70e 100644 --- a/pkg/api/service/util_test.go +++ b/pkg/api/service/util_test.go @@ -53,13 +53,13 @@ func TestGetLoadBalancerSourceRanges(t *testing.T) { annotations[api.AnnotationLoadBalancerSourceRangesKey] = v svc := api.Service{} svc.Annotations = annotations - cidrs, err := GetLoadBalancerSourceRanges(&svc) + _, err := GetLoadBalancerSourceRanges(&svc) if err != nil { t.Errorf("Unexpected error parsing: %q", v) } svc = api.Service{} svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",") - cidrs, err = GetLoadBalancerSourceRanges(&svc) + cidrs, err := GetLoadBalancerSourceRanges(&svc) if err != nil { t.Errorf("Unexpected error parsing: %q", v) } diff --git a/pkg/api/testing/copy_test.go b/pkg/api/testing/copy_test.go index 2ed8d943215..fd032cfd06f 100644 --- a/pkg/api/testing/copy_test.go +++ b/pkg/api/testing/copy_test.go @@ -69,7 +69,7 @@ func doDeepCopyTest(t *testing.T, kind schema.GroupVersionKind, f *fuzz.Fuzzer) return } - if bytes.Compare(prefuzzData.Bytes(), postfuzzData.Bytes()) != 0 { + if !bytes.Equal(prefuzzData.Bytes(), postfuzzData.Bytes()) { t.Log(diff.StringDiff(prefuzzData.String(), postfuzzData.String())) t.Errorf("Fuzzing copy modified original of %#v", kind) return diff --git a/pkg/api/testing/serialization_test.go b/pkg/api/testing/serialization_test.go index 00b7aa309e6..c22f2b929f1 100644 --- a/pkg/api/testing/serialization_test.go +++ b/pkg/api/testing/serialization_test.go @@ -387,7 +387,7 @@ func TestObjectWatchFraming(t *testing.T) { } sr = streaming.NewDecoder(framer.NewFrameReader(ioutil.NopCloser(out)), s) outEvent := &metav1.WatchEvent{} - res, _, err = sr.Decode(nil, outEvent) + _, _, err = sr.Decode(nil, outEvent) if err != nil || outEvent.Type != string(watch.Added) { t.Fatalf("%v: %#v", err, outEvent) } diff --git a/pkg/api/v1/pod/util_test.go b/pkg/api/v1/pod/util_test.go index 3d34d5d6c0a..681d5faab55 100644 --- a/pkg/api/v1/pod/util_test.go +++ b/pkg/api/v1/pod/util_test.go @@ -604,8 +604,7 @@ func TestUpdatePodCondition(t *testing.T) { } for _, test := range tests { - var resultStatus bool - resultStatus = UpdatePodCondition(test.status, &test.conditions) + resultStatus := UpdatePodCondition(test.status, &test.conditions) assert.Equal(t, test.expected, resultStatus, test.desc) } diff --git a/pkg/api/v1/service/util_test.go b/pkg/api/v1/service/util_test.go index 04c94187b81..9813017c491 100644 --- a/pkg/api/v1/service/util_test.go +++ b/pkg/api/v1/service/util_test.go @@ -53,13 +53,13 @@ func TestGetLoadBalancerSourceRanges(t *testing.T) { annotations[v1.AnnotationLoadBalancerSourceRangesKey] = v svc := v1.Service{} svc.Annotations = annotations - cidrs, err := GetLoadBalancerSourceRanges(&svc) + _, err := GetLoadBalancerSourceRanges(&svc) if err != nil { t.Errorf("Unexpected error parsing: %q", v) } svc = v1.Service{} svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",") - cidrs, err = GetLoadBalancerSourceRanges(&svc) + cidrs, err := GetLoadBalancerSourceRanges(&svc) if err != nil { t.Errorf("Unexpected error parsing: %q", v) } From 25df4e69a51d7607854bd72a92bd62437cde9b0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=80b=C3=A9j=C3=ADd=C3=A9=20=C3=80yod=C3=A9l=C3=A9?= Date: Thu, 9 May 2019 
15:25:41 +0000 Subject: [PATCH 060/194] Clean up pkg/apis. These are based on recommendation from [staticcheck](http://staticcheck.io/). --- pkg/apis/apps/v1/conversion.go | 2 +- pkg/apis/apps/v1beta2/conversion.go | 2 +- pkg/apis/autoscaling/v1/conversion.go | 9 +-- pkg/apis/autoscaling/v2beta1/conversion.go | 3 +- pkg/apis/core/validation/validation.go | 83 +++++++++++----------- pkg/apis/storage/validation/validation.go | 2 +- 6 files changed, 47 insertions(+), 54 deletions(-) diff --git a/pkg/apis/apps/v1/conversion.go b/pkg/apis/apps/v1/conversion.go index 34f6078b467..8aa43f9f490 100644 --- a/pkg/apis/apps/v1/conversion.go +++ b/pkg/apis/apps/v1/conversion.go @@ -174,7 +174,7 @@ func Convert_v1_Deployment_To_apps_Deployment(in *appsv1.Deployment, out *apps.D // Copy annotation to deprecated rollbackTo field for roundtrip // TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment - if revision, _ := in.Annotations[appsv1.DeprecatedRollbackTo]; revision != "" { + if revision := in.Annotations[appsv1.DeprecatedRollbackTo]; revision != "" { if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1.DeprecatedRollbackTo, revision, err) } else { diff --git a/pkg/apis/apps/v1beta2/conversion.go b/pkg/apis/apps/v1beta2/conversion.go index 8d0e753a35c..327b61292e0 100644 --- a/pkg/apis/apps/v1beta2/conversion.go +++ b/pkg/apis/apps/v1beta2/conversion.go @@ -407,7 +407,7 @@ func Convert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, o // Copy annotation to deprecated rollbackTo field for roundtrip // TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment - if revision, _ := in.Annotations[appsv1beta2.DeprecatedRollbackTo]; revision != "" { + if revision := in.Annotations[appsv1beta2.DeprecatedRollbackTo]; revision != "" { if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1beta2.DeprecatedRollbackTo, revision, err) } else { diff --git a/pkg/apis/autoscaling/v1/conversion.go b/pkg/apis/autoscaling/v1/conversion.go index f68ac8c286b..a62a3339475 100644 --- a/pkg/apis/autoscaling/v1/conversion.go +++ b/pkg/apis/autoscaling/v1/conversion.go @@ -150,8 +150,7 @@ func Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling } func Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { - var metricType autoscaling.MetricTargetType - metricType = autoscaling.AverageValueMetricType + metricType := autoscaling.AverageValueMetricType out.Target = autoscaling.MetricTarget{ Type: metricType, @@ -327,10 +326,8 @@ func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(i if len(otherMetrics) > 0 || len(in.Status.CurrentMetrics) > 0 || len(currentConditions) > 0 { old := out.Annotations out.Annotations = make(map[string]string, len(old)+3) - if old != nil { - for k, v := range old { - out.Annotations[k] = v - } + for k, v := range old { + out.Annotations[k] = v } } diff --git a/pkg/apis/autoscaling/v2beta1/conversion.go b/pkg/apis/autoscaling/v2beta1/conversion.go index 142cf726493..1f6378670dc 100644 --- a/pkg/apis/autoscaling/v2beta1/conversion.go +++ b/pkg/apis/autoscaling/v2beta1/conversion.go @@ -198,8 +198,7 @@ func Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autosc 
func Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2beta1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { targetAverageValue := &in.TargetAverageValue - var metricType autoscaling.MetricTargetType - metricType = autoscaling.AverageValueMetricType + metricType := autoscaling.AverageValueMetricType out.Target = autoscaling.MetricTarget{ Type: metricType, diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 89940c52a3a..d2e08d0cd01 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -1770,8 +1770,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { // ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. // newPv is updated with fields that cannot be changed. func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = ValidatePersistentVolume(newPv) + allErrs := ValidatePersistentVolume(newPv) // PersistentVolumeSource should be immutable after creation. if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) { @@ -2293,46 +2292,44 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str devicepath := sets.NewString() devicename := sets.NewString() - if devices != nil { - for i, dev := range devices { - idxPath := fldPath.Index(i) - devName := dev.Name - devPath := dev.DevicePath - didMatch, isPVC := isMatchedDevice(devName, volumes) - if len(devName) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } - if devicename.Has(devName) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique")) - } - // Must be PersistentVolumeClaim volume source - if didMatch && !isPVC { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode")) - } - if !didMatch { - allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName)) - } - if len(devPath) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), "")) - } - if devicepath.Has(devPath) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique")) - } - if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')")) - } else { - devicepath.Insert(devPath) - } - // check for overlap with VolumeMount - if deviceNameAlreadyExists(devName, volmounts) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts")) - } - if devicePathAlreadyExists(devPath, volmounts) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts")) - } - if len(devName) > 0 { - devicename.Insert(devName) - } + for i, dev := range devices { + idxPath := fldPath.Index(i) + devName := dev.Name + devPath := dev.DevicePath + didMatch, isPVC := isMatchedDevice(devName, volumes) + if len(devName) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if devicename.Has(devName) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique")) + } + // Must be 
PersistentVolumeClaim volume source + if didMatch && !isPVC { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode")) + } + if !didMatch { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName)) + } + if len(devPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), "")) + } + if devicepath.Has(devPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique")) + } + if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')")) + } else { + devicepath.Insert(devPath) + } + // check for overlap with VolumeMount + if deviceNameAlreadyExists(devName, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts")) + } + if devicePathAlreadyExists(devPath, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts")) + } + if len(devName) > 0 { + devicename.Insert(devName) } } return allErrs @@ -3186,7 +3183,7 @@ func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPa if avoidPodEntry.PodSignature.PodController == nil { allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) } else { - if *(avoidPodEntry.PodSignature.PodController.Controller) != true { + if !*(avoidPodEntry.PodSignature.PodController.Controller) { allErrors = append(allErrors, field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"), *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller")) diff --git a/pkg/apis/storage/validation/validation.go b/pkg/apis/storage/validation/validation.go index 518e04a9de3..4100627d4a5 100644 --- a/pkg/apis/storage/validation/validation.go +++ b/pkg/apis/storage/validation/validation.go @@ -247,7 +247,7 @@ func validateVolumeBindingMode(mode *storage.VolumeBindingMode, fldPath *field.P func validateAllowedTopologies(topologies []api.TopologySelectorTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if topologies == nil || len(topologies) == 0 { + if len(topologies) == 0 { return allErrs } From e28c7b1b5944c1b6e35f083e7ee2b771d93a19f2 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Thu, 9 May 2019 23:44:17 +0800 Subject: [PATCH 061/194] skip cri detection for kubeadm token create --- cmd/kubeadm/app/cmd/token.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index 948b35d9899..419193648d2 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -42,6 +42,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" phaseutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" @@ -215,6 +216,12 @@ func RunCreateToken(out io.Writer, client clientset.Interface, cfgPath string, c // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg 
populated by flags klog.V(1).Infoln("[token] loading configurations") + + // In fact, we don't do any CRI ops at all. + // This is just to force skipping the CRI detection. + // Ref: https://github.com/kubernetes/kubeadm/issues/1559 + cfg.NodeRegistration.CRISocket = kubeadmconstants.DefaultDockerCRISocket + internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, cfg) if err != nil { return err From f9dc278e7545f205b3608f8c5f9d51bfed6d2591 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 29 Apr 2019 10:22:59 +0200 Subject: [PATCH 062/194] apiextensions: add structural schema intermediate types --- .../apiextensions/validation/validation.go | 4 + .../pkg/apiserver/schema/complete.go | 82 ++++++ .../pkg/apiserver/schema/convert.go | 276 ++++++++++++++++++ .../pkg/apiserver/schema/structural.go | 160 ++++++++++ .../pkg/apiserver/schema/validation.go | 238 +++++++++++++++ .../pkg/apiserver/schema/validation_test.go | 71 +++++ 6 files changed, 831 insertions(+) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index da0bcfacff6..e91de0eaa88 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -686,6 +686,10 @@ func (v *specStandardValidatorV3) validate(schema *apiextensions.JSONSchemaProps return allErrs } + // + // WARNING: if anything new is allowed below, NewStructural must be adapted to support it. + // + if schema.Default != nil { allErrs = append(allErrs, field.Forbidden(fldPath.Child("default"), "default is not supported")) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go new file mode 100644 index 00000000000..08e222f0d0e --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// validateStructuralCompleteness checks that every specified field or array in s is also specified +// outside of value validation. 
+func validateStructuralCompleteness(s *Structural, fldPath *field.Path) field.ErrorList { + if s == nil { + return nil + } + + return validateValueValidationCompleteness(s.ValueValidation, s, fldPath, fldPath) +} + +func validateValueValidationCompleteness(v *ValueValidation, s *Structural, sPath, vPath *field.Path) field.ErrorList { + if v == nil { + return nil + } + if s == nil { + return field.ErrorList{field.Required(sPath, fmt.Sprintf("because it is defined in %s", vPath.String()))} + } + + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Not, s, sPath, vPath.Child("not"))...) + for i := range v.AllOf { + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AllOf[i], s, sPath, vPath.Child("allOf").Index(i))...) + } + for i := range v.AnyOf { + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AnyOf[i], s, sPath, vPath.Child("anyOf").Index(i))...) + } + for i := range v.OneOf { + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.OneOf[i], s, sPath, vPath.Child("oneOf").Index(i))...) + } + + return allErrs +} + +func validateNestedValueValidationCompleteness(v *NestedValueValidation, s *Structural, sPath, vPath *field.Path) field.ErrorList { + if v == nil { + return nil + } + if s == nil { + return field.ErrorList{field.Required(sPath, fmt.Sprintf("because it is defined in %s", vPath.String()))} + } + + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateValueValidationCompleteness(&v.ValueValidation, s, sPath, vPath)...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Items, s.Items, sPath.Child("items"), vPath.Child("items"))...) + for k, vFld := range v.Properties { + if sFld, ok := s.Properties[k]; !ok { + allErrs = append(allErrs, field.Required(sPath.Child("properties").Key(k), fmt.Sprintf("because it is defined in %s", vPath.Child("properties").Key(k)))) + } else { + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&vFld, &sFld, sPath.Child("properties").Key(k), vPath.Child("properties").Key(k))...) + } + } + + // don't check additionalProperties as this is not allowed (and checked during validation) + + return allErrs +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go new file mode 100644 index 00000000000..2ed71b2618c --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go @@ -0,0 +1,276 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" +) + +// NewStructural converts an OpenAPI v3 schema into a structural schema. A pre-validated JSONSchemaProps will +// not fail on NewStructural. 
This means that we require that: +// +// - items is not an array of schemas +// - the following fields are not set: +// - id +// - schema +// - $ref +// - patternProperties +// - dependencies +// - additionalItems +// - definitions. +// +// The follow fields are not preserved: +// - externalDocs +// - example. +func NewStructural(s *apiextensions.JSONSchemaProps) (*Structural, error) { + if s == nil { + return nil, nil + } + + if err := validateUnsupportedFields(s); err != nil { + return nil, err + } + + vv, err := newValueValidation(s) + if err != nil { + return nil, err + } + + g, err := newGenerics(s) + if err != nil { + return nil, err + } + + x, err := newExtensions(s) + if err != nil { + return nil, err + } + + ss := &Structural{ + Generic: *g, + Extensions: *x, + ValueValidation: vv, + } + + if s.Items != nil { + if len(s.Items.JSONSchemas) > 0 { + // we validate that it is not an array + return nil, fmt.Errorf("OpenAPIV3Schema 'items' must be a schema, but is an array") + } + item, err := NewStructural(s.Items.Schema) + if err != nil { + return nil, err + } + ss.Items = item + } + + if len(s.Properties) > 0 { + ss.Properties = make(map[string]Structural, len(s.Properties)) + for k, x := range s.Properties { + fld, err := NewStructural(&x) + if err != nil { + return nil, err + } + ss.Properties[k] = *fld + } + } + + return ss, nil +} + +func newGenerics(s *apiextensions.JSONSchemaProps) (*Generic, error) { + if s == nil { + return nil, nil + } + g := &Generic{ + Type: s.Type, + Description: s.Description, + Title: s.Title, + Nullable: s.Nullable, + } + if s.Default != nil { + g.Default = JSON{interface{}(*s.Default)} + } + + if s.AdditionalProperties != nil { + if s.AdditionalProperties.Schema != nil { + ss, err := NewStructural(s.AdditionalProperties.Schema) + if err != nil { + return nil, err + } + g.AdditionalProperties = &StructuralOrBool{Structural: ss} + } else { + g.AdditionalProperties = &StructuralOrBool{Bool: s.AdditionalProperties.Allows} + } + } + + return g, nil +} + +func newValueValidation(s *apiextensions.JSONSchemaProps) (*ValueValidation, error) { + if s == nil { + return nil, nil + } + not, err := newNestedValueValidation(s.Not) + if err != nil { + return nil, err + } + v := &ValueValidation{ + Format: s.Format, + Maximum: s.Maximum, + ExclusiveMaximum: s.ExclusiveMaximum, + Minimum: s.Minimum, + ExclusiveMinimum: s.ExclusiveMinimum, + MaxLength: s.MaxLength, + MinLength: s.MinLength, + Pattern: s.Pattern, + MaxItems: s.MaxItems, + MinItems: s.MinItems, + UniqueItems: s.UniqueItems, + MultipleOf: s.MultipleOf, + MaxProperties: s.MaxProperties, + MinProperties: s.MinProperties, + Required: s.Required, + Not: not, + } + + for _, e := range s.Enum { + v.Enum = append(v.Enum, JSON{e}) + } + + for _, x := range s.AllOf { + clause, err := newNestedValueValidation(&x) + if err != nil { + return nil, err + } + v.AllOf = append(v.AllOf, *clause) + } + + for _, x := range s.AnyOf { + clause, err := newNestedValueValidation(&x) + if err != nil { + return nil, err + } + v.AnyOf = append(v.AnyOf, *clause) + } + + for _, x := range s.OneOf { + clause, err := newNestedValueValidation(&x) + if err != nil { + return nil, err + } + v.OneOf = append(v.OneOf, *clause) + } + + return v, nil +} + +func newNestedValueValidation(s *apiextensions.JSONSchemaProps) (*NestedValueValidation, error) { + if s == nil { + return nil, nil + } + + if err := validateUnsupportedFields(s); err != nil { + return nil, err + } + + vv, err := newValueValidation(s) + if err != nil { + return nil, err + } + 
+ g, err := newGenerics(s) + if err != nil { + return nil, err + } + + x, err := newExtensions(s) + if err != nil { + return nil, err + } + + v := &NestedValueValidation{ + ValueValidation: *vv, + ForbiddenGenerics: *g, + ForbiddenExtensions: *x, + } + + if s.Items != nil { + if len(s.Items.JSONSchemas) > 0 { + // we validate that it is not an array + return nil, fmt.Errorf("OpenAPIV3Schema 'items' must be a schema, but is an array") + } + nvv, err := newNestedValueValidation(s.Items.Schema) + if err != nil { + return nil, err + } + v.Items = nvv + } + if s.Properties != nil { + v.Properties = make(map[string]NestedValueValidation, len(s.Properties)) + for k, x := range s.Properties { + nvv, err := newNestedValueValidation(&x) + if err != nil { + return nil, err + } + v.Properties[k] = *nvv + } + } + + return v, nil +} + +func newExtensions(s *apiextensions.JSONSchemaProps) (*Extensions, error) { + if s == nil { + return nil, nil + } + + return &Extensions{ + XPreserveUnknownFields: s.XPreserveUnknownFields, + XEmbeddedResource: s.XEmbeddedResource, + XIntOrString: s.XIntOrString, + }, nil +} + +// validateUnsupportedFields checks that those fields rejected by validation are actually unset. +func validateUnsupportedFields(s *apiextensions.JSONSchemaProps) error { + if len(s.ID) > 0 { + return fmt.Errorf("OpenAPIV3Schema 'id' is not supported") + } + if len(s.Schema) > 0 { + return fmt.Errorf("OpenAPIV3Schema 'schema' is not supported") + } + if s.Ref != nil && len(*s.Ref) > 0 { + return fmt.Errorf("OpenAPIV3Schema '$ref' is not supported") + } + if len(s.PatternProperties) > 0 { + return fmt.Errorf("OpenAPIV3Schema 'patternProperties' is not supported") + } + if len(s.Dependencies) > 0 { + return fmt.Errorf("OpenAPIV3Schema 'dependencies' is not supported") + } + if s.AdditionalItems != nil { + return fmt.Errorf("OpenAPIV3Schema 'additionalItems' is not supported") + } + if len(s.Definitions) > 0 { + return fmt.Errorf("OpenAPIV3Schema 'definitions' is not supported") + } + + return nil +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go new file mode 100644 index 00000000000..996336c7dc7 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go @@ -0,0 +1,160 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen=true + +// Structural represents a structural schema. +type Structural struct { + Items *Structural + Properties map[string]Structural + + Generic + Extensions + + *ValueValidation +} + +// +k8s:deepcopy-gen=true + +// StructuralOrBool is either a structural schema or a boolean. +type StructuralOrBool struct { + Structural *Structural + Bool bool +} + +// +k8s:deepcopy-gen=true + +// Generic contains the generic schema fields not allowed in value validation. 
+type Generic struct { + Description string + // type specifies the type of a value. + // It can be object, array, number, integer, boolean, string. + // It is optional only if x-kubernetes-preserve-unknown-fields + // or x-kubernetes-int-or-string is true. + Type string + Title string + Default JSON + AdditionalProperties *StructuralOrBool + Nullable bool +} + +// +k8s:deepcopy-gen=true + +// Extensions contains the Kubernetes OpenAPI v3 vendor extensions. +type Extensions struct { + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + XPreserveUnknownFields bool + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. Both ObjectMeta and TypeMeta + // are validated automatically. x-kubernetes-preserve-unknown-fields + // must be true. + XEmbeddedResource bool + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool +} + +// +k8s:deepcopy-gen=true + +// ValueValidation contains all schema fields not contributing to the structure of the schema. +type ValueValidation struct { + Format string + Maximum *float64 + ExclusiveMaximum bool + Minimum *float64 + ExclusiveMinimum bool + MaxLength *int64 + MinLength *int64 + Pattern string + MaxItems *int64 + MinItems *int64 + UniqueItems bool + MultipleOf *float64 + Enum []JSON + MaxProperties *int64 + MinProperties *int64 + Required []string + AllOf []NestedValueValidation + OneOf []NestedValueValidation + AnyOf []NestedValueValidation + Not *NestedValueValidation +} + +// +k8s:deepcopy-gen=true + +// NestedValueValidation contains value validations, items and properties usable when nested +// under a logical junctor, and catch all structs for generic and vendor extensions schema fields. +type NestedValueValidation struct { + ValueValidation + + Items *NestedValueValidation + Properties map[string]NestedValueValidation + + // Anything set in the following will make the scheme + // non-structural, with the exception of these two patterns if + // x-kubernetes-int-or-string is true: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + ForbiddenGenerics Generic + ForbiddenExtensions Extensions +} + +// JSON wraps an arbitrary JSON value to be able to implement deepcopy. +type JSON struct { + Object interface{} +} + +// DeepCopy creates a deep copy of the wrapped JSON value. +func (j JSON) DeepCopy() JSON { + return JSON{runtime.DeepCopyJSONValue(j.Object)} +} + +// DeepCopyInto creates a deep copy of the wrapped JSON value and stores it in into. 
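// Editor's aside, not part of this patch: a hypothetical sketch of how the split between
// Generic, Extensions and ValueValidation plays out for the int-or-string exception
// described above; the literal mirrors the anyOf pattern the validation code accepts.
//
//	s := Structural{
//		Extensions: Extensions{XIntOrString: true},
//		ValueValidation: &ValueValidation{
//			AnyOf: []NestedValueValidation{
//				{ForbiddenGenerics: Generic{Type: "integer"}},
//				{ForbiddenGenerics: Generic{Type: "string"}},
//			},
//		},
//	}
//	// Generic.Type stays empty here: with x-kubernetes-int-or-string set, the type is
//	// expressed only through the anyOf clause, which the validation below treats as allowed.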
+func (j JSON) DeepCopyInto(into *JSON) { + into.Object = runtime.DeepCopyJSONValue(j.Object) +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go new file mode 100644 index 00000000000..f0bc9fa62bf --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go @@ -0,0 +1,238 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +var intOrStringAnyOf = []NestedValueValidation{ + {ForbiddenGenerics: Generic{ + Type: "integer", + }}, + {ForbiddenGenerics: Generic{ + Type: "string", + }}, +} + +type level int + +const ( + rootLevel level = iota + itemLevel + fieldLevel +) + +// ValidateStructural checks that s is a structural schema with the invariants: +// +// * structurality: both `ForbiddenGenerics` and `ForbiddenExtensions` only have zero values, with the two exceptions for IntOrString. +// * RawExtension: for every schema with `x-kubernetes-embedded-resource: true`, `x-kubernetes-preserve-unknown-fields: true` and `type: object` are set +// * IntOrString: for `x-kubernetes-int-or-string: true` either `type` is empty under `anyOf` and `allOf` or the schema structure is one of these: +// +// 1) anyOf: +// - type: integer +// - type: string +// 2) allOf: +// - anyOf: +// - type: integer +// - type: string +// - ... zero or more +// +// * every specified field or array in s is also specified outside of value validation. +func ValidateStructural(s *Structural, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateStructuralInvariants(s, rootLevel, fldPath)...) + allErrs = append(allErrs, validateStructuralCompleteness(s, fldPath)...) + + return allErrs +} + +// validateStructuralInvariants checks the invariants of a structural schema. +func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) field.ErrorList { + if s == nil { + return nil + } + + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateStructuralInvariants(s.Items, itemLevel, fldPath.Child("items"))...) + for k, v := range s.Properties { + allErrs = append(allErrs, validateStructuralInvariants(&v, fieldLevel, fldPath.Child("properties").Key(k))...) + } + allErrs = append(allErrs, validateGeneric(&s.Generic, fldPath)...) + allErrs = append(allErrs, validateExtensions(&s.Extensions, fldPath)...) + + // detect the two IntOrString exceptions: + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... 
zero or more + skipAnyOf := false + skipFirstAllOfAnyOf := false + if s.XIntOrString && s.ValueValidation != nil { + if len(s.ValueValidation.AnyOf) == 2 && reflect.DeepEqual(s.ValueValidation.AnyOf, intOrStringAnyOf) { + skipAnyOf = true + } else if len(s.ValueValidation.AllOf) >= 1 && len(s.ValueValidation.AllOf[0].AnyOf) == 2 && reflect.DeepEqual(s.ValueValidation.AllOf[0].AnyOf, intOrStringAnyOf) { + skipFirstAllOfAnyOf = true + } + } + + allErrs = append(allErrs, validateValueValidation(s.ValueValidation, skipAnyOf, skipFirstAllOfAnyOf, fldPath)...) + + if s.XEmbeddedResource && s.Type != "object" { + if len(s.Type) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must be object if x-kubernetes-embedded-resource is true")) + } else { + allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), s.Type, "must be object if x-kubernetes-embedded-resource is true")) + } + } else if len(s.Type) == 0 && !s.Extensions.XIntOrString && !s.Extensions.XPreserveUnknownFields { + switch lvl { + case rootLevel: + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must not be empty at the root")) + case itemLevel: + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must not be empty for specified array items")) + case fieldLevel: + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must not be empty for specified object fields")) + } + } + + if lvl == rootLevel && len(s.Type) > 0 && s.Type != "object" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), s.Type, "must be object at the root")) + } + + if s.XEmbeddedResource && !s.XPreserveUnknownFields && s.Properties == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("properties"), "must not be empty if x-kubernetes-embedded-resource is true without x-kubernetes-preserve-unknown-fields")) + } + + return allErrs +} + +// validateGeneric checks the generic fields of a structural schema. +func validateGeneric(g *Generic, fldPath *field.Path) field.ErrorList { + if g == nil { + return nil + } + + allErrs := field.ErrorList{} + + if g.AdditionalProperties != nil { + if g.AdditionalProperties.Structural != nil { + allErrs = append(allErrs, validateStructuralInvariants(g.AdditionalProperties.Structural, fieldLevel, fldPath.Child("additionalProperties"))...) + } + } + + return allErrs +} + +// validateExtensions checks Kubernetes vendor extensions of a structural schema. +func validateExtensions(x *Extensions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if x.XIntOrString && x.XPreserveUnknownFields { + allErrs = append(allErrs, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), x.XPreserveUnknownFields, "must be false if x-kubernetes-int-or-string is true")) + } + if x.XIntOrString && x.XEmbeddedResource { + allErrs = append(allErrs, field.Invalid(fldPath.Child("x-kubernetes-embedded-resource"), x.XEmbeddedResource, "must be false if x-kubernetes-int-or-string is true")) + } + + return allErrs +} + +// validateValueValidation checks the value validation in a structural schema. +func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf bool, fldPath *field.Path) field.ErrorList { + if v == nil { + return nil + } + + allErrs := field.ErrorList{} + + if !skipAnyOf { + for i := range v.AnyOf { + allErrs = append(allErrs, validateNestedValueValidation(&v.AnyOf[i], false, false, fldPath.Child("anyOf").Index(i))...) 
+ } + } + + for i := range v.AllOf { + skipAnyOf := false + if skipFirstAllOfAnyOf && i == 0 { + skipAnyOf = true + } + allErrs = append(allErrs, validateNestedValueValidation(&v.AllOf[i], skipAnyOf, false, fldPath.Child("allOf").Index(i))...) + } + + for i := range v.OneOf { + allErrs = append(allErrs, validateNestedValueValidation(&v.OneOf[i], false, false, fldPath.Child("oneOf").Index(i))...) + } + + allErrs = append(allErrs, validateNestedValueValidation(v.Not, false, false, fldPath.Child("not"))...) + + return allErrs +} + +// validateNestedValueValidation checks the nested value validation under a logic junctor in a structural schema. +func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllOfAnyOf bool, fldPath *field.Path) field.ErrorList { + if v == nil { + return nil + } + + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateValueValidation(&v.ValueValidation, skipAnyOf, skipAllOfAnyOf, fldPath)...) + allErrs = append(allErrs, validateNestedValueValidation(v.Items, false, false, fldPath.Child("items"))...) + + for k, fld := range v.Properties { + allErrs = append(allErrs, validateNestedValueValidation(&fld, false, false, fldPath.Child("properties").Key(k))...) + } + + if len(v.ForbiddenGenerics.Type) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("type"), "must be empty to be structural")) + } + if v.ForbiddenGenerics.AdditionalProperties != nil { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must be undefined to be structural")) + } + if v.ForbiddenGenerics.Default.Object != nil { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("default"), "must be undefined to be structural")) + } + if len(v.ForbiddenGenerics.Title) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("title"), "must be empty to be structural")) + } + if len(v.ForbiddenGenerics.Description) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("description"), "must be empty to be structural")) + } + if v.ForbiddenGenerics.Nullable { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("nullable"), "must be false to be structural")) + } + + if v.ForbiddenExtensions.XPreserveUnknownFields { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-preserve-unknown-fields"), "must be false to be structural")) + } + if v.ForbiddenExtensions.XEmbeddedResource { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-embedded-resource"), "must be false to be structural")) + } + if v.ForbiddenExtensions.XIntOrString { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-int-or-string"), "must be false to be structural")) + } + + return allErrs +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go new file mode 100644 index 00000000000..619040771a1 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "reflect" + "testing" + + fuzz "github.com/google/gofuzz" + + "k8s.io/apimachinery/pkg/util/rand" +) + +func TestValidateNestedValueValidationComplete(t *testing.T) { + fuzzer := fuzz.New() + fuzzer.Funcs( + func(s *JSON, c fuzz.Continue) { + if c.RandBool() { + s.Object = float64(42.0) + } + }, + func(s **StructuralOrBool, c fuzz.Continue) { + if c.RandBool() { + *s = &StructuralOrBool{} + } + }, + ) + fuzzer.NilChance(0) + + // check that we didn't forget to check any forbidden generic field + tt := reflect.TypeOf(Generic{}) + for i := 0; i < tt.NumField(); i++ { + vv := &NestedValueValidation{} + x := reflect.ValueOf(&vv.ForbiddenGenerics).Elem() + i := rand.Intn(x.NumField()) + fuzzer.Fuzz(x.Field(i).Addr().Interface()) + + errs := validateNestedValueValidation(vv, false, false, nil) + if len(errs) == 0 && !reflect.DeepEqual(vv.ForbiddenGenerics, Generic{}) { + t.Errorf("expected ForbiddenGenerics validation errors for: %#v", vv) + } + } + + // check that we didn't forget to check any forbidden extension field + tt = reflect.TypeOf(Extensions{}) + for i := 0; i < tt.NumField(); i++ { + vv := &NestedValueValidation{} + x := reflect.ValueOf(&vv.ForbiddenExtensions).Elem() + i := rand.Intn(x.NumField()) + fuzzer.Fuzz(x.Field(i).Addr().Interface()) + + errs := validateNestedValueValidation(vv, false, false, nil) + if len(errs) == 0 && !reflect.DeepEqual(vv.ForbiddenExtensions, Extensions{}) { + t.Errorf("expected ForbiddenExtensions validation errors for: %#v", vv) + } + } +} From 6f519f5f5251fc5e15087f7b520437225ebb554d Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 29 Apr 2019 14:27:32 +0200 Subject: [PATCH 063/194] apiextensions: add NonStructuralSchema condition to API --- .../pkg/apis/apiextensions/types.go | 16 ++++++++++++++++ .../pkg/apis/apiextensions/v1beta1/types.go | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index 3af24046bf2..ce6aae71182 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -264,6 +264,22 @@ const ( // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in // the group and are therefore accepted. NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. + // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. 
Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index ad2e1347ce1..220a494bce2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -279,6 +279,22 @@ const ( // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in // the group and are therefore accepted. NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. + // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" ) From 5e53522a9ef56f33d06c457c072c379131e127ef Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Wed, 8 May 2019 15:05:07 -0700 Subject: [PATCH 064/194] In GuaranteedUpdate, retry on any error if we are working with stale data --- .../registry/generic/registry/store_test.go | 44 +++++++++++++++++++ .../apiserver/pkg/storage/etcd3/store.go | 23 +++++----- 2 files changed, 56 insertions(+), 11 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go index d8c0c62cf10..7b16f614e5b 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go @@ -1851,3 +1851,47 @@ func TestMarkAsDeleting(t *testing.T) { }) } } + +type staleGuaranteedUpdateStorage struct { + storage.Interface + cachedObj runtime.Object +} + +// GuaranteedUpdate overwrites the method with one that always suggests the cachedObj. 
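// Editor's aside, not part of this patch: the wrapper below feeds the registry a stale
// cached "suggestion" so that TestDeleteWithCachedObject exercises the retry path added
// to the etcd3 store later in this commit. Roughly, when tryUpdate fails while the state
// came from a cached suggestion rather than a live read, GuaranteedUpdate now refetches
// the current object from etcd and retries, instead of doing so only for conflict errors.
// In the test, the cached pod has no finalizer but the persisted pod does, so the delete
// must not actually remove the object.
//
//	// shape of the new retry in etcd3 store.GuaranteedUpdate (paraphrased from the diff below):
//	ret, ttl, err := s.updateState(origState, tryUpdate)
//	if err != nil {
//		if !mustCheckData {
//			return err                     // state came from a live read; give up
//		}
//		origState, err = getCurrentState() // drop the stale suggestion
//		...                                // and retry the update loop
//	}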
+func (s *staleGuaranteedUpdateStorage) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ ...runtime.Object) error { + return s.Interface.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, s.cachedObj) +} + +func TestDeleteWithCachedObject(t *testing.T) { + podName := "foo" + podWithFinalizer := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName, Finalizers: []string{"foo.com/x"}}, + Spec: example.PodSpec{NodeName: "machine"}, + } + podWithNoFinalizer := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName}, + Spec: example.PodSpec{NodeName: "machine"}, + } + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test") + destroyFunc, registry := newTestGenericStoreRegistry(t, scheme, false) + defer destroyFunc() + // cached object does not have any finalizer. + registry.Storage.Storage = &staleGuaranteedUpdateStorage{Interface: registry.Storage.Storage, cachedObj: podWithNoFinalizer} + // created object with pending finalizer. + _, err := registry.Create(ctx, podWithFinalizer, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + // The object shouldn't be deleted, because the persisted object has pending finalizers. + _, _, err = registry.Delete(ctx, podName, nil) + if err != nil { + t.Fatal(err) + } + // The object should still be there + _, err = registry.Get(ctx, podName, &metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go index 1216b1f1e27..5a349c7a3eb 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -301,19 +301,20 @@ func (s *store) GuaranteedUpdate( ret, ttl, err := s.updateState(origState, tryUpdate) if err != nil { - // It's possible we were working with stale data - if mustCheckData && apierrors.IsConflict(err) { - // Actually fetch - origState, err = getCurrentState() - if err != nil { - return err - } - mustCheckData = false - // Retry - continue + // If our data is already up to date, return the error + if !mustCheckData { + return err } - return err + // It's possible we were working with stale data + // Actually fetch + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + // Retry + continue } data, err := runtime.Encode(s.codec, ret) From 0a9bf0d05e46eb802b47c9005231829a4091d3ff Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Thu, 9 May 2019 10:51:45 -0700 Subject: [PATCH 065/194] Correct CriticalPodAdmissionHandler in godoc --- pkg/kubelet/preemption/preemption.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/preemption/preemption.go b/pkg/kubelet/preemption/preemption.go index 751fcc246c9..f6e871376f8 100644 --- a/pkg/kubelet/preemption/preemption.go +++ b/pkg/kubelet/preemption/preemption.go @@ -35,7 +35,7 @@ import ( const message = "Preempted in order to admit critical pod" -// CriticalPodAdmissionFailureHandler is an AdmissionFailureHandler that handles admission failure for Critical Pods. +// CriticalPodAdmissionHandler is an AdmissionFailureHandler that handles admission failure for Critical Pods. // If the ONLY admission failures are due to insufficient resources, then CriticalPodAdmissionHandler evicts pods // so that the critical pod can be admitted. 
For evictions, the CriticalPodAdmissionHandler evicts a set of pods that // frees up the required resource requests. The set of pods is designed to minimize impact, and is prioritized according to the ordering: From 69393291b64a2281533cb2567f67edca478b4a93 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 6 May 2019 11:49:19 -0400 Subject: [PATCH 066/194] Add a new field for storing volume expansion secrets Fix pv secret visitor tests Allow SecretRef for resizing to be set if not already set --- api/openapi-spec/swagger.json | 4 + pkg/api/persistentvolume/util.go | 17 + pkg/api/persistentvolume/util_test.go | 68 +- pkg/api/v1/persistentvolume/util.go | 6 + pkg/api/v1/persistentvolume/util_test.go | 10 + pkg/apis/core/types.go | 9 + pkg/apis/core/v1/zz_generated.conversion.go | 2 + pkg/apis/core/validation/validation.go | 22 +- pkg/apis/core/validation/validation_test.go | 66 +- pkg/apis/core/zz_generated.deepcopy.go | 5 + .../src/k8s.io/api/core/v1/generated.pb.go | 3100 +++++++++-------- .../src/k8s.io/api/core/v1/generated.proto | 9 + staging/src/k8s.io/api/core/v1/types.go | 9 + .../core/v1/types_swagger_doc_generated.go | 1 + .../api/core/v1/zz_generated.deepcopy.go | 5 + 15 files changed, 1798 insertions(+), 1535 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 5fad24c09f2..1653d4d933a 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -6055,6 +6055,10 @@ "io.k8s.api.core.v1.CSIPersistentVolumeSource": { "description": "Represents storage that is managed by an external CSI volume driver (Beta feature)", "properties": { + "controllerExpandSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed." + }, "controllerPublishSecretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", "description": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed." 
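Editor's aside, not part of this patch: a rough illustration of the new API surface in this commit. The snippet below builds a CSI-backed PersistentVolume that carries the new controllerExpandSecretRef, assuming the patched core/v1 types; the driver name, volume handle, object name and secret reference are placeholders.

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// expandablePV returns a PV whose CSI source references a secret that is passed to the
// CSI driver when ControllerExpandVolume is called (requires the ExpandCSIVolumes gate).
func expandablePV() *corev1.PersistentVolume {
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-example"},
		Spec: corev1.PersistentVolumeSpec{
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "com.example.csi",
					VolumeHandle: "vol-0123456789",
					ControllerExpandSecretRef: &corev1.SecretReference{
						Name:      "expansion-secret",
						Namespace: "default",
					},
				},
			},
		},
	}
}

When the ExpandCSIVolumes gate is disabled, the DropDisabledFields change that follows clears this reference unless the existing object already had one.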
diff --git a/pkg/api/persistentvolume/util.go b/pkg/api/persistentvolume/util.go index 68fd93cc06a..5add392d87c 100644 --- a/pkg/api/persistentvolume/util.go +++ b/pkg/api/persistentvolume/util.go @@ -28,6 +28,23 @@ func DropDisabledFields(pvSpec *api.PersistentVolumeSpec, oldPVSpec *api.Persist if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && !volumeModeInUse(oldPVSpec) { pvSpec.VolumeMode = nil } + + if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandCSIVolumes) && !hasExpansionSecrets(oldPVSpec) { + if pvSpec.CSI != nil { + pvSpec.CSI.ControllerExpandSecretRef = nil + } + } +} + +func hasExpansionSecrets(oldPVSpec *api.PersistentVolumeSpec) bool { + if oldPVSpec == nil || oldPVSpec.CSI == nil { + return false + } + + if oldPVSpec.CSI.ControllerExpandSecretRef != nil { + return true + } + return false } func volumeModeInUse(oldPVSpec *api.PersistentVolumeSpec) bool { diff --git a/pkg/api/persistentvolume/util_test.go b/pkg/api/persistentvolume/util_test.go index 70d3a625baf..f4e00d80ae1 100644 --- a/pkg/api/persistentvolume/util_test.go +++ b/pkg/api/persistentvolume/util_test.go @@ -32,14 +32,20 @@ func TestDropDisabledFields(t *testing.T) { return &api.PersistentVolumeSpec{VolumeMode: mode} } + secretRef := &api.SecretReference{ + Name: "expansion-secret", + Namespace: "default", + } + modeBlock := api.PersistentVolumeBlock tests := map[string]struct { - oldSpec *api.PersistentVolumeSpec - newSpec *api.PersistentVolumeSpec - expectOldSpec *api.PersistentVolumeSpec - expectNewSpec *api.PersistentVolumeSpec - blockEnabled bool + oldSpec *api.PersistentVolumeSpec + newSpec *api.PersistentVolumeSpec + expectOldSpec *api.PersistentVolumeSpec + expectNewSpec *api.PersistentVolumeSpec + blockEnabled bool + csiExpansionEnabled bool }{ "disabled block clears new": { blockEnabled: false, @@ -84,11 +90,47 @@ func TestDropDisabledFields(t *testing.T) { oldSpec: specWithMode(&modeBlock), expectOldSpec: specWithMode(&modeBlock), }, + "disabled csi expansion clears secrets": { + csiExpansionEnabled: false, + newSpec: specWithCSISecrets(secretRef), + expectNewSpec: specWithCSISecrets(nil), + oldSpec: nil, + expectOldSpec: nil, + }, + "enabled csi expansion preserve secrets": { + csiExpansionEnabled: true, + newSpec: specWithCSISecrets(secretRef), + expectNewSpec: specWithCSISecrets(secretRef), + oldSpec: nil, + expectOldSpec: nil, + }, + "enabled csi expansion preserve secrets when both old and new have it": { + csiExpansionEnabled: true, + newSpec: specWithCSISecrets(secretRef), + expectNewSpec: specWithCSISecrets(secretRef), + oldSpec: specWithCSISecrets(secretRef), + expectOldSpec: specWithCSISecrets(secretRef), + }, + "disabled csi expansion old pv had secrets": { + csiExpansionEnabled: false, + newSpec: specWithCSISecrets(secretRef), + expectNewSpec: specWithCSISecrets(secretRef), + oldSpec: specWithCSISecrets(secretRef), + expectOldSpec: specWithCSISecrets(secretRef), + }, + "enabled csi expansion preserves secrets when old pv did not had secrets": { + csiExpansionEnabled: true, + newSpec: specWithCSISecrets(secretRef), + expectNewSpec: specWithCSISecrets(secretRef), + oldSpec: specWithCSISecrets(nil), + expectOldSpec: specWithCSISecrets(nil), + }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, tc.blockEnabled)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandCSIVolumes, 
tc.csiExpansionEnabled)() DropDisabledFields(tc.newSpec, tc.oldSpec) if !reflect.DeepEqual(tc.newSpec, tc.expectNewSpec) { @@ -100,3 +142,19 @@ func TestDropDisabledFields(t *testing.T) { }) } } + +func specWithCSISecrets(secret *api.SecretReference) *api.PersistentVolumeSpec { + pvSpec := &api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + CSI: &api.CSIPersistentVolumeSource{ + Driver: "com.google.gcepd", + VolumeHandle: "foobar", + }, + }, + } + + if secret != nil { + pvSpec.CSI.ControllerExpandSecretRef = secret + } + return pvSpec +} diff --git a/pkg/api/v1/persistentvolume/util.go b/pkg/api/v1/persistentvolume/util.go index e43ae58d2ee..003b2e70d97 100644 --- a/pkg/api/v1/persistentvolume/util.go +++ b/pkg/api/v1/persistentvolume/util.go @@ -119,6 +119,12 @@ func VisitPVSecretNames(pv *corev1.PersistentVolume, visitor Visitor) bool { return false } } + if source.CSI.ControllerExpandSecretRef != nil { + if !visitor(source.CSI.ControllerExpandSecretRef.Namespace, source.CSI.ControllerExpandSecretRef.Name, false /* kubeletVisible */) { + return false + } + } + if source.CSI.NodePublishSecretRef != nil { if !visitor(source.CSI.NodePublishSecretRef.Namespace, source.CSI.NodePublishSecretRef.Name, true /* kubeletVisible */) { return false diff --git a/pkg/api/v1/persistentvolume/util_test.go b/pkg/api/v1/persistentvolume/util_test.go index 0ed1a06bfef..1f891cb48d4 100644 --- a/pkg/api/v1/persistentvolume/util_test.go +++ b/pkg/api/v1/persistentvolume/util_test.go @@ -142,9 +142,17 @@ func TestPVSecrets(t *testing.T) { NodeStageSecretRef: &corev1.SecretReference{ Name: "Spec.PersistentVolumeSource.CSI.NodeStageSecretRef", Namespace: "csi"}}}}}, + {Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}, + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + ControllerExpandSecretRef: &corev1.SecretReference{ + Name: "Spec.PersistentVolumeSource.CSI.ControllerExpandSecretRef", + Namespace: "csi"}}}}}, } extractedNames := sets.NewString() extractedNamesWithNamespace := sets.NewString() + for _, pv := range pvs { VisitPVSecretNames(pv, func(namespace, name string, kubeletVisible bool) bool { extractedNames.Insert(name) @@ -172,6 +180,7 @@ func TestPVSecrets(t *testing.T) { "Spec.PersistentVolumeSource.CSI.ControllerPublishSecretRef", "Spec.PersistentVolumeSource.CSI.NodePublishSecretRef", "Spec.PersistentVolumeSource.CSI.NodeStageSecretRef", + "Spec.PersistentVolumeSource.CSI.ControllerExpandSecretRef", ) secretPaths := collectSecretPaths(t, nil, "", reflect.TypeOf(&api.PersistentVolume{})) secretPaths = secretPaths.Difference(excludedSecretPaths) @@ -219,6 +228,7 @@ func TestPVSecrets(t *testing.T) { "csi/Spec.PersistentVolumeSource.CSI.ControllerPublishSecretRef", "csi/Spec.PersistentVolumeSource.CSI.NodePublishSecretRef", "csi/Spec.PersistentVolumeSource.CSI.NodeStageSecretRef", + "csi/Spec.PersistentVolumeSource.CSI.ControllerExpandSecretRef", ) if missingNames := expectedNamespacedNames.Difference(extractedNamesWithNamespace); len(missingNames) > 0 { t.Logf("Missing expected namespaced names:\n%s", strings.Join(missingNames.List(), "\n")) diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index bff59a8e98c..1b1f7b6b942 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -1601,6 +1601,15 @@ type CSIPersistentVolumeSource struct { // secret object contains more than one secret, all secrets are passed. 
// +optional NodePublishSecretRef *SecretReference + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + ControllerExpandSecretRef *SecretReference } // Represents a source location of a volume to mount, managed by an external CSI driver diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 20148c6233e..70952edd841 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -2310,6 +2310,7 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource( out.ControllerPublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef)) out.NodeStageSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef)) out.NodePublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef)) + out.ControllerExpandSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerExpandSecretRef)) return nil } @@ -2327,6 +2328,7 @@ func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource( out.ControllerPublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef)) out.NodeStageSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef)) out.NodePublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef)) + out.ControllerExpandSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerExpandSecretRef)) return nil } diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 89940c52a3a..7f914a59320 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -1479,6 +1479,19 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldP } } + if csi.ControllerExpandSecretRef != nil { + if len(csi.ControllerExpandSecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("controllerExpandSecretRef", "name"), "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerExpandSecretRef.Name, fldPath.Child("name"))...) + } + if len(csi.ControllerExpandSecretRef.Namespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("controllerExpandSecretRef", "namespace"), "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerExpandSecretRef.Namespace, fldPath.Child("namespace"))...) 
+ } + } + if csi.NodePublishSecretRef != nil { if len(csi.NodePublishSecretRef.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "name"), "")) @@ -1773,12 +1786,17 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.E allErrs := field.ErrorList{} allErrs = ValidatePersistentVolume(newPv) + // if oldPV does not have ControllerExpandSecretRef then allow it to be set + if (oldPv.Spec.CSI != nil && oldPv.Spec.CSI.ControllerExpandSecretRef == nil) && + (newPv.Spec.CSI != nil && newPv.Spec.CSI.ControllerExpandSecretRef != nil) { + newPv = newPv.DeepCopy() + newPv.Spec.CSI.ControllerExpandSecretRef = nil + } + // PersistentVolumeSource should be immutable after creation. if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "persistentvolumesource"), "is immutable after creation")) } - newPv.Status = oldPv.Status - allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...) // Allow setting NodeAffinity if oldPv NodeAffinity was not set diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index ad7aa82a6cd..57b5a8866a7 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -455,10 +455,31 @@ func TestValidatePersistentVolumeSourceUpdate(t *testing.T) { Type: newHostPathType(string(core.HostPathDirectory)), }, } + + validCSIVolume := testVolume("csi-volume", "", core.PersistentVolumeSpec{ + Capacity: core.ResourceList{ + core.ResourceName(core.ResourceStorage): resource.MustParse("1G"), + }, + AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce}, + PersistentVolumeSource: core.PersistentVolumeSource{ + CSI: &core.CSIPersistentVolumeSource{ + Driver: "come.google.gcepd", + VolumeHandle: "foobar", + }, + }, + StorageClassName: "gp2", + }) + + expandSecretRef := &core.SecretReference{ + Name: "expansion-secret", + Namespace: "default", + } + scenarios := map[string]struct { - isExpectedFailure bool - oldVolume *core.PersistentVolume - newVolume *core.PersistentVolume + isExpectedFailure bool + csiExpansionEnabled bool + oldVolume *core.PersistentVolume + newVolume *core.PersistentVolume }{ "condition-no-update": { isExpectedFailure: false, @@ -475,6 +496,21 @@ func TestValidatePersistentVolumeSourceUpdate(t *testing.T) { oldVolume: validVolume, newVolume: invalidPvSourceUpdateDeep, }, + "csi-expansion-enabled-with-pv-secret": { + csiExpansionEnabled: true, + isExpectedFailure: false, + oldVolume: validCSIVolume, + newVolume: getCSIVolumeWithSecret(validCSIVolume, expandSecretRef), + }, + "csi-expansion-enabled-with-old-pv-secret": { + csiExpansionEnabled: true, + isExpectedFailure: true, + oldVolume: getCSIVolumeWithSecret(validCSIVolume, expandSecretRef), + newVolume: getCSIVolumeWithSecret(validCSIVolume, &core.SecretReference{ + Name: "foo-secret", + Namespace: "default", + }), + }, } for name, scenario := range scenarios { errs := ValidatePersistentVolumeUpdate(scenario.newVolume, scenario.oldVolume) @@ -487,6 +523,14 @@ func TestValidatePersistentVolumeSourceUpdate(t *testing.T) { } } +func getCSIVolumeWithSecret(pv *core.PersistentVolume, secret *core.SecretReference) *core.PersistentVolume { + pvCopy := pv.DeepCopy() + if secret != nil { + pvCopy.Spec.CSI.ControllerExpandSecretRef = secret + } + return pvCopy +} + func 
testLocalVolume(path string, affinity *core.VolumeNodeAffinity) core.PersistentVolumeSpec { return core.PersistentVolumeSpec{ Capacity: core.ResourceList{ @@ -1834,6 +1878,22 @@ func TestValidateCSIVolumeSource(t *testing.T) { errtype: field.ErrorTypeInvalid, errfield: "driver", }, + { + name: "controllerExpandSecretRef: invalid name missing", + csi: &core.CSIPersistentVolumeSource{Driver: "com.google.gcepd", VolumeHandle: "foobar", ControllerExpandSecretRef: &core.SecretReference{Namespace: "default"}}, + errtype: field.ErrorTypeRequired, + errfield: "controllerExpandSecretRef.name", + }, + { + name: "controllerExpandSecretRef: invalid namespace missing", + csi: &core.CSIPersistentVolumeSource{Driver: "com.google.gcepd", VolumeHandle: "foobar", ControllerExpandSecretRef: &core.SecretReference{Name: "foobar"}}, + errtype: field.ErrorTypeRequired, + errfield: "controllerExpandSecretRef.namespace", + }, + { + name: "valid controllerExpandSecretRef", + csi: &core.CSIPersistentVolumeSource{Driver: "com.google.gcepd", VolumeHandle: "foobar", ControllerExpandSecretRef: &core.SecretReference{Name: "foobar", Namespace: "default"}}, + }, } defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)() diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index 7618b10e8a0..fd0aa11353a 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -237,6 +237,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index cd605693292..218e438899c 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -1707,6 +1707,16 @@ func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + if m.ControllerExpandSecretRef != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerExpandSecretRef.Size())) + n9, err := m.ControllerExpandSecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } return i, nil } @@ -1771,11 +1781,11 @@ func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n9, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) + n10, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } return i, nil } @@ -1874,11 +1884,11 @@ func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n10, err := m.SecretRef.MarshalTo(dAtA[i:]) + n11, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } dAtA[i] = 0x30 i++ @@ -1937,11 +1947,11 @@ func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n11, err := m.SecretRef.MarshalTo(dAtA[i:]) + n12, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } dAtA[i] = 0x30 i++ @@ -1989,11 +1999,11 @@ func (m 
*CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n12, err := m.SecretRef.MarshalTo(dAtA[i:]) + n13, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } return i, nil } @@ -2033,11 +2043,11 @@ func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n13, err := m.SecretRef.MarshalTo(dAtA[i:]) + n14, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } return i, nil } @@ -2117,11 +2127,11 @@ func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n14, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n15, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 if len(m.Conditions) > 0 { for _, msg := range m.Conditions { dAtA[i] = 0x12 @@ -2155,11 +2165,11 @@ func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) + n16, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2193,11 +2203,11 @@ func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n16, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n17, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -2269,11 +2279,11 @@ func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -2305,11 +2315,11 @@ func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -2345,11 +2355,11 @@ func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n19, err := m.ListMeta.MarshalTo(dAtA[i:]) + n20, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2421,11 +2431,11 @@ func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2469,11 +2479,11 @@ func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 
0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n22, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2588,11 +2598,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n22, err := m.Resources.MarshalTo(dAtA[i:]) + n23, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { dAtA[i] = 0x4a @@ -2609,32 +2619,32 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n23, err := m.LivenessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.ReadinessProbe != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n24, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) + n24, err := m.LivenessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n24 } - if m.Lifecycle != nil { - dAtA[i] = 0x62 + if m.ReadinessProbe != nil { + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n25, err := m.Lifecycle.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) + n25, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n25 } + if m.Lifecycle != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) + n26, err := m.Lifecycle.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) @@ -2647,11 +2657,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n26, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n27, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 } dAtA[i] = 0x80 i++ @@ -2811,32 +2821,32 @@ func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n27, err := m.Waiting.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.Running != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n28, err := m.Running.MarshalTo(dAtA[i:]) + n28, err := m.Waiting.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n28 } - if m.Terminated != nil { - dAtA[i] = 0x1a + if m.Running != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n29, err := m.Terminated.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) + n29, err := m.Running.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n29 } + if m.Terminated != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) + n30, err := m.Terminated.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } return i, nil } @@ -2858,11 +2868,11 @@ func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n30, err := 
[condensed: this span of the patch is the regenerated, machine-generated protobuf marshaling code for the core v1 API types (ContainerStateTerminated, ContainerStatus, Event, Node, PersistentVolume, Pod, Service, VolumeSource, and the other MarshalTo methods shown in the hunk headers). The flattened hunks above and below are purely mechanical: the sequentially numbered temporaries that the generated MarshalTo methods use to hold nested-message byte counts (roughly n30 through n230 in this span) are each shifted up by one, and the surrounding tag/length/marshal boilerplate is re-emitted around the new numbers. No functional change is introduced here.]
m.Flocker != nil { - dAtA[i] = 0x7a + if m.CephFS != nil { + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n231, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n231, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n231 } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n232, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n232 + } if m.DownwardAPI != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n232, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n233, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n232 + i += n233 } if m.FC != nil { dAtA[i] = 0x8a @@ -10925,11 +10935,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n233, err := m.FC.MarshalTo(dAtA[i:]) + n234, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n233 + i += n234 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -10937,11 +10947,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n234, err := m.AzureFile.MarshalTo(dAtA[i:]) + n235, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n234 + i += n235 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -10949,11 +10959,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n235, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n236, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n235 + i += n236 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -10961,11 +10971,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n236, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n237, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n236 + i += n237 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -10973,11 +10983,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n237, err := m.Quobyte.MarshalTo(dAtA[i:]) + n238, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n237 + i += n238 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -10985,11 +10995,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n238, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n239, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n238 + i += n239 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -10997,11 +11007,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n239, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n240, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n239 + i += n240 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -11009,11 +11019,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.PortworxVolume.Size())) - n240, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n241, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n240 + i += n241 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -11021,11 +11031,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n241, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n242, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n241 + i += n242 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -11033,11 +11043,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n242, err := m.Projected.MarshalTo(dAtA[i:]) + n243, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n242 + i += n243 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -11045,11 +11055,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n243, err := m.StorageOS.MarshalTo(dAtA[i:]) + n244, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n243 + i += n244 } if m.CSI != nil { dAtA[i] = 0xe2 @@ -11057,11 +11067,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n244, err := m.CSI.MarshalTo(dAtA[i:]) + n245, err := m.CSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n244 + i += n245 } return i, nil } @@ -11121,11 +11131,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n245, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n246, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n245 + i += n246 return i, nil } @@ -11299,6 +11309,10 @@ func (m *CSIPersistentVolumeSource) Size() (n int) { l = m.NodePublishSecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ControllerExpandSecretRef != nil { + l = m.ControllerExpandSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -14886,6 +14900,7 @@ func (this *CSIPersistentVolumeSource) String() string { `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ControllerExpandSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerExpandSecretRef), "SecretReference", "SecretReference", 1) + `,`, `}`, }, "") return s @@ -19050,6 +19065,39 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if m.ControllerExpandSecretRef == nil { + m.ControllerExpandSecretRef = &SecretReference{} + } + if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52365,817 +52413,819 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12981 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6d, 0x70, 0x64, 0x57, - 0x5a, 0x18, 0xbc, 0xb7, 0xbb, 0x25, 0x75, 0x3f, 0xfa, 0x3e, 0x33, 0x63, 0x6b, 0xe4, 0x99, 0xe9, - 0xf1, 0xf5, 0xee, 0x78, 0xbc, 0xb6, 0xa5, 0xf5, 0xd8, 0x5e, 0x9b, 0xb5, 0xd7, 0x20, 0xa9, 0xa5, - 0x99, 0xf6, 0x8c, 0x34, 0xed, 0xd3, 0x9a, 0x99, 0x5d, 0xe3, 0x5d, 0xf6, 0xaa, 0xfb, 0x48, 0xba, - 0x56, 0xeb, 0xde, 0xf6, 0xbd, 0xb7, 0x35, 0x23, 0xbf, 0x50, 0xef, 0xfb, 0x2e, 0x2f, 0xbc, 0x6c, - 0xa0, 0x52, 0x5b, 0xc9, 0x56, 0x3e, 0x80, 0x22, 0x55, 0x84, 0x14, 0x10, 0x48, 0x2a, 0x04, 0x02, - 0x84, 0x25, 0x09, 0x81, 0xa4, 0x8a, 0xe4, 0xc7, 0x86, 0xa4, 0x2a, 0xb5, 0x54, 0x51, 0x51, 0x40, - 0xa4, 0x42, 0xf1, 0x23, 0x90, 0x0a, 0xf9, 0x83, 0x42, 0x85, 0xd4, 0xf9, 0xbc, 0xe7, 0xdc, 0xbe, - 0xb7, 0xbb, 0x35, 0xd6, 0xc8, 0x86, 0xda, 0x7f, 0xdd, 0xe7, 0x79, 0xce, 0x73, 0xce, 0x3d, 0x9f, - 0xcf, 0xf3, 0x9c, 0xe7, 0x03, 0x5e, 0xdb, 0x79, 0x35, 0x9c, 0x73, 0xfd, 0xf9, 0x9d, 0xce, 0x06, - 0x09, 0x3c, 0x12, 0x91, 0x70, 0x7e, 0x8f, 0x78, 0x4d, 0x3f, 0x98, 0x17, 0x00, 0xa7, 0xed, 0xce, - 0x37, 0xfc, 0x80, 0xcc, 0xef, 0xbd, 0x30, 0xbf, 0x45, 0x3c, 0x12, 0x38, 0x11, 0x69, 0xce, 0xb5, - 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38, 0x73, 0x4e, 0xdb, 0x9d, 0xa3, 0x38, 0x73, 0x7b, 0x2f, 0xcc, - 0x3e, 0xbf, 0xe5, 0x46, 0xdb, 0x9d, 0x8d, 0xb9, 0x86, 0xbf, 0x3b, 0xbf, 0xe5, 0x6f, 0xf9, 0xf3, - 0x0c, 0x75, 0xa3, 0xb3, 0xc9, 0xfe, 0xb1, 0x3f, 0xec, 0x17, 0x27, 0x31, 0xfb, 0x52, 0xdc, 0xcc, - 0xae, 0xd3, 0xd8, 0x76, 0x3d, 0x12, 0xec, 0xcf, 0xb7, 0x77, 0xb6, 0x58, 0xbb, 0x01, 0x09, 0xfd, - 0x4e, 0xd0, 0x20, 0xc9, 0x86, 0x7b, 0xd6, 0x0a, 0xe7, 0x77, 0x49, 0xe4, 0xa4, 0x74, 0x77, 0x76, - 0x3e, 0xab, 0x56, 0xd0, 0xf1, 0x22, 0x77, 0xb7, 0xbb, 0x99, 0x4f, 0xf7, 0xab, 0x10, 0x36, 0xb6, - 0xc9, 0xae, 0xd3, 0x55, 0xef, 0xc5, 0xac, 0x7a, 0x9d, 0xc8, 0x6d, 0xcd, 0xbb, 0x5e, 0x14, 0x46, - 0x41, 0xb2, 0x92, 0xfd, 0x4d, 0x0b, 0x2e, 0x2f, 0xdc, 0xab, 0x2f, 0xb7, 0x9c, 0x30, 0x72, 0x1b, - 0x8b, 0x2d, 0xbf, 0xb1, 0x53, 0x8f, 0xfc, 0x80, 0xdc, 0xf5, 0x5b, 0x9d, 0x5d, 0x52, 0x67, 0x03, - 0x81, 0x9e, 0x83, 0xe2, 0x1e, 0xfb, 0x5f, 0xad, 0xcc, 0x58, 0x97, 0xad, 0xab, 0xa5, 0xc5, 0xa9, - 0xdf, 0x3c, 0x28, 0x7f, 0xec, 0xf0, 0xa0, 0x5c, 0xbc, 0x2b, 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x60, - 0x78, 0x33, 0x5c, 0xdf, 0x6f, 0x93, 0x99, 0x1c, 0xc3, 0x9d, 0x10, 0xb8, 0xc3, 0x2b, 0x75, 0x5a, - 0x8a, 0x05, 0x14, 0xcd, 0x43, 0xa9, 0xed, 0x04, 0x91, 0x1b, 0xb9, 0xbe, 0x37, 0x93, 0xbf, 0x6c, - 0x5d, 0x1d, 0x5a, 0x9c, 0x16, 0xa8, 0xa5, 0x9a, 0x04, 0xe0, 0x18, 0x87, 0x76, 0x23, 0x20, 0x4e, - 0xf3, 0xb6, 0xd7, 0xda, 0x9f, 0x29, 0x5c, 0xb6, 0xae, 0x16, 0xe3, 0x6e, 0x60, 0x51, 0x8e, 0x15, - 0x86, 0xfd, 0xc3, 0x39, 0x28, 0x2e, 0x6c, 0x6e, 0xba, 0x9e, 0x1b, 0xed, 0xa3, 0xbb, 0x30, 0xe6, - 0xf9, 0x4d, 0x22, 0xff, 0xb3, 0xaf, 0x18, 0xbd, 0x76, 0x79, 0xae, 0x7b, 0x29, 0xcd, 0xad, 0x69, - 0x78, 0x8b, 0x53, 0x87, 0x07, 0xe5, 0x31, 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0xd1, 0xb6, 0xdf, - 0x54, 0x64, 0x73, 0x8c, 0x6c, 0x39, 0x8d, 0x6c, 0x2d, 0x46, 0x5b, 0x9c, 0x3c, 0x3c, 0x28, 0x8f, - 0x6a, 0x05, 0x58, 0x27, 0x82, 0x36, 0x60, 0x92, 0xfe, 
0xf5, 0x22, 0x57, 0xd1, 0xcd, 0x33, 0xba, - 0x4f, 0x65, 0xd1, 0xd5, 0x50, 0x17, 0xcf, 0x1c, 0x1e, 0x94, 0x27, 0x13, 0x85, 0x38, 0x49, 0xd0, - 0x7e, 0x1f, 0x26, 0x16, 0xa2, 0xc8, 0x69, 0x6c, 0x93, 0x26, 0x9f, 0x41, 0xf4, 0x12, 0x14, 0x3c, - 0x67, 0x97, 0x88, 0xf9, 0xbd, 0x2c, 0x06, 0xb6, 0xb0, 0xe6, 0xec, 0x92, 0xa3, 0x83, 0xf2, 0xd4, - 0x1d, 0xcf, 0x7d, 0xaf, 0x23, 0x56, 0x05, 0x2d, 0xc3, 0x0c, 0x1b, 0x5d, 0x03, 0x68, 0x92, 0x3d, - 0xb7, 0x41, 0x6a, 0x4e, 0xb4, 0x2d, 0xe6, 0x1b, 0x89, 0xba, 0x50, 0x51, 0x10, 0xac, 0x61, 0xd9, - 0x0f, 0xa0, 0xb4, 0xb0, 0xe7, 0xbb, 0xcd, 0x9a, 0xdf, 0x0c, 0xd1, 0x0e, 0x4c, 0xb6, 0x03, 0xb2, - 0x49, 0x02, 0x55, 0x34, 0x63, 0x5d, 0xce, 0x5f, 0x1d, 0xbd, 0x76, 0x35, 0xf5, 0x63, 0x4d, 0xd4, - 0x65, 0x2f, 0x0a, 0xf6, 0x17, 0x1f, 0x17, 0xed, 0x4d, 0x26, 0xa0, 0x38, 0x49, 0xd9, 0xfe, 0x57, - 0x39, 0x38, 0xb7, 0xf0, 0x7e, 0x27, 0x20, 0x15, 0x37, 0xdc, 0x49, 0xae, 0xf0, 0xa6, 0x1b, 0xee, - 0xac, 0xc5, 0x23, 0xa0, 0x96, 0x56, 0x45, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x87, 0x11, 0xfa, 0xfb, - 0x0e, 0xae, 0x8a, 0x4f, 0x3e, 0x23, 0x90, 0x47, 0x2b, 0x4e, 0xe4, 0x54, 0x38, 0x08, 0x4b, 0x1c, - 0xb4, 0x0a, 0xa3, 0x0d, 0xb6, 0x21, 0xb7, 0x56, 0xfd, 0x26, 0x61, 0x93, 0x59, 0x5a, 0x7c, 0x96, - 0xa2, 0x2f, 0xc5, 0xc5, 0x47, 0x07, 0xe5, 0x19, 0xde, 0x37, 0x41, 0x42, 0x83, 0x61, 0xbd, 0x3e, - 0xb2, 0xd5, 0xfe, 0x2a, 0x30, 0x4a, 0x90, 0xb2, 0xb7, 0xae, 0x6a, 0x5b, 0x65, 0x88, 0x6d, 0x95, - 0xb1, 0xf4, 0x6d, 0x82, 0x5e, 0x80, 0xc2, 0x8e, 0xeb, 0x35, 0x67, 0x86, 0x19, 0xad, 0x8b, 0x74, - 0xce, 0x6f, 0xba, 0x5e, 0xf3, 0xe8, 0xa0, 0x3c, 0x6d, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, - 0x89, 0x05, 0x65, 0x06, 0x5b, 0x71, 0x5b, 0xa4, 0x46, 0x82, 0xd0, 0x0d, 0x23, 0xe2, 0x45, 0xc6, - 0x80, 0x5e, 0x03, 0x08, 0x49, 0x23, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0xd4, 0x15, 0x04, 0x6b, - 0x58, 0xf4, 0x40, 0x08, 0xb7, 0x9d, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x40, 0xa8, 0x4b, 0x00, - 0x8e, 0x71, 0x8c, 0x03, 0x21, 0xdf, 0xef, 0x40, 0x40, 0x9f, 0x85, 0xc9, 0xb8, 0xb1, 0xb0, 0xed, - 0x34, 0xe4, 0x00, 0xb2, 0x2d, 0x53, 0x37, 0x41, 0x38, 0x89, 0x6b, 0xff, 0x7d, 0x4b, 0x2c, 0x1e, - 0xfa, 0xd5, 0x1f, 0xf1, 0x6f, 0xb5, 0x7f, 0xd9, 0x82, 0x91, 0x45, 0xd7, 0x6b, 0xba, 0xde, 0x16, - 0xfa, 0x12, 0x14, 0xe9, 0xdd, 0xd4, 0x74, 0x22, 0x47, 0x9c, 0x7b, 0x9f, 0xd2, 0xf6, 0x96, 0xba, - 0x2a, 0xe6, 0xda, 0x3b, 0x5b, 0xb4, 0x20, 0x9c, 0xa3, 0xd8, 0x74, 0xb7, 0xdd, 0xde, 0x78, 0x97, - 0x34, 0xa2, 0x55, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x38, 0x72, - 0x82, 0x2d, 0x12, 0x89, 0x03, 0x30, 0xf5, 0xa0, 0xe2, 0x35, 0x31, 0xdd, 0x91, 0xc4, 0x6b, 0x90, - 0xf8, 0x5a, 0x58, 0x67, 0x55, 0xb1, 0x20, 0x61, 0xff, 0x95, 0x61, 0x38, 0xbf, 0x54, 0xaf, 0x66, - 0xac, 0xab, 0x2b, 0x30, 0xdc, 0x0c, 0xdc, 0x3d, 0x12, 0x88, 0x71, 0x56, 0x54, 0x2a, 0xac, 0x14, - 0x0b, 0x28, 0x7a, 0x15, 0xc6, 0xf8, 0x85, 0x74, 0xc3, 0xf1, 0x9a, 0x2d, 0x39, 0xc4, 0x67, 0x05, - 0xf6, 0xd8, 0x5d, 0x0d, 0x86, 0x0d, 0xcc, 0x63, 0x2e, 0xaa, 0x2b, 0x89, 0xcd, 0x98, 0x75, 0xd9, - 0x7d, 0xc5, 0x82, 0x29, 0xde, 0xcc, 0x42, 0x14, 0x05, 0xee, 0x46, 0x27, 0x22, 0xe1, 0xcc, 0x10, - 0x3b, 0xe9, 0x96, 0xd2, 0x46, 0x2b, 0x73, 0x04, 0xe6, 0xee, 0x26, 0xa8, 0xf0, 0x43, 0x70, 0x46, - 0xb4, 0x3b, 0x95, 0x04, 0xe3, 0xae, 0x66, 0xd1, 0xf7, 0x5a, 0x30, 0xdb, 0xf0, 0xbd, 0x28, 0xf0, - 0x5b, 0x2d, 0x12, 0xd4, 0x3a, 0x1b, 0x2d, 0x37, 0xdc, 0xe6, 0xeb, 0x14, 0x93, 0x4d, 0x76, 0x12, - 0x64, 0xcc, 0xa1, 0x42, 0x12, 0x73, 0x78, 0xe9, 0xf0, 0xa0, 0x3c, 0xbb, 0x94, 0x49, 0x0a, 0xf7, - 0x68, 0x06, 0xed, 0x00, 0xa2, 0x57, 0x69, 0x3d, 0x72, 0xb6, 0x48, 0xdc, 0xf8, 
0xc8, 0xe0, 0x8d, - 0x3f, 0x76, 0x78, 0x50, 0x46, 0x6b, 0x5d, 0x24, 0x70, 0x0a, 0x59, 0xf4, 0x1e, 0x9c, 0xa5, 0xa5, - 0x5d, 0xdf, 0x5a, 0x1c, 0xbc, 0xb9, 0x99, 0xc3, 0x83, 0xf2, 0xd9, 0xb5, 0x14, 0x22, 0x38, 0x95, - 0xf4, 0xec, 0x12, 0x9c, 0x4b, 0x9d, 0x2a, 0x34, 0x05, 0xf9, 0x1d, 0xc2, 0x59, 0x90, 0x12, 0xa6, - 0x3f, 0xd1, 0x59, 0x18, 0xda, 0x73, 0x5a, 0x1d, 0xb1, 0x4a, 0x31, 0xff, 0xf3, 0x99, 0xdc, 0xab, - 0x96, 0xfd, 0xaf, 0xf3, 0x30, 0xb9, 0x54, 0xaf, 0x3e, 0xd4, 0x16, 0xd0, 0xef, 0x80, 0x5c, 0xcf, - 0x3b, 0x20, 0xbe, 0x51, 0xf2, 0x99, 0x37, 0xca, 0xff, 0x9d, 0xb2, 0x7e, 0x0b, 0x6c, 0xfd, 0x7e, - 0x5b, 0xc6, 0xfa, 0x3d, 0xe1, 0x55, 0xbb, 0x97, 0x31, 0x85, 0x43, 0x6c, 0x0a, 0x53, 0xd9, 0x85, - 0x5b, 0x7e, 0xc3, 0x69, 0x25, 0xcf, 0x9d, 0x0f, 0x65, 0x1e, 0x1b, 0x30, 0xb6, 0xe4, 0xb4, 0x9d, - 0x0d, 0xb7, 0xe5, 0x46, 0x2e, 0x09, 0xd1, 0xd3, 0x90, 0x77, 0x9a, 0x4d, 0xc6, 0xea, 0x94, 0x16, - 0xcf, 0x1d, 0x1e, 0x94, 0xf3, 0x0b, 0x4d, 0x7a, 0xe7, 0x82, 0xc2, 0xda, 0xc7, 0x14, 0x03, 0x7d, - 0x12, 0x0a, 0xcd, 0xc0, 0x6f, 0xcf, 0xe4, 0x18, 0x26, 0x5d, 0xf2, 0x85, 0x4a, 0xe0, 0xb7, 0x13, - 0xa8, 0x0c, 0xc7, 0xfe, 0xb5, 0x1c, 0x5c, 0x58, 0x22, 0xed, 0xed, 0x95, 0x7a, 0xc6, 0xe1, 0x79, - 0x15, 0x8a, 0xbb, 0xbe, 0xe7, 0x46, 0x7e, 0x10, 0x8a, 0xa6, 0xd9, 0x8a, 0x58, 0x15, 0x65, 0x58, - 0x41, 0xd1, 0x65, 0x28, 0xb4, 0x63, 0x8e, 0x6e, 0x4c, 0x72, 0x83, 0x8c, 0x97, 0x63, 0x10, 0x8a, - 0xd1, 0x09, 0x49, 0x20, 0x56, 0x8c, 0xc2, 0xb8, 0x13, 0x92, 0x00, 0x33, 0x48, 0x7c, 0x2d, 0xd2, - 0x0b, 0x53, 0x1c, 0x8f, 0x89, 0x6b, 0x91, 0x42, 0xb0, 0x86, 0x85, 0x6a, 0x50, 0x0a, 0x13, 0x33, - 0x3b, 0xd0, 0xe6, 0x1c, 0x67, 0xf7, 0xa6, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0x9c, 0x0f, 0xf7, 0xbd, - 0x37, 0xbf, 0x9e, 0x03, 0xc4, 0x87, 0xf0, 0x2f, 0xd8, 0xc0, 0xdd, 0xe9, 0x1e, 0xb8, 0xc1, 0xb7, - 0xc4, 0x49, 0x8d, 0xde, 0xff, 0xb4, 0xe0, 0xc2, 0x92, 0xeb, 0x35, 0x49, 0x90, 0xb1, 0x00, 0x1f, - 0x8d, 0x20, 0x79, 0xbc, 0x1b, 0xdb, 0x58, 0x62, 0x85, 0x13, 0x58, 0x62, 0xf6, 0x1f, 0x5b, 0x80, - 0xf8, 0x67, 0x7f, 0xe4, 0x3e, 0xf6, 0x4e, 0xf7, 0xc7, 0x9e, 0xc0, 0xb2, 0xb0, 0x6f, 0xc1, 0xc4, - 0x52, 0xcb, 0x25, 0x5e, 0x54, 0xad, 0x2d, 0xf9, 0xde, 0xa6, 0xbb, 0x85, 0x3e, 0x03, 0x13, 0x91, - 0xbb, 0x4b, 0xfc, 0x4e, 0x54, 0x27, 0x0d, 0xdf, 0x63, 0x62, 0x1c, 0x95, 0xe8, 0xd1, 0xe1, 0x41, - 0x79, 0x62, 0xdd, 0x80, 0xe0, 0x04, 0xa6, 0xfd, 0x3b, 0x74, 0xfc, 0xfc, 0xdd, 0xb6, 0xef, 0x11, - 0x2f, 0x5a, 0xf2, 0xbd, 0x26, 0x17, 0xf7, 0x3f, 0x03, 0x85, 0x88, 0x8e, 0x07, 0x1f, 0xbb, 0x2b, - 0x72, 0xa3, 0xd0, 0x51, 0x38, 0x3a, 0x28, 0x3f, 0xd6, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, - 0x0d, 0x86, 0xc3, 0xc8, 0x89, 0x3a, 0xa1, 0x18, 0xcd, 0x27, 0xe5, 0x68, 0xd6, 0x59, 0xe9, 0xd1, - 0x41, 0x79, 0x52, 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, 0xcf, 0xc0, 0xc8, 0x2e, 0x09, 0x43, 0x67, - 0x4b, 0xde, 0x86, 0x93, 0xa2, 0xee, 0xc8, 0x2a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x82, 0x21, 0x12, - 0x04, 0x7e, 0x20, 0xf6, 0xe8, 0xb8, 0x40, 0x1c, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x7f, 0x67, - 0xc1, 0xa4, 0xea, 0x2b, 0x6f, 0xeb, 0x14, 0x58, 0xf2, 0xb7, 0x01, 0x1a, 0xf2, 0x03, 0x43, 0x76, - 0x7b, 0x8c, 0x5e, 0xbb, 0x92, 0x7a, 0x51, 0x77, 0x0d, 0x63, 0x4c, 0x59, 0x15, 0x85, 0x58, 0xa3, - 0x66, 0xff, 0x33, 0x0b, 0xce, 0x24, 0xbe, 0xe8, 0x96, 0x1b, 0x46, 0xe8, 0x9d, 0xae, 0xaf, 0x9a, - 0x1b, 0xec, 0xab, 0x68, 0x6d, 0xf6, 0x4d, 0x6a, 0x29, 0xcb, 0x12, 0xed, 0x8b, 0x6e, 0xc0, 0x90, - 0x1b, 0x91, 0x5d, 0xf9, 0x31, 0x4f, 0xf5, 0xfc, 0x18, 0xde, 0xab, 0x78, 0x46, 0xaa, 0xb4, 0x26, - 0xe6, 0x04, 0xec, 0xbf, 0x9e, 0x87, 0x12, 0x5f, 0xb6, 0xab, 0x4e, 0xfb, 0x14, 0xe6, 0xa2, 0x0a, - 0x05, 
0x46, 0x9d, 0x77, 0xfc, 0xe9, 0xf4, 0x8e, 0x8b, 0xee, 0xcc, 0x51, 0x79, 0x9b, 0x33, 0x47, - 0xea, 0x6a, 0xa0, 0x45, 0x98, 0x91, 0x40, 0x0e, 0xc0, 0x86, 0xeb, 0x39, 0xc1, 0x3e, 0x2d, 0x9b, - 0xc9, 0x33, 0x82, 0xcf, 0xf7, 0x26, 0xb8, 0xa8, 0xf0, 0x39, 0x59, 0xd5, 0xd7, 0x18, 0x80, 0x35, - 0xa2, 0xb3, 0xaf, 0x40, 0x49, 0x21, 0x1f, 0x87, 0xc7, 0x99, 0xfd, 0x2c, 0x4c, 0x26, 0xda, 0xea, - 0x57, 0x7d, 0x4c, 0x67, 0x91, 0x7e, 0x85, 0x9d, 0x02, 0xa2, 0xd7, 0xcb, 0xde, 0x9e, 0x38, 0x45, - 0xdf, 0x87, 0xb3, 0xad, 0x94, 0xc3, 0x49, 0x4c, 0xd5, 0xe0, 0x87, 0xd9, 0x05, 0xf1, 0xd9, 0x67, - 0xd3, 0xa0, 0x38, 0xb5, 0x0d, 0x7a, 0xed, 0xfb, 0x6d, 0xba, 0xe6, 0x9d, 0x96, 0xce, 0x41, 0xdf, - 0x16, 0x65, 0x58, 0x41, 0xe9, 0x11, 0x76, 0x56, 0x75, 0xfe, 0x26, 0xd9, 0xaf, 0x93, 0x16, 0x69, - 0x44, 0x7e, 0xf0, 0xa1, 0x76, 0xff, 0x22, 0x1f, 0x7d, 0x7e, 0x02, 0x8e, 0x0a, 0x02, 0xf9, 0x9b, - 0x64, 0x9f, 0x4f, 0x85, 0xfe, 0x75, 0xf9, 0x9e, 0x5f, 0xf7, 0x73, 0x16, 0x8c, 0xab, 0xaf, 0x3b, - 0x85, 0xad, 0xbe, 0x68, 0x6e, 0xf5, 0x8b, 0x3d, 0x17, 0x78, 0xc6, 0x26, 0xff, 0x7a, 0x0e, 0xce, - 0x2b, 0x1c, 0xca, 0xee, 0xf3, 0x3f, 0x62, 0x55, 0xcd, 0x43, 0xc9, 0x53, 0x5a, 0x20, 0xcb, 0x54, - 0xbf, 0xc4, 0x3a, 0xa0, 0x18, 0x87, 0x72, 0x6d, 0x5e, 0xac, 0xaa, 0x19, 0xd3, 0xd5, 0xa3, 0x42, - 0x15, 0xba, 0x08, 0xf9, 0x8e, 0xdb, 0x14, 0x77, 0xc6, 0xa7, 0xe4, 0x68, 0xdf, 0xa9, 0x56, 0x8e, - 0x0e, 0xca, 0x4f, 0x66, 0xa9, 0xe6, 0xe9, 0x65, 0x15, 0xce, 0xdd, 0xa9, 0x56, 0x30, 0xad, 0x8c, - 0x16, 0x60, 0x52, 0xbe, 0x3e, 0xdc, 0xa5, 0x1c, 0x94, 0xef, 0x89, 0xab, 0x45, 0xe9, 0x38, 0xb1, - 0x09, 0xc6, 0x49, 0x7c, 0x54, 0x81, 0xa9, 0x9d, 0xce, 0x06, 0x69, 0x91, 0x88, 0x7f, 0xf0, 0x4d, - 0xc2, 0x35, 0x80, 0xa5, 0x58, 0xd8, 0xba, 0x99, 0x80, 0xe3, 0xae, 0x1a, 0xf6, 0x9f, 0xb3, 0x23, - 0x5e, 0x8c, 0x5e, 0x2d, 0xf0, 0xe9, 0xc2, 0xa2, 0xd4, 0x3f, 0xcc, 0xe5, 0x3c, 0xc8, 0xaa, 0xb8, - 0x49, 0xf6, 0xd7, 0x7d, 0xca, 0x6c, 0xa7, 0xaf, 0x0a, 0x63, 0xcd, 0x17, 0x7a, 0xae, 0xf9, 0x5f, - 0xc8, 0xc1, 0x39, 0x35, 0x02, 0x06, 0x5f, 0xf7, 0x17, 0x7d, 0x0c, 0x5e, 0x80, 0xd1, 0x26, 0xd9, - 0x74, 0x3a, 0xad, 0x48, 0xa9, 0xa3, 0x87, 0xf8, 0x93, 0x44, 0x25, 0x2e, 0xc6, 0x3a, 0xce, 0x31, - 0x86, 0xed, 0x27, 0x46, 0xd9, 0xdd, 0x1a, 0x39, 0x74, 0x8d, 0xab, 0x5d, 0x63, 0x65, 0xee, 0x9a, - 0xa7, 0x60, 0xc8, 0xdd, 0xa5, 0xbc, 0x56, 0xce, 0x64, 0xa1, 0xaa, 0xb4, 0x10, 0x73, 0x18, 0xfa, - 0x04, 0x8c, 0x34, 0xfc, 0xdd, 0x5d, 0xc7, 0x6b, 0xb2, 0x2b, 0xaf, 0xb4, 0x38, 0x4a, 0xd9, 0xb1, - 0x25, 0x5e, 0x84, 0x25, 0x0c, 0x5d, 0x80, 0x82, 0x13, 0x6c, 0x71, 0xb5, 0x44, 0x69, 0xb1, 0x48, - 0x5b, 0x5a, 0x08, 0xb6, 0x42, 0xcc, 0x4a, 0xa9, 0x54, 0x75, 0xdf, 0x0f, 0x76, 0x5c, 0x6f, 0xab, - 0xe2, 0x06, 0x62, 0x4b, 0xa8, 0xbb, 0xf0, 0x9e, 0x82, 0x60, 0x0d, 0x0b, 0xad, 0xc0, 0x50, 0xdb, - 0x0f, 0xa2, 0x70, 0x66, 0x98, 0x0d, 0xf7, 0x93, 0x19, 0x07, 0x11, 0xff, 0xda, 0x9a, 0x1f, 0x44, - 0xf1, 0x07, 0xd0, 0x7f, 0x21, 0xe6, 0xd5, 0xd1, 0xb7, 0x41, 0x9e, 0x78, 0x7b, 0x33, 0x23, 0x8c, - 0xca, 0x6c, 0x1a, 0x95, 0x65, 0x6f, 0xef, 0xae, 0x13, 0xc4, 0xa7, 0xf4, 0xb2, 0xb7, 0x87, 0x69, - 0x1d, 0xf4, 0x79, 0x28, 0xc9, 0x2d, 0x1e, 0x0a, 0x75, 0x55, 0xea, 0x12, 0x93, 0x07, 0x03, 0x26, - 0xef, 0x75, 0xdc, 0x80, 0xec, 0x12, 0x2f, 0x0a, 0xe3, 0x33, 0x4d, 0x42, 0x43, 0x1c, 0x53, 0x43, - 0x9f, 0x97, 0x3a, 0xd2, 0x55, 0xbf, 0xe3, 0x45, 0xe1, 0x4c, 0x89, 0x75, 0x2f, 0xf5, 0xf5, 0xea, - 0x6e, 0x8c, 0x97, 0x54, 0xa2, 0xf2, 0xca, 0xd8, 0x20, 0x85, 0x30, 0x8c, 0xb7, 0xdc, 0x3d, 0xe2, - 0x91, 0x30, 0xac, 0x05, 0xfe, 0x06, 0x99, 0x01, 0xd6, 0xf3, 0xf3, 0xe9, 0x8f, 0x3a, 0xfe, 0x06, - 0x59, 0x9c, 0x3e, 0x3c, 0x28, 
0x8f, 0xdf, 0xd2, 0xeb, 0x60, 0x93, 0x04, 0xba, 0x03, 0x13, 0x54, - 0xae, 0x71, 0x63, 0xa2, 0xa3, 0xfd, 0x88, 0x32, 0xe9, 0x03, 0x1b, 0x95, 0x70, 0x82, 0x08, 0x7a, - 0x13, 0x4a, 0x2d, 0x77, 0x93, 0x34, 0xf6, 0x1b, 0x2d, 0x32, 0x33, 0xc6, 0x28, 0xa6, 0x6e, 0xab, - 0x5b, 0x12, 0x89, 0xcb, 0x45, 0xea, 0x2f, 0x8e, 0xab, 0xa3, 0xbb, 0xf0, 0x58, 0x44, 0x82, 0x5d, - 0xd7, 0x73, 0xe8, 0x76, 0x10, 0xf2, 0x02, 0x7b, 0x1a, 0x1b, 0x67, 0xeb, 0xed, 0x92, 0x18, 0xba, - 0xc7, 0xd6, 0x53, 0xb1, 0x70, 0x46, 0x6d, 0x74, 0x1b, 0x26, 0xd9, 0x4e, 0xa8, 0x75, 0x5a, 0xad, - 0x9a, 0xdf, 0x72, 0x1b, 0xfb, 0x33, 0x13, 0x8c, 0xe0, 0x27, 0xe4, 0xbd, 0x50, 0x35, 0xc1, 0x47, - 0x07, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x6d, 0xb4, 0xc1, 0xde, 0x42, 0x3a, 0x81, 0x1b, 0xed, 0xd3, - 0xf5, 0x4b, 0x1e, 0x44, 0x33, 0x93, 0x3d, 0x45, 0x61, 0x1d, 0x55, 0x3d, 0x98, 0xe8, 0x85, 0x38, - 0x49, 0x90, 0x6e, 0xed, 0x30, 0x6a, 0xba, 0xde, 0xcc, 0x14, 0x3b, 0x31, 0xd4, 0xce, 0xa8, 0xd3, - 0x42, 0xcc, 0x61, 0xec, 0x1d, 0x84, 0xfe, 0xb8, 0x4d, 0x4f, 0xd0, 0x69, 0x86, 0x18, 0xbf, 0x83, - 0x48, 0x00, 0x8e, 0x71, 0x28, 0x53, 0x13, 0x45, 0xfb, 0x33, 0x88, 0xa1, 0xaa, 0xed, 0xb2, 0xbe, - 0xfe, 0x79, 0x4c, 0xcb, 0xd1, 0x2d, 0x18, 0x21, 0xde, 0xde, 0x4a, 0xe0, 0xef, 0xce, 0x9c, 0xc9, - 0xde, 0xb3, 0xcb, 0x1c, 0x85, 0x1f, 0xe8, 0xb1, 0x80, 0x27, 0x8a, 0xb1, 0x24, 0x81, 0x1e, 0xc0, - 0x4c, 0xca, 0x8c, 0xf0, 0x09, 0x38, 0xcb, 0x26, 0xe0, 0x75, 0x51, 0x77, 0x66, 0x3d, 0x03, 0xef, - 0xa8, 0x07, 0x0c, 0x67, 0x52, 0x47, 0x5f, 0x80, 0x71, 0xbe, 0xa1, 0xf8, 0x23, 0x6a, 0x38, 0x73, - 0x8e, 0x7d, 0xcd, 0xe5, 0xec, 0xcd, 0xc9, 0x11, 0x17, 0xcf, 0x89, 0x0e, 0x8d, 0xeb, 0xa5, 0x21, - 0x36, 0xa9, 0xd9, 0x1b, 0x30, 0xa1, 0xce, 0x2d, 0xb6, 0x74, 0x50, 0x19, 0x86, 0x18, 0xb7, 0x23, - 0xf4, 0x5b, 0x25, 0x3a, 0x53, 0x8c, 0x13, 0xc2, 0xbc, 0x9c, 0xcd, 0x94, 0xfb, 0x3e, 0x59, 0xdc, - 0x8f, 0x08, 0x97, 0xaa, 0xf3, 0xda, 0x4c, 0x49, 0x00, 0x8e, 0x71, 0xec, 0xff, 0xcd, 0xb9, 0xc6, - 0xf8, 0x70, 0x1c, 0xe0, 0x3a, 0x78, 0x0e, 0x8a, 0xdb, 0x7e, 0x18, 0x51, 0x6c, 0xd6, 0xc6, 0x50, - 0xcc, 0x27, 0xde, 0x10, 0xe5, 0x58, 0x61, 0xa0, 0xd7, 0x60, 0xbc, 0xa1, 0x37, 0x20, 0xee, 0x32, - 0x35, 0x04, 0x46, 0xeb, 0xd8, 0xc4, 0x45, 0xaf, 0x42, 0x91, 0x99, 0x40, 0x34, 0xfc, 0x96, 0x60, - 0xb2, 0xe4, 0x85, 0x5c, 0xac, 0x89, 0xf2, 0x23, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x0a, 0x0c, 0xd3, - 0x2e, 0x54, 0x6b, 0xe2, 0x16, 0x51, 0xaa, 0x9a, 0x1b, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xd7, 0x72, - 0xda, 0x28, 0x53, 0x89, 0x94, 0xa0, 0x1a, 0x8c, 0xdc, 0x77, 0xdc, 0xc8, 0xf5, 0xb6, 0x04, 0xbb, - 0xf0, 0x4c, 0xcf, 0x2b, 0x85, 0x55, 0xba, 0xc7, 0x2b, 0xf0, 0x4b, 0x4f, 0xfc, 0xc1, 0x92, 0x0c, - 0xa5, 0x18, 0x74, 0x3c, 0x8f, 0x52, 0xcc, 0x0d, 0x4a, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, - 0x25, 0x19, 0xf4, 0x0e, 0x80, 0x5c, 0x96, 0xa4, 0x29, 0x4c, 0x0f, 0x9e, 0xeb, 0x4f, 0x74, 0x5d, - 0xd5, 0x59, 0x9c, 0xa0, 0x57, 0x6a, 0xfc, 0x1f, 0x6b, 0xf4, 0xec, 0x88, 0xb1, 0x55, 0xdd, 0x9d, - 0x41, 0xdf, 0x49, 0x4f, 0x02, 0x27, 0x88, 0x48, 0x73, 0x21, 0x12, 0x83, 0xf3, 0xc9, 0xc1, 0x64, - 0x8a, 0x75, 0x77, 0x97, 0xe8, 0xa7, 0x86, 0x20, 0x82, 0x63, 0x7a, 0xf6, 0x2f, 0xe5, 0x61, 0x26, - 0xab, 0xbb, 0x74, 0xd1, 0x91, 0x07, 0x6e, 0xb4, 0x44, 0xb9, 0x21, 0xcb, 0x5c, 0x74, 0xcb, 0xa2, - 0x1c, 0x2b, 0x0c, 0x3a, 0xfb, 0xa1, 0xbb, 0x25, 0x45, 0xc2, 0xa1, 0x78, 0xf6, 0xeb, 0xac, 0x14, - 0x0b, 0x28, 0xc5, 0x0b, 0x88, 0x13, 0x0a, 0xdb, 0x16, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, - 0xd7, 0x37, 0x15, 0xfa, 0xe8, 0x9b, 0x8c, 0x21, 0x1a, 0x3a, 0xd9, 0x21, 0x42, 0x5f, 0x04, 0xd8, - 0x74, 0x3d, 0x37, 0xdc, 0x66, 0xd4, 0x87, 0x8f, 0x4d, 
0x5d, 0xf1, 0x52, 0x2b, 0x8a, 0x0a, 0xd6, - 0x28, 0xa2, 0x97, 0x61, 0x54, 0x6d, 0xc0, 0x6a, 0x85, 0x3d, 0xf4, 0x69, 0x86, 0x13, 0xf1, 0x69, - 0x54, 0xc1, 0x3a, 0x9e, 0xfd, 0x6e, 0x72, 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0x35, 0xe8, 0xf8, - 0xe6, 0x7a, 0x8f, 0xaf, 0xfd, 0xeb, 0x79, 0x98, 0x34, 0x1a, 0xeb, 0x84, 0x03, 0x9c, 0x59, 0xd7, - 0xe9, 0x3d, 0xe7, 0x44, 0x44, 0xec, 0x3f, 0xbb, 0xff, 0x56, 0xd1, 0xef, 0x42, 0xba, 0x03, 0x78, - 0x7d, 0xf4, 0x45, 0x28, 0xb5, 0x9c, 0x90, 0xe9, 0xae, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x8b, 0xe5, - 0x08, 0x27, 0x8c, 0xb4, 0xab, 0x86, 0xd3, 0x8e, 0x49, 0xd2, 0x0b, 0x99, 0xf2, 0x3e, 0xd2, 0x78, - 0x4a, 0x75, 0x82, 0x32, 0x48, 0xfb, 0x98, 0xc3, 0xd0, 0xab, 0x30, 0x16, 0x10, 0xb6, 0x2a, 0x96, - 0x28, 0x2b, 0xc7, 0x96, 0xd9, 0x50, 0xcc, 0xf3, 0x61, 0x0d, 0x86, 0x0d, 0xcc, 0x98, 0x95, 0x1f, - 0xee, 0xc1, 0xca, 0x3f, 0x03, 0x23, 0xec, 0x87, 0x5a, 0x01, 0x6a, 0x36, 0xaa, 0xbc, 0x18, 0x4b, - 0x78, 0x72, 0xc1, 0x14, 0x07, 0x5c, 0x30, 0x9f, 0x84, 0x89, 0x8a, 0x43, 0x76, 0x7d, 0x6f, 0xd9, - 0x6b, 0xb6, 0x7d, 0xd7, 0x8b, 0xd0, 0x0c, 0x14, 0xd8, 0xed, 0xc0, 0xf7, 0x76, 0x81, 0x52, 0xc0, - 0x05, 0xca, 0x98, 0xdb, 0x5b, 0x70, 0xae, 0xe2, 0xdf, 0xf7, 0xee, 0x3b, 0x41, 0x73, 0xa1, 0x56, - 0xd5, 0xe4, 0xdc, 0x35, 0x29, 0x67, 0x71, 0x63, 0xa4, 0xd4, 0x33, 0x55, 0xab, 0xc9, 0xef, 0xda, - 0x15, 0xb7, 0x45, 0x32, 0xb4, 0x11, 0x7f, 0x33, 0x67, 0xb4, 0x14, 0xe3, 0xab, 0x07, 0x23, 0x2b, - 0xf3, 0xc1, 0xe8, 0x2d, 0x28, 0x6e, 0xba, 0xa4, 0xd5, 0xc4, 0x64, 0x53, 0x2c, 0xb1, 0xa7, 0xb3, - 0xed, 0x2b, 0x56, 0x28, 0xa6, 0xd4, 0x3e, 0x71, 0x29, 0x6d, 0x45, 0x54, 0xc6, 0x8a, 0x0c, 0xda, - 0x81, 0x29, 0x29, 0x06, 0x48, 0xa8, 0x58, 0x70, 0xcf, 0xf4, 0x92, 0x2d, 0x4c, 0xe2, 0x67, 0x0f, - 0x0f, 0xca, 0x53, 0x38, 0x41, 0x06, 0x77, 0x11, 0xa6, 0x62, 0xd9, 0x2e, 0x3d, 0x5a, 0x0b, 0x6c, - 0xf8, 0x99, 0x58, 0xc6, 0x24, 0x4c, 0x56, 0x6a, 0xff, 0xa8, 0x05, 0x8f, 0x77, 0x8d, 0x8c, 0x90, - 0xb4, 0x4f, 0x78, 0x16, 0x92, 0x92, 0x6f, 0xae, 0xbf, 0xe4, 0x6b, 0xff, 0xac, 0x05, 0x67, 0x97, - 0x77, 0xdb, 0xd1, 0x7e, 0xc5, 0x35, 0x5f, 0x77, 0x5e, 0x81, 0xe1, 0x5d, 0xd2, 0x74, 0x3b, 0xbb, - 0x62, 0xe6, 0xca, 0xf2, 0xf8, 0x59, 0x65, 0xa5, 0x47, 0x07, 0xe5, 0xf1, 0x7a, 0xe4, 0x07, 0xce, - 0x16, 0xe1, 0x05, 0x58, 0xa0, 0xb3, 0x43, 0xdc, 0x7d, 0x9f, 0xdc, 0x72, 0x77, 0x5d, 0x69, 0x2f, - 0xd3, 0x53, 0x77, 0x36, 0x27, 0x07, 0x74, 0xee, 0xad, 0x8e, 0xe3, 0x45, 0x6e, 0xb4, 0x2f, 0x1e, - 0x66, 0x24, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x69, 0xc1, 0xa4, 0x5c, 0xf7, 0x0b, 0xcd, 0x66, 0x40, - 0xc2, 0x10, 0xcd, 0x42, 0xce, 0x6d, 0x8b, 0x5e, 0x82, 0xe8, 0x65, 0xae, 0x5a, 0xc3, 0x39, 0xb7, - 0x8d, 0x6a, 0x50, 0xe2, 0x66, 0x37, 0xf1, 0xe2, 0x1a, 0xc8, 0x78, 0x87, 0xf5, 0x60, 0x5d, 0xd6, - 0xc4, 0x31, 0x11, 0xc9, 0xc1, 0xb1, 0x33, 0x33, 0x6f, 0xbe, 0x7a, 0xdd, 0x10, 0xe5, 0x58, 0x61, - 0xa0, 0xab, 0x50, 0xf4, 0xfc, 0x26, 0xb7, 0x82, 0xe2, 0xb7, 0x1f, 0x5b, 0xb2, 0x6b, 0xa2, 0x0c, - 0x2b, 0xa8, 0xfd, 0x43, 0x16, 0x8c, 0xc9, 0x2f, 0x1b, 0x90, 0x99, 0xa4, 0x5b, 0x2b, 0x66, 0x24, - 0xe3, 0xad, 0x45, 0x99, 0x41, 0x06, 0x31, 0x78, 0xc0, 0xfc, 0x71, 0x78, 0x40, 0xfb, 0x47, 0x72, - 0x30, 0x21, 0xbb, 0x53, 0xef, 0x6c, 0x84, 0x24, 0x42, 0xeb, 0x50, 0x72, 0xf8, 0x90, 0x13, 0xb9, - 0x62, 0x9f, 0x4a, 0x17, 0x3e, 0x8c, 0xf9, 0x89, 0xaf, 0xe5, 0x05, 0x59, 0x1b, 0xc7, 0x84, 0x50, - 0x0b, 0xa6, 0x3d, 0x3f, 0x62, 0x47, 0xb4, 0x82, 0xf7, 0x7a, 0x02, 0x49, 0x52, 0x3f, 0x2f, 0xa8, - 0x4f, 0xaf, 0x25, 0xa9, 0xe0, 0x6e, 0xc2, 0x68, 0x59, 0x2a, 0x3c, 0xf2, 0xd9, 0xe2, 0x86, 0x3e, - 0x0b, 0xe9, 0xfa, 0x0e, 0xfb, 0x57, 0x2d, 0x28, 0x49, 0xb4, 0xd3, 0x78, 0xed, 
0x5a, 0x85, 0x91, - 0x90, 0x4d, 0x82, 0x1c, 0x1a, 0xbb, 0x57, 0xc7, 0xf9, 0x7c, 0xc5, 0x37, 0x0f, 0xff, 0x1f, 0x62, - 0x49, 0x83, 0xe9, 0xbb, 0x55, 0xf7, 0x3f, 0x22, 0xfa, 0x6e, 0xd5, 0x9f, 0x8c, 0x1b, 0xe6, 0x0f, - 0x58, 0x9f, 0x35, 0xb1, 0x96, 0x32, 0x48, 0xed, 0x80, 0x6c, 0xba, 0x0f, 0x92, 0x0c, 0x52, 0x8d, - 0x95, 0x62, 0x01, 0x45, 0xef, 0xc0, 0x58, 0x43, 0x2a, 0x3a, 0xe3, 0x63, 0xe0, 0x4a, 0x4f, 0xa5, - 0xbb, 0x7a, 0x9f, 0xe1, 0x16, 0xd2, 0x4b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0x9f, 0xdb, 0xf3, 0xfd, - 0x9e, 0xdb, 0x63, 0xba, 0xd9, 0x8f, 0xcf, 0x3f, 0x66, 0xc1, 0x30, 0x57, 0x97, 0x0d, 0xa6, 0x5f, - 0xd4, 0x9e, 0xab, 0xe2, 0xb1, 0xbb, 0x4b, 0x0b, 0xc5, 0xf3, 0x13, 0x5a, 0x85, 0x12, 0xfb, 0xc1, - 0xd4, 0x06, 0xf9, 0x6c, 0xd3, 0x70, 0xde, 0xaa, 0xde, 0xc1, 0xbb, 0xb2, 0x1a, 0x8e, 0x29, 0xd8, - 0x5f, 0xcb, 0xd3, 0xa3, 0x2a, 0x46, 0x35, 0x6e, 0x70, 0xeb, 0xd1, 0xdd, 0xe0, 0xb9, 0x47, 0x75, - 0x83, 0x6f, 0xc1, 0x64, 0x43, 0x7b, 0xdc, 0x8a, 0x67, 0xf2, 0x6a, 0xcf, 0x45, 0xa2, 0xbd, 0x83, - 0x71, 0x95, 0xd1, 0x92, 0x49, 0x04, 0x27, 0xa9, 0xa2, 0xef, 0x84, 0x31, 0x3e, 0xcf, 0xa2, 0x15, - 0x6e, 0xb1, 0xf0, 0x89, 0xec, 0xf5, 0xa2, 0x37, 0xc1, 0x56, 0x62, 0x5d, 0xab, 0x8e, 0x0d, 0x62, - 0xf6, 0x2f, 0x15, 0x61, 0x68, 0x79, 0x8f, 0x78, 0xd1, 0x29, 0x1c, 0x48, 0x0d, 0x98, 0x70, 0xbd, - 0x3d, 0xbf, 0xb5, 0x47, 0x9a, 0x1c, 0x7e, 0x9c, 0xcb, 0xf5, 0x31, 0x41, 0x7a, 0xa2, 0x6a, 0x90, - 0xc0, 0x09, 0x92, 0x8f, 0x42, 0xc2, 0xbc, 0x0e, 0xc3, 0x7c, 0xee, 0x85, 0x78, 0x99, 0xaa, 0x0c, - 0x66, 0x83, 0x28, 0x76, 0x41, 0x2c, 0xfd, 0x72, 0xed, 0xb3, 0xa8, 0x8e, 0xde, 0x85, 0x89, 0x4d, - 0x37, 0x08, 0x23, 0x2a, 0x1a, 0x86, 0x91, 0xb3, 0xdb, 0x7e, 0x08, 0x89, 0x52, 0x8d, 0xc3, 0x8a, - 0x41, 0x09, 0x27, 0x28, 0xa3, 0x2d, 0x18, 0xa7, 0x42, 0x4e, 0xdc, 0xd4, 0xc8, 0xb1, 0x9b, 0x52, - 0x2a, 0xa3, 0x5b, 0x3a, 0x21, 0x6c, 0xd2, 0xa5, 0x87, 0x49, 0x83, 0x09, 0x45, 0x45, 0xc6, 0x51, - 0xa8, 0xc3, 0x84, 0x4b, 0x43, 0x1c, 0x46, 0xcf, 0x24, 0x66, 0xb6, 0x52, 0x32, 0xcf, 0x24, 0xcd, - 0x38, 0xe5, 0x4b, 0x50, 0x22, 0x74, 0x08, 0x29, 0x61, 0xa1, 0x18, 0x9f, 0x1f, 0xac, 0xaf, 0xab, - 0x6e, 0x23, 0xf0, 0x4d, 0x59, 0x7e, 0x59, 0x52, 0xc2, 0x31, 0x51, 0xb4, 0x04, 0xc3, 0x21, 0x09, - 0x5c, 0x12, 0x0a, 0x15, 0x79, 0x8f, 0x69, 0x64, 0x68, 0xdc, 0xe2, 0x93, 0xff, 0xc6, 0xa2, 0x2a, - 0x5d, 0x5e, 0x0e, 0x93, 0x86, 0x98, 0x56, 0x5c, 0x5b, 0x5e, 0x0b, 0xac, 0x14, 0x0b, 0x28, 0x7a, - 0x13, 0x46, 0x02, 0xd2, 0x62, 0xca, 0xa2, 0xf1, 0xc1, 0x17, 0x39, 0xd7, 0x3d, 0xf1, 0x7a, 0x58, - 0x12, 0x40, 0x37, 0x01, 0x05, 0x84, 0xf2, 0x10, 0xae, 0xb7, 0xa5, 0x8c, 0x39, 0x84, 0xae, 0xfb, - 0x09, 0xd1, 0xfe, 0x19, 0x1c, 0x63, 0x48, 0xeb, 0x62, 0x9c, 0x52, 0x0d, 0x5d, 0x87, 0x69, 0x55, - 0x5a, 0xf5, 0xc2, 0xc8, 0xf1, 0x1a, 0x84, 0xa9, 0xb9, 0x4b, 0x31, 0x57, 0x84, 0x93, 0x08, 0xb8, - 0xbb, 0x8e, 0xfd, 0xd3, 0x94, 0x9d, 0xa1, 0xa3, 0x75, 0x0a, 0xbc, 0xc0, 0x1b, 0x26, 0x2f, 0x70, - 0x3e, 0x73, 0xe6, 0x32, 0xf8, 0x80, 0x43, 0x0b, 0x46, 0xb5, 0x99, 0x8d, 0xd7, 0xac, 0xd5, 0x63, - 0xcd, 0x76, 0x60, 0x8a, 0xae, 0xf4, 0xdb, 0x1b, 0x21, 0x09, 0xf6, 0x48, 0x93, 0x2d, 0xcc, 0xdc, - 0xc3, 0x2d, 0x4c, 0xf5, 0xca, 0x7c, 0x2b, 0x41, 0x10, 0x77, 0x35, 0x81, 0x5e, 0x91, 0x9a, 0x93, - 0xbc, 0x61, 0xa4, 0xc5, 0xb5, 0x22, 0x47, 0x07, 0xe5, 0x29, 0xed, 0x43, 0x74, 0x4d, 0x89, 0xfd, - 0x25, 0xf9, 0x8d, 0xea, 0x35, 0xbf, 0xa1, 0x16, 0x4b, 0xe2, 0x35, 0x5f, 0x2d, 0x07, 0x1c, 0xe3, - 0xd0, 0x3d, 0x4a, 0x45, 0x90, 0xe4, 0x6b, 0x3e, 0x15, 0x50, 0x30, 0x83, 0xd8, 0x2f, 0x02, 0x2c, - 0x3f, 0x20, 0x0d, 0xbe, 0xd4, 0xf5, 0x07, 0x48, 0x2b, 0xfb, 0x01, 0xd2, 0xfe, 0x0f, 0x16, 0x4c, - 0xac, 
0x2c, 0x19, 0x62, 0xe2, 0x1c, 0x00, 0x97, 0x8d, 0xee, 0xdd, 0x5b, 0x93, 0xba, 0x75, 0xae, - 0x1e, 0x55, 0xa5, 0x58, 0xc3, 0x40, 0xe7, 0x21, 0xdf, 0xea, 0x78, 0x42, 0x64, 0x19, 0x39, 0x3c, - 0x28, 0xe7, 0x6f, 0x75, 0x3c, 0x4c, 0xcb, 0x34, 0x0b, 0xc1, 0xfc, 0xc0, 0x16, 0x82, 0x7d, 0xdd, - 0xe4, 0x50, 0x19, 0x86, 0xee, 0xdf, 0x77, 0x9b, 0xdc, 0x19, 0x41, 0xe8, 0xfd, 0xef, 0xdd, 0xab, - 0x56, 0x42, 0xcc, 0xcb, 0xed, 0xaf, 0xe6, 0x61, 0x76, 0xa5, 0x45, 0x1e, 0x7c, 0x40, 0x87, 0x8c, - 0x41, 0xed, 0x1b, 0x8f, 0xc7, 0x2f, 0x1e, 0xd7, 0x86, 0xb5, 0xff, 0x78, 0x6c, 0xc2, 0x08, 0x7f, - 0xcc, 0x96, 0xee, 0x19, 0xaf, 0xa5, 0xb5, 0x9e, 0x3d, 0x20, 0x73, 0xfc, 0x51, 0x5c, 0x18, 0xb8, - 0xab, 0x9b, 0x56, 0x94, 0x62, 0x49, 0x7c, 0xf6, 0x33, 0x30, 0xa6, 0x63, 0x1e, 0xcb, 0x9a, 0xfc, - 0xff, 0xcd, 0xc3, 0x14, 0xed, 0xc1, 0x23, 0x9d, 0x88, 0x3b, 0xdd, 0x13, 0x71, 0xd2, 0x16, 0xc5, - 0xfd, 0x67, 0xe3, 0x9d, 0xe4, 0x6c, 0xbc, 0x90, 0x35, 0x1b, 0xa7, 0x3d, 0x07, 0xdf, 0x6b, 0xc1, - 0x99, 0x95, 0x96, 0xdf, 0xd8, 0x49, 0x58, 0xfd, 0xbe, 0x0c, 0xa3, 0xf4, 0x1c, 0x0f, 0x0d, 0x6f, - 0x30, 0xc3, 0x3f, 0x50, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0xbb, 0x73, 0xa7, 0x5a, 0x49, 0x73, 0x2b, - 0x14, 0x20, 0xac, 0xe3, 0xd9, 0xdf, 0xb0, 0xe0, 0xe2, 0xf5, 0xa5, 0xe5, 0x78, 0x29, 0x76, 0x79, - 0x36, 0x52, 0x29, 0xb0, 0xa9, 0x75, 0x25, 0x96, 0x02, 0x2b, 0xac, 0x17, 0x02, 0xfa, 0x51, 0xf1, - 0xda, 0xfd, 0x29, 0x0b, 0xce, 0x5c, 0x77, 0x23, 0x7a, 0x2d, 0x27, 0x7d, 0xec, 0xe8, 0xbd, 0x1c, - 0xba, 0x91, 0x1f, 0xec, 0x27, 0x7d, 0xec, 0xb0, 0x82, 0x60, 0x0d, 0x8b, 0xb7, 0xbc, 0xe7, 0x32, - 0x33, 0xaa, 0x9c, 0xa9, 0x8a, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0xfa, 0x61, 0x4d, 0x37, 0x60, 0xa2, - 0xc4, 0xbe, 0x38, 0x61, 0xd5, 0x87, 0x55, 0x24, 0x00, 0xc7, 0x38, 0xf6, 0x1f, 0x59, 0x50, 0xbe, - 0xde, 0xea, 0x84, 0x11, 0x09, 0x36, 0xc3, 0x8c, 0xd3, 0xf1, 0x45, 0x28, 0x11, 0x29, 0xb8, 0x8b, - 0x5e, 0x2b, 0x56, 0x53, 0x49, 0xf4, 0xdc, 0xd5, 0x4f, 0xe1, 0x0d, 0xe0, 0x43, 0x70, 0x3c, 0x23, - 0xf0, 0x15, 0x40, 0x44, 0x6f, 0x4b, 0xf7, 0x7d, 0x64, 0x4e, 0x54, 0xcb, 0x5d, 0x50, 0x9c, 0x52, - 0xc3, 0xfe, 0x51, 0x0b, 0xce, 0xa9, 0x0f, 0xfe, 0xc8, 0x7d, 0xa6, 0xfd, 0xf3, 0x39, 0x18, 0xbf, - 0xb1, 0xbe, 0x5e, 0xbb, 0x4e, 0x22, 0x71, 0x6d, 0xf7, 0xd7, 0xad, 0x63, 0x4d, 0x45, 0xd8, 0x4b, - 0x0a, 0xec, 0x44, 0x6e, 0x6b, 0x8e, 0xbb, 0xd0, 0xcf, 0x55, 0xbd, 0xe8, 0x76, 0x50, 0x8f, 0x02, - 0xd7, 0xdb, 0x4a, 0x55, 0x2a, 0x4a, 0xe6, 0x22, 0x9f, 0xc5, 0x5c, 0xa0, 0x17, 0x61, 0x98, 0xf9, - 0xf0, 0xcb, 0x49, 0x78, 0x42, 0x09, 0x51, 0xac, 0xf4, 0xe8, 0xa0, 0x5c, 0xba, 0x83, 0xab, 0xfc, - 0x0f, 0x16, 0xa8, 0xe8, 0x0e, 0x8c, 0x6e, 0x47, 0x51, 0xfb, 0x06, 0x71, 0x9a, 0x24, 0x90, 0xc7, - 0xe1, 0xa5, 0xb4, 0xe3, 0x90, 0x0e, 0x02, 0x47, 0x8b, 0x4f, 0x90, 0xb8, 0x2c, 0xc4, 0x3a, 0x1d, - 0xbb, 0x0e, 0x10, 0xc3, 0x4e, 0x48, 0xa1, 0x62, 0xff, 0xbe, 0x05, 0x23, 0xdc, 0x9d, 0x32, 0x40, - 0xaf, 0x43, 0x81, 0x3c, 0x20, 0x0d, 0xc1, 0x2a, 0xa7, 0x76, 0x38, 0xe6, 0xb4, 0xf8, 0xf3, 0x00, - 0xfd, 0x8f, 0x59, 0x2d, 0x74, 0x03, 0x46, 0x68, 0x6f, 0xaf, 0x2b, 0xdf, 0xd2, 0x27, 0xb3, 0xbe, - 0x58, 0x4d, 0x3b, 0x67, 0xce, 0x44, 0x11, 0x96, 0xd5, 0x99, 0xaa, 0xbb, 0xd1, 0xae, 0xd3, 0x13, - 0x3b, 0xea, 0xc5, 0x58, 0xac, 0x2f, 0xd5, 0x38, 0x92, 0xa0, 0xc6, 0x55, 0xdd, 0xb2, 0x10, 0xc7, - 0x44, 0xec, 0x75, 0x28, 0xd1, 0x49, 0x5d, 0x68, 0xb9, 0x4e, 0x6f, 0x2d, 0xfb, 0xb3, 0x50, 0x92, - 0x1a, 0xef, 0x50, 0x78, 0x72, 0x31, 0xaa, 0x52, 0x21, 0x1e, 0xe2, 0x18, 0x6e, 0x6f, 0xc2, 0x59, - 0x66, 0xea, 0xe0, 0x44, 0xdb, 0xc6, 0x1e, 0xeb, 0xbf, 0x98, 0x9f, 0x13, 0x92, 0x27, 0x9f, 0x99, - 0x19, 0xcd, 0x59, 0x62, 0x4c, 
0x52, 0x8c, 0xa5, 0x50, 0xfb, 0x0f, 0x0b, 0xf0, 0x44, 0xb5, 0x9e, - 0xed, 0x69, 0xfb, 0x2a, 0x8c, 0x71, 0xbe, 0x94, 0x2e, 0x6d, 0xa7, 0x25, 0xda, 0x55, 0x0f, 0x81, - 0xeb, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x22, 0xe4, 0xdd, 0xf7, 0xbc, 0xa4, 0xdd, 0x71, 0xf5, 0xad, - 0x35, 0x4c, 0xcb, 0x29, 0x98, 0xb2, 0xb8, 0xfc, 0xee, 0x50, 0x60, 0xc5, 0xe6, 0xbe, 0x01, 0x13, - 0x6e, 0xd8, 0x08, 0xdd, 0xaa, 0x47, 0xcf, 0x19, 0xed, 0xa4, 0x52, 0x5a, 0x11, 0xda, 0x69, 0x05, - 0xc5, 0x09, 0x6c, 0xed, 0x22, 0x1b, 0x1a, 0x98, 0x4d, 0xee, 0xeb, 0xda, 0x44, 0x25, 0x80, 0x36, - 0xfb, 0xba, 0x90, 0x59, 0xf1, 0x09, 0x09, 0x80, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x8a, 0x9c, 0x8d, - 0x6d, 0xa7, 0xbd, 0xd0, 0x89, 0xb6, 0x2b, 0x6e, 0xd8, 0xf0, 0xf7, 0x48, 0xb0, 0xcf, 0xb4, 0x05, - 0xc5, 0x58, 0xe4, 0x54, 0x80, 0xa5, 0x1b, 0x0b, 0x35, 0x8a, 0x89, 0xbb, 0xeb, 0x98, 0x6c, 0x30, - 0x9c, 0x04, 0x1b, 0xbc, 0x00, 0x93, 0xb2, 0x99, 0x3a, 0x09, 0xd9, 0xa5, 0x38, 0xca, 0x3a, 0xa6, - 0x6c, 0x8b, 0x45, 0xb1, 0xea, 0x56, 0x12, 0x1f, 0xbd, 0x02, 0xe3, 0xae, 0xe7, 0x46, 0xae, 0x13, - 0xf9, 0x01, 0x63, 0x29, 0xb8, 0x62, 0x80, 0x99, 0xee, 0x55, 0x75, 0x00, 0x36, 0xf1, 0xec, 0xff, - 0x52, 0x80, 0x69, 0x36, 0x6d, 0xdf, 0x5a, 0x61, 0x1f, 0x99, 0x15, 0x76, 0xa7, 0x7b, 0x85, 0x9d, - 0x04, 0x7f, 0xff, 0x61, 0x2e, 0xb3, 0x77, 0xa1, 0xa4, 0x8c, 0x9f, 0xa5, 0xf7, 0x83, 0x95, 0xe1, - 0xfd, 0xd0, 0x9f, 0xfb, 0x90, 0xef, 0xd6, 0xf9, 0xd4, 0x77, 0xeb, 0xbf, 0x6d, 0x41, 0x6c, 0x03, - 0x8a, 0x6e, 0x40, 0xa9, 0xed, 0x33, 0x3b, 0x8b, 0x40, 0x1a, 0x2f, 0x3d, 0x91, 0x7a, 0x51, 0xf1, - 0x4b, 0x91, 0x8f, 0x5f, 0x4d, 0xd6, 0xc0, 0x71, 0x65, 0xb4, 0x08, 0x23, 0xed, 0x80, 0xd4, 0x23, - 0xe6, 0xf3, 0xdb, 0x97, 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0x2f, 0x58, 0x00, 0xfc, - 0x69, 0xd8, 0xf1, 0xb6, 0xc8, 0x29, 0xa8, 0xbb, 0x2b, 0x50, 0x08, 0xdb, 0xa4, 0xd1, 0xcb, 0x02, - 0x26, 0xee, 0x4f, 0xbd, 0x4d, 0x1a, 0xf1, 0x80, 0xd3, 0x7f, 0x98, 0xd5, 0xb6, 0xbf, 0x0f, 0x60, - 0x22, 0x46, 0xab, 0x46, 0x64, 0x17, 0x3d, 0x6f, 0xf8, 0x00, 0x9e, 0x4f, 0xf8, 0x00, 0x96, 0x18, - 0xb6, 0xa6, 0x59, 0x7d, 0x17, 0xf2, 0xbb, 0xce, 0x03, 0xa1, 0x3a, 0x7b, 0xb6, 0x77, 0x37, 0x28, - 0xfd, 0xb9, 0x55, 0xe7, 0x01, 0x17, 0x12, 0x9f, 0x95, 0x0b, 0x64, 0xd5, 0x79, 0x70, 0xc4, 0xed, - 0x5c, 0xd8, 0x21, 0x75, 0xcb, 0x0d, 0xa3, 0x2f, 0xff, 0xe7, 0xf8, 0x3f, 0x5b, 0x76, 0xb4, 0x11, - 0xd6, 0x96, 0xeb, 0x89, 0x87, 0xd2, 0x81, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae, 0x37, 0x40, 0x5b, - 0xae, 0x87, 0xde, 0x87, 0x11, 0x61, 0x94, 0x20, 0x7c, 0xee, 0xe7, 0x07, 0x68, 0x4f, 0xd8, 0x34, - 0xf0, 0x36, 0xe7, 0xa5, 0x10, 0x2c, 0x4a, 0xfb, 0xb6, 0x2b, 0x1b, 0x44, 0x7f, 0xc3, 0x82, 0x09, - 0xf1, 0x1b, 0x93, 0xf7, 0x3a, 0x24, 0x8c, 0x04, 0xef, 0xf9, 0xe9, 0xc1, 0xfb, 0x20, 0x2a, 0xf2, - 0xae, 0x7c, 0x5a, 0x1e, 0xb3, 0x26, 0xb0, 0x6f, 0x8f, 0x12, 0xbd, 0x40, 0xff, 0xd0, 0x82, 0xb3, - 0xbb, 0xce, 0x03, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x58, 0xff, 0xf5, 0xc1, 0xa6, - 0xbf, 0xab, 0x3a, 0xef, 0xa4, 0xb4, 0xeb, 0x3d, 0x9b, 0x86, 0xd2, 0xb7, 0xab, 0xa9, 0xfd, 0x9a, - 0xdd, 0x84, 0xa2, 0x5c, 0x6f, 0x29, 0xaa, 0x86, 0x8a, 0xce, 0x58, 0x1f, 0xdb, 0x26, 0x44, 0x77, - 0xc4, 0xa3, 0xed, 0x88, 0xb5, 0xf6, 0x48, 0xdb, 0x79, 0x17, 0xc6, 0xf4, 0x35, 0xf6, 0x48, 0xdb, - 0x7a, 0x0f, 0xce, 0xa4, 0xac, 0xa5, 0x47, 0xda, 0xe4, 0x7d, 0x38, 0x9f, 0xb9, 0x3e, 0x1e, 0x65, - 0xc3, 0xf6, 0xcf, 0x5b, 0xfa, 0x39, 0x78, 0x0a, 0x6f, 0x0e, 0x4b, 0xe6, 0x9b, 0xc3, 0xa5, 0xde, - 0x3b, 0x27, 0xe3, 0xe1, 0xe1, 0x1d, 0xbd, 0xd3, 0xf4, 0x54, 0x47, 0x6f, 0xc2, 0x70, 0x8b, 0x96, - 0x48, 0x6b, 0x18, 0xbb, 0xff, 0x8e, 0x8c, 0x79, 0x29, 
0x56, 0x1e, 0x62, 0x41, 0xc1, 0xfe, 0x65, - 0x0b, 0x0a, 0xa7, 0x30, 0x12, 0xd8, 0x1c, 0x89, 0xe7, 0x33, 0x49, 0x8b, 0x58, 0x7c, 0x73, 0xd8, - 0xb9, 0xbf, 0xfc, 0x20, 0x22, 0x5e, 0xc8, 0x44, 0xc5, 0xd4, 0x81, 0xf9, 0x2e, 0x38, 0x73, 0xcb, - 0x77, 0x9a, 0x8b, 0x4e, 0xcb, 0xf1, 0x1a, 0x24, 0xa8, 0x7a, 0x5b, 0x7d, 0xcd, 0xb2, 0x74, 0x23, - 0xaa, 0x5c, 0x3f, 0x23, 0x2a, 0x7b, 0x1b, 0x90, 0xde, 0x80, 0x30, 0x5c, 0xc5, 0x30, 0xe2, 0xf2, - 0xa6, 0xc4, 0xf0, 0x3f, 0x9d, 0xce, 0xdd, 0x75, 0xf5, 0x4c, 0x33, 0xc9, 0xe4, 0x05, 0x58, 0x12, - 0xb2, 0x5f, 0x85, 0x54, 0x67, 0xb5, 0xfe, 0x6a, 0x03, 0xfb, 0xf3, 0x30, 0xcd, 0x6a, 0x1e, 0x53, - 0xa4, 0xb5, 0x13, 0x5a, 0xc9, 0x94, 0xc8, 0x34, 0xf6, 0x57, 0x2c, 0x98, 0x5c, 0x4b, 0x04, 0xec, - 0xb8, 0xc2, 0x1e, 0x40, 0x53, 0x94, 0xe1, 0x75, 0x56, 0x8a, 0x05, 0xf4, 0xc4, 0x75, 0x50, 0x7f, - 0x6e, 0x41, 0xec, 0x3f, 0x7a, 0x0a, 0x8c, 0xd7, 0x92, 0xc1, 0x78, 0xa5, 0xea, 0x46, 0x54, 0x77, - 0xb2, 0xf8, 0x2e, 0x74, 0x53, 0x05, 0x4b, 0xe8, 0xa1, 0x16, 0x89, 0xc9, 0x70, 0xd7, 0xfa, 0x09, - 0x33, 0xa2, 0x82, 0x0c, 0x9f, 0xc0, 0x6c, 0xa7, 0x14, 0xee, 0x47, 0xc4, 0x76, 0x4a, 0xf5, 0x27, - 0x63, 0x87, 0xd6, 0xb4, 0x2e, 0xb3, 0x93, 0xeb, 0xdb, 0x99, 0x2d, 0xbc, 0xd3, 0x72, 0xdf, 0x27, - 0x2a, 0xe2, 0x4b, 0x59, 0xd8, 0xb6, 0x8b, 0xd2, 0xa3, 0x83, 0xf2, 0xb8, 0xfa, 0xc7, 0xc3, 0xbb, - 0xc5, 0x55, 0xec, 0x1b, 0x30, 0x99, 0x18, 0x30, 0xf4, 0x32, 0x0c, 0xb5, 0xb7, 0x9d, 0x90, 0x24, - 0xec, 0x45, 0x87, 0x6a, 0xb4, 0xf0, 0xe8, 0xa0, 0x3c, 0xa1, 0x2a, 0xb0, 0x12, 0xcc, 0xb1, 0xed, - 0xff, 0x6e, 0x41, 0x61, 0xcd, 0x6f, 0x9e, 0xc6, 0x62, 0x7a, 0xc3, 0x58, 0x4c, 0x17, 0xb2, 0x82, - 0x63, 0x66, 0xae, 0xa3, 0x95, 0xc4, 0x3a, 0xba, 0x94, 0x49, 0xa1, 0xf7, 0x12, 0xda, 0x85, 0x51, - 0x16, 0x72, 0x53, 0xd8, 0xaf, 0xbe, 0x68, 0xc8, 0x00, 0xe5, 0x84, 0x0c, 0x30, 0xa9, 0xa1, 0x6a, - 0x92, 0xc0, 0x33, 0x30, 0x22, 0x6c, 0x28, 0x93, 0x56, 0xff, 0x02, 0x17, 0x4b, 0xb8, 0xfd, 0x63, - 0x79, 0x30, 0x42, 0x7c, 0xa2, 0x5f, 0xb5, 0x60, 0x2e, 0xe0, 0x6e, 0x94, 0xcd, 0x4a, 0x27, 0x70, - 0xbd, 0xad, 0x7a, 0x63, 0x9b, 0x34, 0x3b, 0x2d, 0xd7, 0xdb, 0xaa, 0x6e, 0x79, 0xbe, 0x2a, 0x5e, - 0x7e, 0x40, 0x1a, 0x1d, 0xf6, 0x10, 0xd2, 0x27, 0x9e, 0xa8, 0xb2, 0x51, 0xba, 0x76, 0x78, 0x50, - 0x9e, 0xc3, 0xc7, 0xa2, 0x8d, 0x8f, 0xd9, 0x17, 0xf4, 0x0d, 0x0b, 0xe6, 0x79, 0xe4, 0xcb, 0xc1, - 0xfb, 0xdf, 0x43, 0x62, 0xaa, 0x49, 0x52, 0x31, 0x91, 0x75, 0x12, 0xec, 0x2e, 0xbe, 0x22, 0x06, - 0x74, 0xbe, 0x76, 0xbc, 0xb6, 0xf0, 0x71, 0x3b, 0x67, 0xff, 0xcb, 0x3c, 0x8c, 0x0b, 0x0f, 0x7e, - 0x11, 0x1a, 0xe6, 0x65, 0x63, 0x49, 0x3c, 0x99, 0x58, 0x12, 0xd3, 0x06, 0xf2, 0xc9, 0x44, 0x85, - 0x09, 0x61, 0xba, 0xe5, 0x84, 0xd1, 0x0d, 0xe2, 0x04, 0xd1, 0x06, 0x71, 0xb8, 0xed, 0x4e, 0xfe, - 0xd8, 0x76, 0x46, 0x4a, 0x45, 0x73, 0x2b, 0x49, 0x0c, 0x77, 0xd3, 0x47, 0x7b, 0x80, 0x98, 0x01, - 0x52, 0xe0, 0x78, 0x21, 0xff, 0x16, 0x57, 0xbc, 0x19, 0x1c, 0xaf, 0xd5, 0x59, 0xd1, 0x2a, 0xba, - 0xd5, 0x45, 0x0d, 0xa7, 0xb4, 0xa0, 0x19, 0x96, 0x0d, 0x0d, 0x6a, 0x58, 0x36, 0xdc, 0xc7, 0xb5, - 0xc6, 0x83, 0xa9, 0xae, 0x20, 0x0c, 0x6f, 0x43, 0x49, 0x19, 0x00, 0x8a, 0x43, 0xa7, 0x77, 0x2c, - 0x93, 0x24, 0x05, 0xae, 0x46, 0x89, 0x8d, 0x4f, 0x63, 0x72, 0xf6, 0x3f, 0xca, 0x19, 0x0d, 0xf2, - 0x49, 0x5c, 0x83, 0xa2, 0x13, 0x86, 0xee, 0x96, 0x47, 0x9a, 0x62, 0xc7, 0x7e, 0x3c, 0x6b, 0xc7, - 0x1a, 0xcd, 0x30, 0x23, 0xcc, 0x05, 0x51, 0x13, 0x2b, 0x1a, 0xe8, 0x06, 0xb7, 0x90, 0xda, 0x93, - 0x3c, 0xff, 0x60, 0xd4, 0x40, 0xda, 0x50, 0xed, 0x11, 0x2c, 0xea, 0xa3, 0x2f, 0x70, 0x13, 0xb6, - 0x9b, 0x9e, 0x7f, 0xdf, 0xbb, 0xee, 0xfb, 0xd2, 0xed, 0x6e, 0x30, 0x82, 0xd3, 
0xd2, 0x70, 0x4d, - 0x55, 0xc7, 0x26, 0xb5, 0xc1, 0x02, 0x15, 0x7d, 0x37, 0x9c, 0xa1, 0xa4, 0x4d, 0xe7, 0x99, 0x10, - 0x11, 0x98, 0x14, 0xe1, 0x21, 0x64, 0x99, 0x18, 0xbb, 0x54, 0x76, 0xde, 0xac, 0x1d, 0x2b, 0xfd, - 0x6e, 0x9a, 0x24, 0x70, 0x92, 0xa6, 0xfd, 0x93, 0x16, 0x30, 0xb3, 0xff, 0x53, 0x60, 0x19, 0x3e, - 0x6b, 0xb2, 0x0c, 0x33, 0x59, 0x83, 0x9c, 0xc1, 0x2d, 0xbc, 0xc4, 0x57, 0x56, 0x2d, 0xf0, 0x1f, - 0xec, 0x0b, 0xf3, 0x81, 0xfe, 0x9c, 0xac, 0xfd, 0xbf, 0x2c, 0x7e, 0x88, 0x29, 0x4f, 0x7c, 0xf4, - 0x3d, 0x50, 0x6c, 0x38, 0x6d, 0xa7, 0xc1, 0xe3, 0x51, 0x67, 0x6a, 0x75, 0x8c, 0x4a, 0x73, 0x4b, - 0xa2, 0x06, 0xd7, 0x52, 0xc8, 0x30, 0x23, 0x45, 0x59, 0xdc, 0x57, 0x33, 0xa1, 0x9a, 0x9c, 0xdd, - 0x81, 0x71, 0x83, 0xd8, 0x23, 0x15, 0x69, 0xbf, 0x87, 0x5f, 0xb1, 0x2a, 0x2c, 0xce, 0x2e, 0x4c, - 0x7b, 0xda, 0x7f, 0x7a, 0xa1, 0x48, 0x31, 0xe5, 0xe3, 0xfd, 0x2e, 0x51, 0x76, 0xfb, 0x68, 0x6e, - 0x0d, 0x09, 0x32, 0xb8, 0x9b, 0xb2, 0xfd, 0xe3, 0x16, 0x3c, 0xae, 0x23, 0x6a, 0x41, 0x12, 0xfa, - 0xe9, 0x89, 0x2b, 0x50, 0xf4, 0xdb, 0x24, 0x70, 0x22, 0x3f, 0x10, 0xb7, 0xc6, 0x55, 0x39, 0xe8, - 0xb7, 0x45, 0xf9, 0x91, 0x08, 0x28, 0x29, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0x52, 0x39, 0x86, 0x0d, - 0x46, 0x28, 0x02, 0x58, 0xb0, 0x33, 0x80, 0x3d, 0x99, 0x86, 0x58, 0x40, 0xec, 0x3f, 0xb4, 0xf8, - 0xc2, 0xd2, 0xbb, 0x8e, 0xde, 0x83, 0xa9, 0x5d, 0x27, 0x6a, 0x6c, 0x2f, 0x3f, 0x68, 0x07, 0x5c, - 0x3d, 0x2e, 0xc7, 0xe9, 0xd9, 0x7e, 0xe3, 0xa4, 0x7d, 0x64, 0x6c, 0x95, 0xb7, 0x9a, 0x20, 0x86, - 0xbb, 0xc8, 0xa3, 0x0d, 0x18, 0x65, 0x65, 0xcc, 0xfc, 0x3b, 0xec, 0xc5, 0x1a, 0x64, 0xb5, 0xa6, - 0x5e, 0x9d, 0x57, 0x63, 0x3a, 0x58, 0x27, 0x6a, 0x7f, 0x39, 0xcf, 0x77, 0x3b, 0xe3, 0xb6, 0x9f, - 0x81, 0x91, 0xb6, 0xdf, 0x5c, 0xaa, 0x56, 0xb0, 0x98, 0x05, 0x75, 0x8d, 0xd4, 0x78, 0x31, 0x96, - 0x70, 0xf4, 0x1a, 0x00, 0x79, 0x10, 0x91, 0xc0, 0x73, 0x5a, 0xca, 0x4a, 0x46, 0xd9, 0x85, 0x56, - 0xfc, 0x35, 0x3f, 0xba, 0x13, 0x92, 0xef, 0x5a, 0x56, 0x28, 0x58, 0x43, 0x47, 0xd7, 0x00, 0xda, - 0x81, 0xbf, 0xe7, 0x36, 0x99, 0x3f, 0x61, 0xde, 0xb4, 0x21, 0xa9, 0x29, 0x08, 0xd6, 0xb0, 0xd0, - 0x6b, 0x30, 0xde, 0xf1, 0x42, 0xce, 0xa1, 0x38, 0x1b, 0x22, 0x1c, 0x63, 0x31, 0xb6, 0x6e, 0xb8, - 0xa3, 0x03, 0xb1, 0x89, 0x8b, 0x16, 0x60, 0x38, 0x72, 0x98, 0x4d, 0xc4, 0x50, 0xb6, 0x31, 0xe7, - 0x3a, 0xc5, 0xd0, 0xa3, 0x21, 0xd3, 0x0a, 0x58, 0x54, 0x44, 0x6f, 0x4b, 0xe7, 0x0c, 0x7e, 0xd6, - 0x0b, 0x2b, 0xea, 0xc1, 0xee, 0x05, 0xcd, 0x35, 0x43, 0x58, 0x67, 0x1b, 0xb4, 0xec, 0x6f, 0x94, - 0x00, 0x62, 0x76, 0x1c, 0xbd, 0xdf, 0x75, 0x1e, 0x3d, 0xd7, 0x9b, 0x81, 0x3f, 0xb9, 0xc3, 0x08, - 0x7d, 0xbf, 0x05, 0xa3, 0x4e, 0xab, 0xe5, 0x37, 0x9c, 0x88, 0x8d, 0x72, 0xae, 0xf7, 0x79, 0x28, - 0xda, 0x5f, 0x88, 0x6b, 0xf0, 0x2e, 0xbc, 0x28, 0x17, 0x9e, 0x06, 0xe9, 0xdb, 0x0b, 0xbd, 0x61, - 0xf4, 0x29, 0x29, 0xa5, 0xf1, 0xe5, 0x31, 0x9b, 0x94, 0xd2, 0x4a, 0xec, 0xe8, 0xd7, 0x04, 0x34, - 0x74, 0xc7, 0x88, 0xb4, 0x57, 0xc8, 0x0e, 0x3a, 0x61, 0x70, 0xa5, 0xfd, 0x82, 0xec, 0xa1, 0x9a, - 0xee, 0x4d, 0x36, 0x94, 0x1d, 0x99, 0x45, 0x13, 0x7f, 0xfa, 0x78, 0x92, 0xbd, 0x0b, 0x93, 0x4d, - 0xf3, 0x6e, 0x17, 0xab, 0xe9, 0xe9, 0x2c, 0xba, 0x09, 0x56, 0x20, 0xbe, 0xcd, 0x13, 0x00, 0x9c, - 0x24, 0x8c, 0x6a, 0xdc, 0xaf, 0xaf, 0xea, 0x6d, 0xfa, 0xc2, 0x1a, 0xdf, 0xce, 0x9c, 0xcb, 0xfd, - 0x30, 0x22, 0xbb, 0x14, 0x33, 0xbe, 0xb4, 0xd7, 0x44, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0x61, 0x98, - 0x39, 0x06, 0x87, 0x33, 0xc5, 0x6c, 0x65, 0xa2, 0x19, 0xd3, 0x22, 0xde, 0x54, 0xec, 0x6f, 0x88, - 0x05, 0x05, 0x74, 0x43, 0x06, 0xbe, 0x09, 0xab, 0xde, 0x9d, 0x90, 0xb0, 0xc0, 0x37, 0xa5, 0xc5, - 0x8f, 
0xc7, 0x31, 0x6d, 0x78, 0x79, 0x6a, 0xde, 0x03, 0xa3, 0x26, 0x65, 0x8e, 0xc4, 0x7f, 0x99, - 0x4e, 0x61, 0x06, 0xb2, 0xbb, 0x67, 0xa6, 0x5c, 0x88, 0x87, 0xf3, 0xae, 0x49, 0x02, 0x27, 0x69, - 0x52, 0x46, 0x93, 0xef, 0x5c, 0x61, 0xcf, 0xdf, 0x6f, 0xff, 0x73, 0xf9, 0x9a, 0x5d, 0x32, 0xbc, - 0x04, 0x8b, 0xfa, 0xa7, 0x7a, 0xeb, 0xcf, 0x7a, 0x30, 0x95, 0xdc, 0xa2, 0x8f, 0x94, 0xcb, 0xf8, - 0xfd, 0x02, 0x4c, 0x98, 0x4b, 0x0a, 0xcd, 0x43, 0x49, 0x10, 0x51, 0x51, 0x58, 0xd5, 0x2e, 0x59, - 0x95, 0x00, 0x1c, 0xe3, 0xb0, 0xe0, 0xbb, 0xac, 0xba, 0x66, 0x87, 0x19, 0x07, 0xdf, 0x55, 0x10, - 0xac, 0x61, 0x51, 0x79, 0x69, 0xc3, 0xf7, 0x23, 0x75, 0xa9, 0xa8, 0x75, 0xb7, 0xc8, 0x4a, 0xb1, - 0x80, 0xd2, 0xcb, 0x64, 0x87, 0x04, 0x1e, 0x69, 0x99, 0xc1, 0xdd, 0xd4, 0x65, 0x72, 0x53, 0x07, - 0x62, 0x13, 0x97, 0xde, 0x92, 0x7e, 0xc8, 0x16, 0xb2, 0x90, 0xca, 0x62, 0xbb, 0xd6, 0x3a, 0x77, - 0xb1, 0x97, 0x70, 0xf4, 0x79, 0x78, 0x5c, 0x79, 0xc4, 0x63, 0xae, 0xa8, 0x96, 0x2d, 0x0e, 0x1b, - 0x4a, 0x94, 0xc7, 0x97, 0xd2, 0xd1, 0x70, 0x56, 0x7d, 0xf4, 0x06, 0x4c, 0x08, 0xce, 0x5d, 0x52, - 0x1c, 0x31, 0x6d, 0x27, 0x6e, 0x1a, 0x50, 0x9c, 0xc0, 0x96, 0xe1, 0xe9, 0x18, 0xf3, 0x2c, 0x29, - 0x14, 0xbb, 0xc3, 0xd3, 0xe9, 0x70, 0xdc, 0x55, 0x03, 0x2d, 0xc0, 0x24, 0x67, 0xad, 0x5c, 0x6f, - 0x8b, 0xcf, 0x89, 0x70, 0xb7, 0x51, 0x5b, 0xea, 0xb6, 0x09, 0xc6, 0x49, 0x7c, 0xf4, 0x2a, 0x8c, - 0x39, 0x41, 0x63, 0xdb, 0x8d, 0x48, 0x23, 0xea, 0x04, 0xdc, 0x0f, 0x47, 0x33, 0x3e, 0x59, 0xd0, - 0x60, 0xd8, 0xc0, 0xb4, 0xdf, 0x87, 0x33, 0x29, 0x9e, 0x7a, 0x74, 0xe1, 0x38, 0x6d, 0x57, 0x7e, - 0x53, 0xc2, 0x42, 0x75, 0xa1, 0x56, 0x95, 0x5f, 0xa3, 0x61, 0xd1, 0xd5, 0xc9, 0x3c, 0xfa, 0xb4, - 0xec, 0x29, 0x6a, 0x75, 0xae, 0x48, 0x00, 0x8e, 0x71, 0xec, 0xff, 0x91, 0x83, 0xc9, 0x14, 0xe5, - 0x3b, 0xcb, 0xe0, 0x91, 0x90, 0x3d, 0xe2, 0x84, 0x1d, 0x66, 0xb4, 0xc3, 0xdc, 0x31, 0xa2, 0x1d, - 0xe6, 0xfb, 0x45, 0x3b, 0x2c, 0x7c, 0x90, 0x68, 0x87, 0xe6, 0x88, 0x0d, 0x0d, 0x34, 0x62, 0x29, - 0x11, 0x12, 0x87, 0x8f, 0x19, 0x21, 0xd1, 0x18, 0xf4, 0x91, 0x01, 0x06, 0xfd, 0x6b, 0x39, 0x98, - 0x4a, 0x1a, 0xc9, 0x9d, 0x82, 0x3a, 0xf6, 0x4d, 0x43, 0x1d, 0x9b, 0x9e, 0x0f, 0x27, 0x69, 0xba, - 0x97, 0xa5, 0x9a, 0xc5, 0x09, 0xd5, 0xec, 0x27, 0x07, 0xa2, 0xd6, 0x5b, 0x4d, 0xfb, 0x77, 0x73, - 0x70, 0x2e, 0x59, 0x65, 0xa9, 0xe5, 0xb8, 0xbb, 0xa7, 0x30, 0x36, 0xb7, 0x8d, 0xb1, 0x79, 0x7e, - 0x90, 0xaf, 0x61, 0x5d, 0xcb, 0x1c, 0xa0, 0x7b, 0x89, 0x01, 0x9a, 0x1f, 0x9c, 0x64, 0xef, 0x51, - 0xfa, 0x66, 0x1e, 0x2e, 0xa5, 0xd6, 0x8b, 0xb5, 0x99, 0x2b, 0x86, 0x36, 0xf3, 0x5a, 0x42, 0x9b, - 0x69, 0xf7, 0xae, 0x7d, 0x32, 0xea, 0x4d, 0xe1, 0x42, 0xc9, 0x22, 0xe2, 0x3d, 0xa4, 0x6a, 0xd3, - 0x70, 0xa1, 0x54, 0x84, 0xb0, 0x49, 0xf7, 0x2f, 0x93, 0x4a, 0xf3, 0xdf, 0x58, 0x70, 0x3e, 0x75, - 0x6e, 0x4e, 0x41, 0x85, 0xb5, 0x66, 0xaa, 0xb0, 0x9e, 0x19, 0x78, 0xb5, 0x66, 0xe8, 0xb4, 0x7e, - 0xa3, 0x90, 0xf1, 0x2d, 0x4c, 0x40, 0xbf, 0x0d, 0xa3, 0x4e, 0xa3, 0x41, 0xc2, 0x70, 0xd5, 0x6f, - 0xaa, 0x08, 0x71, 0xcf, 0x33, 0x39, 0x2b, 0x2e, 0x3e, 0x3a, 0x28, 0xcf, 0x26, 0x49, 0xc4, 0x60, - 0xac, 0x53, 0x30, 0x83, 0x5a, 0xe6, 0x4e, 0x34, 0xa8, 0xe5, 0x35, 0x80, 0x3d, 0xc5, 0xad, 0x27, - 0x85, 0x7c, 0x8d, 0x8f, 0xd7, 0xb0, 0xd0, 0x17, 0xa0, 0x18, 0x8a, 0x6b, 0x5c, 0x2c, 0xc5, 0x17, - 0x07, 0x9c, 0x2b, 0x67, 0x83, 0xb4, 0x4c, 0x5f, 0x7d, 0xa5, 0x0f, 0x51, 0x24, 0xd1, 0x77, 0xc0, - 0x54, 0xc8, 0x43, 0xc1, 0x2c, 0xb5, 0x9c, 0x90, 0xf9, 0x41, 0x88, 0x55, 0xc8, 0x1c, 0xf0, 0xeb, - 0x09, 0x18, 0xee, 0xc2, 0x46, 0x2b, 0xf2, 0xa3, 0x58, 0xdc, 0x1a, 0xbe, 0x30, 0xaf, 0xc4, 0x1f, - 0x24, 0xf2, 0x87, 0x9d, 0x4d, 
[generated fileDescriptor hunk elided: the old gzipped FileDescriptorProto byte array is removed and the regenerated array ("// 13009 bytes of a gzipped FileDescriptorProto", beginning 0x1f, 0x8b, ...) is added in its place; the raw hex listing is omitted here]
0x0a, 0xd0, 0x0e, 0x83, 0x3d, + 0xcf, 0x65, 0xfe, 0x84, 0x45, 0xd3, 0x86, 0xa4, 0xae, 0x20, 0x58, 0xc3, 0x42, 0xaf, 0xc2, 0x44, + 0xc7, 0x8f, 0x38, 0x87, 0xe2, 0x6c, 0x88, 0x70, 0x8c, 0xa5, 0xc4, 0xba, 0xe1, 0xb6, 0x0e, 0xc4, + 0x26, 0x2e, 0x5a, 0x80, 0x91, 0xd8, 0x61, 0x36, 0x11, 0xc3, 0xf9, 0xc6, 0x9c, 0xeb, 0x14, 0x43, + 0x0f, 0xc8, 0x4c, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x25, 0x9d, 0x33, 0xf8, 0x59, 0x2f, 0xac, 0xa8, + 0x07, 0xbb, 0x17, 0x34, 0xd7, 0x0c, 0x61, 0x9d, 0x6d, 0xd0, 0xb2, 0xbf, 0x5e, 0x06, 0x48, 0xd8, + 0x71, 0xf4, 0x5e, 0xd7, 0x79, 0xf4, 0x5c, 0x6f, 0x06, 0xfe, 0xf8, 0x0e, 0x23, 0xf4, 0x7d, 0x16, + 0x8c, 0x39, 0xad, 0x56, 0xd0, 0x74, 0x62, 0x36, 0xca, 0x85, 0xde, 0xe7, 0xa1, 0x68, 0x7f, 0x21, + 0xa9, 0xc1, 0xbb, 0xf0, 0xa2, 0x5c, 0x78, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0x27, 0xa4, + 0x94, 0xc6, 0x97, 0xc7, 0x5c, 0x5a, 0x4a, 0x2b, 0xb3, 0xa3, 0x5f, 0x13, 0xd0, 0xd0, 0x6d, 0x23, + 0xd2, 0xde, 0x50, 0x7e, 0xd0, 0x09, 0x83, 0x2b, 0xed, 0x17, 0x64, 0x0f, 0xd5, 0x75, 0x6f, 0xb2, + 0xe1, 0xfc, 0xc8, 0x2c, 0x9a, 0xf8, 0xd3, 0xc7, 0x93, 0xec, 0x1d, 0x98, 0x72, 0xcd, 0xbb, 0x5d, + 0xac, 0xa6, 0xa7, 0xf3, 0xe8, 0xa6, 0x58, 0x81, 0xe4, 0x36, 0x4f, 0x01, 0x70, 0x9a, 0x30, 0xaa, + 0x73, 0xbf, 0xbe, 0x9a, 0xbf, 0x19, 0x08, 0x6b, 0x7c, 0x3b, 0x77, 0x2e, 0xf7, 0xa3, 0x98, 0xec, + 0x52, 0xcc, 0xe4, 0xd2, 0x5e, 0x13, 0x75, 0xb1, 0xa2, 0x82, 0xde, 0x80, 0x11, 0xe6, 0x18, 0x1c, + 0xcd, 0x96, 0xf2, 0x95, 0x89, 0x66, 0x4c, 0x8b, 0x64, 0x53, 0xb1, 0xbf, 0x11, 0x16, 0x14, 0xd0, + 0x75, 0x19, 0xf8, 0x26, 0xaa, 0xf9, 0xb7, 0x23, 0xc2, 0x02, 0xdf, 0x94, 0x17, 0x3f, 0x9a, 0xc4, + 0xb4, 0xe1, 0xe5, 0x99, 0xa9, 0x17, 0x8c, 0x9a, 0x94, 0x39, 0x12, 0xff, 0x65, 0x46, 0x87, 0x59, + 0xc8, 0xef, 0x9e, 0x99, 0xf5, 0x21, 0x19, 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x19, 0x4d, + 0xbe, 0x73, 0x85, 0x3d, 0x7f, 0xbf, 0xfd, 0xcf, 0xe5, 0x6b, 0x76, 0xc9, 0xf0, 0x12, 0x2c, 0xea, + 0x9f, 0xe8, 0xad, 0x3f, 0xe7, 0xc3, 0x74, 0x7a, 0x8b, 0x3e, 0x52, 0x2e, 0xe3, 0xf7, 0x86, 0x60, + 0xd2, 0x5c, 0x52, 0xe8, 0x0a, 0x94, 0x05, 0x11, 0x15, 0x85, 0x55, 0xed, 0x92, 0x55, 0x09, 0xc0, + 0x09, 0x0e, 0x0b, 0xbe, 0xcb, 0xaa, 0x6b, 0x76, 0x98, 0x49, 0xf0, 0x5d, 0x05, 0xc1, 0x1a, 0x16, + 0x95, 0x97, 0x36, 0x82, 0x20, 0x56, 0x97, 0x8a, 0x5a, 0x77, 0x8b, 0xac, 0x14, 0x0b, 0x28, 0xbd, + 0x4c, 0x76, 0x48, 0xe8, 0x93, 0x96, 0x19, 0xdc, 0x4d, 0x5d, 0x26, 0x37, 0x74, 0x20, 0x36, 0x71, + 0xe9, 0x2d, 0x19, 0x44, 0x6c, 0x21, 0x0b, 0xa9, 0x2c, 0xb1, 0x6b, 0x6d, 0x70, 0x17, 0x7b, 0x09, + 0x47, 0x9f, 0x83, 0xc7, 0x95, 0x47, 0x3c, 0xe6, 0x8a, 0x6a, 0xd9, 0xe2, 0x88, 0xa1, 0x44, 0x79, + 0x7c, 0x29, 0x1b, 0x0d, 0xe7, 0xd5, 0x47, 0xaf, 0xc3, 0xa4, 0xe0, 0xdc, 0x25, 0xc5, 0x51, 0xd3, + 0x76, 0xe2, 0x86, 0x01, 0xc5, 0x29, 0x6c, 0x19, 0x9e, 0x8e, 0x31, 0xcf, 0x92, 0x42, 0xa9, 0x3b, + 0x3c, 0x9d, 0x0e, 0xc7, 0x5d, 0x35, 0xd0, 0x02, 0x4c, 0x71, 0xd6, 0xca, 0xf3, 0xb7, 0xf8, 0x9c, + 0x08, 0x77, 0x1b, 0xb5, 0xa5, 0x6e, 0x99, 0x60, 0x9c, 0xc6, 0x47, 0xaf, 0xc0, 0xb8, 0x13, 0x36, + 0xb7, 0xbd, 0x98, 0x34, 0xe3, 0x4e, 0xc8, 0xfd, 0x70, 0x34, 0xe3, 0x93, 0x05, 0x0d, 0x86, 0x0d, + 0x4c, 0xfb, 0x3d, 0x38, 0x95, 0xe1, 0xa9, 0x47, 0x17, 0x8e, 0xd3, 0xf6, 0xe4, 0x37, 0xa5, 0x2c, + 0x54, 0x17, 0xea, 0x35, 0xf9, 0x35, 0x1a, 0x16, 0x5d, 0x9d, 0xcc, 0xa3, 0x4f, 0x4b, 0xe0, 0xa2, + 0x56, 0xe7, 0x8a, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0xdf, 0x05, 0x98, 0xca, 0x50, 0xbe, 0xb3, 0x24, + 0x22, 0x29, 0xd9, 0x23, 0xc9, 0x19, 0x62, 0x46, 0x3b, 0x2c, 0x1c, 0x21, 0xda, 0x61, 0xb1, 0x5f, + 0xb4, 0xc3, 0xa1, 0xf7, 0x13, 0xed, 0xd0, 0x1c, 0xb1, 0xe1, 0x81, 0x46, 0x2c, 0x23, 0x42, 
0xe2, + 0xc8, 0x11, 0x23, 0x24, 0x1a, 0x83, 0x3e, 0x3a, 0xc0, 0xa0, 0x7f, 0xb5, 0x00, 0xd3, 0x69, 0x23, + 0xb9, 0x13, 0x50, 0xc7, 0xbe, 0x61, 0xa8, 0x63, 0xb3, 0x53, 0xf2, 0xa4, 0x4d, 0xf7, 0xf2, 0x54, + 0xb3, 0x38, 0xa5, 0x9a, 0xfd, 0xf8, 0x40, 0xd4, 0x7a, 0xab, 0x69, 0xff, 0x41, 0x01, 0xce, 0xa4, + 0xab, 0x2c, 0xb5, 0x1c, 0x6f, 0xf7, 0x04, 0xc6, 0xe6, 0x96, 0x31, 0x36, 0xcf, 0x0f, 0xf2, 0x35, + 0xac, 0x6b, 0xb9, 0x03, 0x74, 0x37, 0x35, 0x40, 0x57, 0x06, 0x27, 0xd9, 0x7b, 0x94, 0xbe, 0x51, + 0x84, 0x0b, 0x99, 0xf5, 0x12, 0x6d, 0xe6, 0x8a, 0xa1, 0xcd, 0xbc, 0x9a, 0xd2, 0x66, 0xda, 0xbd, + 0x6b, 0x1f, 0x8f, 0x7a, 0x53, 0xb8, 0x50, 0xb2, 0x88, 0x78, 0x0f, 0xa9, 0xda, 0x34, 0x5c, 0x28, + 0x15, 0x21, 0x6c, 0xd2, 0xfd, 0x8b, 0xa4, 0xd2, 0xfc, 0xf7, 0x16, 0x9c, 0xcd, 0x9c, 0x9b, 0x13, + 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, 0x39, 0x3a, 0xad, 0x5f, 0x1f, 0xca, + 0xf9, 0x16, 0x26, 0xa0, 0xdf, 0x82, 0x31, 0xa7, 0xd9, 0x24, 0x51, 0xb4, 0x1a, 0xb8, 0x2a, 0x42, + 0xdc, 0xf3, 0x4c, 0xce, 0x4a, 0x8a, 0x0f, 0x0f, 0x2a, 0x73, 0x69, 0x12, 0x09, 0x18, 0xeb, 0x14, + 0xcc, 0xa0, 0x96, 0x85, 0x63, 0x0d, 0x6a, 0x79, 0x15, 0x60, 0x4f, 0x71, 0xeb, 0x69, 0x21, 0x5f, + 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x79, 0x28, 0x45, 0xe2, 0x1a, 0x17, 0x4b, 0xf1, 0xc5, 0x01, 0xe7, + 0xca, 0xd9, 0x20, 0x2d, 0xd3, 0x57, 0x5f, 0xe9, 0x43, 0x14, 0x49, 0xf4, 0x6d, 0x30, 0x1d, 0xf1, + 0x50, 0x30, 0x4b, 0x2d, 0x27, 0x62, 0x7e, 0x10, 0x62, 0x15, 0x32, 0x07, 0xfc, 0x46, 0x0a, 0x86, + 0xbb, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, 0xb7, 0x86, 0x2f, 0xcc, 0x4b, 0xc9, 0x07, 0x89, 0x14, + 0x66, 0xa7, 0xd3, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0x3e, 0x0f, 0x40, 0x97, 0x8f, 0xd0, 0x25, + 0x8c, 0xe6, 0x1f, 0x9e, 0xf4, 0x54, 0x71, 0x33, 0x2d, 0x3f, 0x99, 0xf3, 0x62, 0x55, 0x11, 0xc1, + 0x1a, 0x41, 0xfb, 0xab, 0x43, 0xf0, 0x44, 0x8f, 0x33, 0x12, 0x2d, 0x98, 0x4f, 0xa0, 0xcf, 0xa6, + 0x85, 0xeb, 0xb9, 0xcc, 0xca, 0x86, 0xb4, 0x9d, 0x5a, 0x8a, 0x85, 0xf7, 0xbd, 0x14, 0x7f, 0xc0, + 0xd2, 0xd4, 0x1e, 0xdc, 0x98, 0xef, 0x33, 0x47, 0x3c, 0xfb, 0x8f, 0x51, 0x0f, 0xb2, 0x99, 0xa1, + 0x4c, 0xb8, 0x3a, 0x70, 0x77, 0x06, 0xd6, 0x2e, 0x9c, 0xac, 0xf2, 0xf7, 0x4b, 0x16, 0x3c, 0x99, + 0xd9, 0x5f, 0xc3, 0x64, 0xe3, 0x0a, 0x94, 0x9b, 0xb4, 0x50, 0xf3, 0x55, 0x4b, 0x9c, 0x78, 0x25, + 0x00, 0x27, 0x38, 0x86, 0x65, 0x46, 0xa1, 0xaf, 0x65, 0xc6, 0xbf, 0xb6, 0xa0, 0x6b, 0x7f, 0x9c, + 0xc0, 0x41, 0x5d, 0x33, 0x0f, 0xea, 0x8f, 0x0e, 0x32, 0x97, 0x39, 0x67, 0xf4, 0x1f, 0x4d, 0xc1, + 0x63, 0x39, 0xbe, 0x1a, 0x7b, 0x30, 0xb3, 0xd5, 0x24, 0xa6, 0x17, 0xa0, 0xf8, 0x98, 0x4c, 0x87, + 0xc9, 0x9e, 0x2e, 0x83, 0x2c, 0x1f, 0xd1, 0x4c, 0x17, 0x0a, 0xee, 0x6e, 0x02, 0x7d, 0xc9, 0x82, + 0xd3, 0xce, 0xbd, 0xa8, 0x2b, 0x81, 0xa9, 0x58, 0x33, 0x2f, 0x65, 0x2a, 0x41, 0xfa, 0x24, 0x3c, + 0xe5, 0x09, 0x9a, 0xb2, 0xb0, 0x70, 0x66, 0x5b, 0x08, 0x8b, 0x98, 0xa1, 0x94, 0x9d, 0xef, 0xe1, + 0xa7, 0x9a, 0xe5, 0x54, 0xc3, 0x8f, 0x6c, 0x09, 0xc1, 0x8a, 0x0e, 0xfa, 0x22, 0x94, 0xb7, 0xa4, + 0xa7, 0x5b, 0xc6, 0x95, 0x90, 0x0c, 0x64, 0x6f, 0xff, 0x3f, 0xfe, 0x40, 0xa9, 0x90, 0x70, 0x42, + 0x14, 0xbd, 0x0e, 0x45, 0x7f, 0x33, 0xea, 0x95, 0xe3, 0x28, 0x65, 0xd3, 0xc4, 0xbd, 0xc1, 0xd7, + 0x56, 0x1a, 0x98, 0x56, 0x44, 0xd7, 0xa1, 0x18, 0x6e, 0xb8, 0x42, 0x83, 0x97, 0x79, 0x86, 0xe3, + 0xc5, 0x6a, 0x4e, 0xaf, 0x18, 0x25, 0xbc, 0x58, 0xc5, 0x94, 0x04, 0xaa, 0xc3, 0x30, 0x73, 0x70, + 0x10, 0xf7, 0x41, 0x26, 0xe7, 0xdb, 0xc3, 0x51, 0x88, 0xbb, 0x8c, 0x33, 0x04, 0xcc, 0x09, 0xa1, + 0x75, 0x18, 0x69, 0xb2, 0x7c, 0x38, 0x22, 0x60, 0xf5, 0x27, 0x32, 0x75, 0x75, 0x3d, 0x12, 0x05, + 0x09, 0xd5, 0x15, 
0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, 0xdb, 0x9b, 0x91, 0xc8, 0xdf, 0x96, + 0x4d, 0xb5, 0x47, 0xfe, 0x2b, 0x41, 0x95, 0x61, 0x60, 0x41, 0x0b, 0x7d, 0x1a, 0x0a, 0x9b, 0x4d, + 0xe1, 0xff, 0x90, 0xa9, 0xb4, 0x33, 0x1d, 0xfa, 0x17, 0x47, 0x1e, 0x1c, 0x54, 0x0a, 0x2b, 0x4b, + 0xb8, 0xb0, 0xd9, 0x44, 0x6b, 0x30, 0xba, 0xc9, 0x5d, 0x80, 0x85, 0x5e, 0xee, 0xe9, 0x6c, 0xef, + 0xe4, 0x2e, 0x2f, 0x61, 0x6e, 0xb7, 0x2f, 0x00, 0x58, 0x12, 0x61, 0x21, 0x38, 0x95, 0x2b, 0xb3, + 0x88, 0x45, 0x3d, 0x7f, 0x34, 0xf7, 0x73, 0x7e, 0x3f, 0x27, 0x0e, 0xd1, 0x58, 0xa3, 0x48, 0x57, + 0xb5, 0x23, 0x93, 0x68, 0x8a, 0x58, 0x1d, 0x99, 0xab, 0xba, 0x4f, 0x7e, 0x51, 0xbe, 0xaa, 0x15, + 0x12, 0x4e, 0x88, 0xa2, 0x1d, 0x98, 0xd8, 0x8b, 0xda, 0xdb, 0x44, 0x6e, 0x69, 0x16, 0xba, 0x23, + 0xe7, 0x0a, 0xbb, 0x23, 0x10, 0xbd, 0x30, 0xee, 0x38, 0xad, 0xae, 0x53, 0x88, 0xbd, 0x6a, 0xdf, + 0xd1, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, 0x77, 0x3b, 0xc1, 0xc6, 0x7e, 0x4c, 0x44, 0xf0, 0xea, + 0xcc, 0xe1, 0x7f, 0x93, 0xa3, 0x74, 0x0f, 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x11, 0xc3, 0xc3, + 0x4e, 0xcf, 0xe9, 0xfc, 0x60, 0x4a, 0x99, 0x59, 0x6c, 0xb5, 0x41, 0x61, 0xa7, 0x65, 0x42, 0x8a, + 0x9d, 0x92, 0xed, 0xed, 0x20, 0x0e, 0xfc, 0xd4, 0x09, 0x3d, 0x93, 0x7f, 0x4a, 0xd6, 0x33, 0xf0, + 0xbb, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x72, 0x61, 0xb2, 0x1d, 0x84, 0xf1, 0xbd, 0x20, + 0x94, 0xeb, 0x0b, 0xf5, 0xd0, 0x2b, 0x18, 0x98, 0xa2, 0x45, 0x16, 0x4c, 0xdd, 0x84, 0xe0, 0x14, + 0x4d, 0xf4, 0x59, 0x18, 0x8d, 0x9a, 0x4e, 0x8b, 0xd4, 0x6e, 0xcd, 0x9e, 0xca, 0xbf, 0x7e, 0x1a, + 0x1c, 0x25, 0x67, 0x75, 0xb1, 0xc9, 0x11, 0x28, 0x58, 0x92, 0x43, 0x2b, 0x30, 0xcc, 0x32, 0x22, + 0xb0, 0xb8, 0xdb, 0x39, 0x31, 0xa1, 0xba, 0x2c, 0x4c, 0xf9, 0xd9, 0xc4, 0x8a, 0x31, 0xaf, 0x4e, + 0xf7, 0x80, 0x60, 0xaf, 0x83, 0x68, 0xf6, 0x4c, 0xfe, 0x1e, 0x10, 0x5c, 0xf9, 0xad, 0x46, 0xaf, + 0x3d, 0xa0, 0x90, 0x70, 0x42, 0x94, 0x9e, 0xcc, 0xf4, 0x34, 0x7d, 0xac, 0x87, 0x41, 0x4b, 0xee, + 0x59, 0xca, 0x4e, 0x66, 0x7a, 0x92, 0x52, 0x12, 0xf6, 0xef, 0x8c, 0x76, 0xf3, 0x2c, 0x4c, 0x20, + 0xfb, 0xab, 0x56, 0xd7, 0x5b, 0xdd, 0x27, 0x07, 0xd5, 0x0f, 0x1d, 0x23, 0xb7, 0xfa, 0x25, 0x0b, + 0x1e, 0x6b, 0x67, 0x7e, 0x88, 0x60, 0x00, 0x06, 0x53, 0x33, 0xf1, 0x4f, 0x57, 0xb1, 0xf1, 0xb3, + 0xe1, 0x38, 0xa7, 0xa5, 0xb4, 0x44, 0x50, 0x7c, 0xdf, 0x12, 0xc1, 0x2a, 0x94, 0x18, 0x93, 0xd9, + 0x27, 0x3f, 0x5c, 0x5a, 0x30, 0x62, 0xac, 0xc4, 0x92, 0xa8, 0x88, 0x15, 0x09, 0xf4, 0x83, 0x16, + 0x9c, 0x4f, 0x77, 0x1d, 0x13, 0x06, 0x16, 0x91, 0xe4, 0xb9, 0x2c, 0xb8, 0x22, 0xbe, 0xff, 0x7c, + 0xbd, 0x17, 0xf2, 0x61, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0xcd, 0x10, 0x46, 0x47, 0x4c, 0x05, + 0xfc, 0x00, 0x02, 0xe9, 0x4b, 0x30, 0xbe, 0x1b, 0x74, 0xfc, 0x58, 0xd8, 0xbf, 0x08, 0x8f, 0x45, + 0xf6, 0xe0, 0xbc, 0xaa, 0x95, 0x63, 0x03, 0x2b, 0x25, 0xc6, 0x96, 0x1e, 0x5a, 0x8c, 0x7d, 0x3b, + 0x95, 0x50, 0xbe, 0x9c, 0x1f, 0xb1, 0x50, 0x48, 0xfc, 0x47, 0x48, 0x2b, 0x7f, 0xb2, 0xb2, 0xd1, + 0x4f, 0x59, 0x19, 0x4c, 0x3d, 0x97, 0x96, 0x5f, 0x33, 0xa5, 0xe5, 0x4b, 0x69, 0x69, 0xb9, 0x4b, + 0xf9, 0x6a, 0x08, 0xca, 0x83, 0x87, 0xbd, 0x1e, 0x34, 0x8e, 0x9c, 0xdd, 0x82, 0x8b, 0xfd, 0xae, + 0x25, 0x66, 0x08, 0xe5, 0xaa, 0xa7, 0xb6, 0xc4, 0x10, 0xca, 0xad, 0x55, 0x31, 0x83, 0x0c, 0x1a, + 0x68, 0xc4, 0xfe, 0x9f, 0x16, 0x14, 0xeb, 0x81, 0x7b, 0x02, 0xca, 0xe4, 0xcf, 0x18, 0xca, 0xe4, + 0x27, 0x72, 0x12, 0xfd, 0xe7, 0xaa, 0x8e, 0x97, 0x53, 0xaa, 0xe3, 0xf3, 0x79, 0x04, 0x7a, 0x2b, + 0x8a, 0x7f, 0xbc, 0x08, 0x63, 0xf5, 0xc0, 0x55, 0x56, 0xc8, 0xbf, 0xfe, 0x30, 0x56, 0xc8, 0xb9, + 0x61, 0x61, 0x35, 0xca, 0xcc, 0x7e, 0x4a, 
0x3a, 0xe1, 0xfd, 0x39, 0x33, 0x46, 0xbe, 0x4b, 0xbc, + 0xad, 0xed, 0x98, 0xb8, 0xe9, 0xcf, 0x39, 0x39, 0x63, 0xe4, 0xff, 0x61, 0xc1, 0x54, 0xaa, 0x75, + 0xd4, 0x82, 0x89, 0x96, 0xae, 0x09, 0x14, 0xeb, 0xf4, 0xa1, 0x94, 0x88, 0xc2, 0x98, 0x53, 0x2b, + 0xc2, 0x26, 0x71, 0x34, 0x0f, 0xa0, 0x5e, 0xea, 0xa4, 0x06, 0x8c, 0x71, 0xfd, 0xea, 0x29, 0x2f, + 0xc2, 0x1a, 0x06, 0x7a, 0x19, 0xc6, 0xe2, 0xa0, 0x1d, 0xb4, 0x82, 0xad, 0xfd, 0x1b, 0x44, 0x86, + 0xb6, 0x51, 0x26, 0x5a, 0xeb, 0x09, 0x08, 0xeb, 0x78, 0xf6, 0x4f, 0x16, 0xf9, 0x87, 0xfa, 0xb1, + 0xf7, 0xcd, 0x35, 0xf9, 0xe1, 0x5e, 0x93, 0xdf, 0xb0, 0x60, 0x9a, 0xb6, 0xce, 0xcc, 0x45, 0xe4, + 0x65, 0xab, 0xd2, 0xef, 0x58, 0x3d, 0xd2, 0xef, 0x5c, 0xa2, 0x67, 0x97, 0x1b, 0x74, 0x62, 0xa1, + 0x41, 0xd3, 0x0e, 0x27, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x30, 0x14, 0x3e, 0x50, 0x3a, 0x1e, + 0x09, 0x43, 0x2c, 0xa0, 0x32, 0x3b, 0xcf, 0x50, 0x4e, 0x76, 0x1e, 0x16, 0xa8, 0x4f, 0x18, 0x16, + 0x08, 0xb6, 0x47, 0x0b, 0xd4, 0x27, 0x2d, 0x0e, 0x12, 0x1c, 0xfb, 0xe7, 0x8a, 0x30, 0x5e, 0x0f, + 0xdc, 0xe4, 0xad, 0xec, 0x25, 0xe3, 0xad, 0xec, 0x62, 0xea, 0xad, 0x6c, 0x5a, 0xc7, 0xfd, 0xe6, + 0xcb, 0xd8, 0x07, 0xf5, 0x32, 0xf6, 0xaf, 0x2c, 0x36, 0x6b, 0xd5, 0xb5, 0x86, 0xc8, 0x0e, 0xfc, + 0x02, 0x8c, 0xb1, 0x03, 0x89, 0x39, 0xdd, 0xc9, 0x07, 0x24, 0x16, 0x78, 0x7f, 0x2d, 0x29, 0xc6, + 0x3a, 0x0e, 0xba, 0x0c, 0xa5, 0x88, 0x38, 0x61, 0x73, 0x5b, 0x9d, 0x71, 0xe2, 0x79, 0x85, 0x97, + 0x61, 0x05, 0x45, 0x6f, 0x26, 0x31, 0xe2, 0x8a, 0xf9, 0x79, 0x6e, 0xf5, 0xfe, 0xf0, 0x2d, 0x92, + 0x1f, 0x18, 0xce, 0xbe, 0x0b, 0xa8, 0x1b, 0x7f, 0x80, 0xe0, 0x48, 0x15, 0x33, 0x38, 0x52, 0xb9, + 0x2b, 0x30, 0xd2, 0x9f, 0x5a, 0x30, 0x59, 0x0f, 0x5c, 0xba, 0x75, 0xff, 0x22, 0xed, 0x53, 0x3d, + 0x40, 0xe6, 0x48, 0x8f, 0x00, 0x99, 0xff, 0xd0, 0x82, 0xd1, 0x7a, 0xe0, 0x9e, 0x80, 0xde, 0xfd, + 0x35, 0x53, 0xef, 0xfe, 0x78, 0xce, 0x92, 0xc8, 0x51, 0xb5, 0xff, 0x42, 0x11, 0x26, 0x68, 0x3f, + 0x83, 0x2d, 0x39, 0x4b, 0xc6, 0x88, 0x58, 0x03, 0x8c, 0x08, 0x65, 0x73, 0x83, 0x56, 0x2b, 0xb8, + 0x97, 0x9e, 0xb1, 0x15, 0x56, 0x8a, 0x05, 0x14, 0x3d, 0x07, 0xa5, 0x76, 0x48, 0xf6, 0xbc, 0x40, + 0xf0, 0x8f, 0xda, 0x2b, 0x46, 0x5d, 0x94, 0x63, 0x85, 0x41, 0xe5, 0xae, 0xc8, 0xf3, 0x9b, 0x44, + 0x26, 0xd9, 0x1e, 0x62, 0x79, 0xb8, 0x78, 0xe4, 0x6b, 0xad, 0x1c, 0x1b, 0x58, 0xe8, 0x2e, 0x94, + 0xd9, 0x7f, 0x76, 0xa2, 0x1c, 0x3d, 0x6f, 0x90, 0x48, 0x37, 0x21, 0x08, 0xe0, 0x84, 0x16, 0xba, + 0x0a, 0x10, 0xcb, 0xe8, 0xc8, 0x91, 0x88, 0x71, 0xa3, 0x78, 0x6d, 0x15, 0x37, 0x39, 0xc2, 0x1a, + 0x16, 0x7a, 0x16, 0xca, 0xb1, 0xe3, 0xb5, 0x6e, 0x7a, 0x3e, 0x89, 0x98, 0xca, 0xb9, 0x28, 0xb3, + 0x49, 0x88, 0x42, 0x9c, 0xc0, 0x29, 0xaf, 0xc3, 0x1c, 0xc0, 0x79, 0xd6, 0xb1, 0x12, 0xc3, 0x66, + 0xbc, 0xce, 0x4d, 0x55, 0x8a, 0x35, 0x0c, 0xfb, 0x15, 0x38, 0x53, 0x0f, 0xdc, 0x7a, 0x10, 0xc6, + 0x2b, 0x41, 0x78, 0xcf, 0x09, 0x5d, 0x39, 0x7f, 0x15, 0x99, 0xd8, 0x80, 0x9e, 0x3d, 0xc3, 0x7c, + 0x67, 0x1a, 0x29, 0x0b, 0x5e, 0x64, 0xdc, 0xce, 0x11, 0x9d, 0x3a, 0x9a, 0xec, 0xde, 0x55, 0x09, + 0x06, 0xaf, 0x39, 0x31, 0x41, 0xb7, 0x58, 0x52, 0xb2, 0xe4, 0x0a, 0x12, 0xd5, 0x9f, 0xd1, 0x92, + 0x92, 0x25, 0xc0, 0xcc, 0x3b, 0xcb, 0xac, 0x6f, 0xff, 0xcc, 0x10, 0x3b, 0x8d, 0x52, 0xf9, 0xf6, + 0xd0, 0x17, 0x60, 0x32, 0x22, 0x37, 0x3d, 0xbf, 0x73, 0x5f, 0x0a, 0xe1, 0x3d, 0xdc, 0x72, 0x1a, + 0xcb, 0x3a, 0x26, 0x57, 0xe5, 0x99, 0x65, 0x38, 0x45, 0x8d, 0xce, 0x53, 0xd8, 0xf1, 0x17, 0xa2, + 0xdb, 0x11, 0x09, 0x45, 0xbe, 0x37, 0x36, 0x4f, 0x58, 0x16, 0xe2, 0x04, 0x4e, 0xd7, 0x25, 0xfb, + 0xb3, 0x16, 0xf8, 0x38, 0x08, 0x62, 0xb9, 0x92, 0x59, 0xc6, 0x20, 
0xad, 0x1c, 0x1b, 0x58, 0x68, + 0x05, 0x50, 0xd4, 0x69, 0xb7, 0x5b, 0xec, 0x61, 0xdf, 0x69, 0x5d, 0x0b, 0x83, 0x4e, 0x9b, 0xbf, + 0x7a, 0x16, 0x79, 0x60, 0xc2, 0x46, 0x17, 0x14, 0x67, 0xd4, 0xa0, 0xa7, 0xcf, 0x66, 0xc4, 0x7e, + 0xb3, 0xd5, 0x5d, 0x14, 0xea, 0xf5, 0x06, 0x2b, 0xc2, 0x12, 0x46, 0x17, 0x13, 0x6b, 0x9e, 0x63, + 0x8e, 0x24, 0x8b, 0x09, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x65, 0x18, 0x8d, 0xf6, 0xa3, 0x66, 0x2c, + 0x22, 0x32, 0xe5, 0x64, 0xee, 0x6c, 0x30, 0x14, 0x2d, 0x9b, 0x04, 0xaf, 0x82, 0x65, 0x5d, 0xb4, + 0x0b, 0x93, 0xf7, 0x3c, 0xdf, 0x0d, 0xee, 0x45, 0x72, 0xa2, 0x4a, 0xf9, 0xaa, 0xd1, 0xbb, 0x1c, + 0x33, 0x35, 0xd9, 0xc6, 0xbc, 0xdd, 0x35, 0x88, 0xe1, 0x14, 0x71, 0xfb, 0xbb, 0xd8, 0xdd, 0xcb, + 0x92, 0x91, 0xc5, 0x9d, 0x90, 0xa0, 0x5d, 0x98, 0x68, 0xb3, 0x15, 0x26, 0x42, 0x65, 0x8b, 0x65, + 0xf2, 0xd2, 0x80, 0x42, 0xf4, 0x3d, 0x7a, 0xae, 0x29, 0x25, 0x17, 0x93, 0x4e, 0xea, 0x3a, 0x39, + 0x6c, 0x52, 0xb7, 0xbf, 0x8a, 0xd8, 0x11, 0xdf, 0xe0, 0x92, 0xf1, 0xa8, 0xb0, 0x64, 0x16, 0x62, + 0xc0, 0x5c, 0xbe, 0x8a, 0x26, 0x19, 0x40, 0x61, 0x0d, 0x8d, 0x65, 0x5d, 0xf4, 0x26, 0x7b, 0x14, + 0xe7, 0xe7, 0x6a, 0xbf, 0x9c, 0xd0, 0x1c, 0xcb, 0x78, 0xff, 0x16, 0x15, 0xb1, 0x46, 0x04, 0xdd, + 0x84, 0x09, 0x91, 0xbb, 0x4a, 0xe8, 0xe0, 0x8a, 0x86, 0x8e, 0x65, 0x02, 0xeb, 0xc0, 0xc3, 0x74, + 0x01, 0x36, 0x2b, 0xa3, 0x2d, 0x38, 0xaf, 0x25, 0x72, 0xbc, 0x16, 0x3a, 0xec, 0xa1, 0xd4, 0x63, + 0x7b, 0x56, 0x3b, 0xa6, 0x9f, 0x7c, 0x70, 0x50, 0x39, 0xbf, 0xde, 0x0b, 0x11, 0xf7, 0xa6, 0x83, + 0x6e, 0xc1, 0x19, 0xee, 0x30, 0x58, 0x25, 0x8e, 0xdb, 0xf2, 0x7c, 0x75, 0x0f, 0xf0, 0x65, 0x7f, + 0xf6, 0xc1, 0x41, 0xe5, 0xcc, 0x42, 0x16, 0x02, 0xce, 0xae, 0x87, 0x5e, 0x83, 0xb2, 0xeb, 0x47, + 0x62, 0x0c, 0x46, 0x8c, 0x1c, 0xa5, 0xe5, 0xea, 0x5a, 0x43, 0x7d, 0x7f, 0xf2, 0x07, 0x27, 0x15, + 0xd0, 0x16, 0xd7, 0xc3, 0x29, 0xb1, 0x77, 0x34, 0x3f, 0x1f, 0xbd, 0x58, 0x12, 0x86, 0xcb, 0x10, + 0x57, 0x40, 0x2b, 0x93, 0x5b, 0xc3, 0x9b, 0xc8, 0x20, 0x8c, 0xde, 0x00, 0x44, 0xf9, 0x42, 0xaf, + 0x49, 0x16, 0x9a, 0x2c, 0x62, 0x39, 0x53, 0x5b, 0x96, 0x0c, 0x17, 0x0d, 0xd4, 0xe8, 0xc2, 0xc0, + 0x19, 0xb5, 0xd0, 0x75, 0x7a, 0x6e, 0xea, 0xa5, 0xc2, 0x74, 0x58, 0xca, 0x12, 0xb3, 0x55, 0xd2, + 0x0e, 0x49, 0xd3, 0x89, 0x89, 0x6b, 0x52, 0xc4, 0xa9, 0x7a, 0xf4, 0xea, 0x56, 0xc9, 0x8b, 0xc0, + 0x8c, 0xd2, 0xd1, 0x9d, 0xc0, 0x88, 0x8a, 0xe1, 0xdb, 0x41, 0x14, 0xaf, 0x91, 0xf8, 0x5e, 0x10, + 0xee, 0x88, 0xa0, 0x68, 0x49, 0x7c, 0xce, 0x04, 0x84, 0x75, 0x3c, 0xca, 0x76, 0xb3, 0x57, 0xe9, + 0x5a, 0x95, 0x3d, 0x08, 0x96, 0x92, 0x7d, 0x72, 0x9d, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xb5, 0xfa, + 0x12, 0x7b, 0xdc, 0x4b, 0xa1, 0xd6, 0xea, 0x4b, 0x58, 0xc2, 0x11, 0xe9, 0xce, 0xff, 0x3a, 0x99, + 0xaf, 0x44, 0xed, 0xbe, 0x7d, 0x06, 0x4c, 0x01, 0xeb, 0xc3, 0xb4, 0xca, 0x3c, 0xcb, 0xa3, 0xc5, + 0x45, 0xb3, 0x53, 0x6c, 0x91, 0x0c, 0x1e, 0x6a, 0x4e, 0xa9, 0xa5, 0x6b, 0x29, 0x4a, 0xb8, 0x8b, + 0xb6, 0x11, 0x37, 0x65, 0xba, 0x6f, 0xf2, 0xa9, 0x2b, 0x50, 0x8e, 0x3a, 0x1b, 0x6e, 0xb0, 0xeb, + 0x78, 0x3e, 0x7b, 0x8b, 0xd3, 0x78, 0xba, 0x86, 0x04, 0xe0, 0x04, 0x07, 0xad, 0x40, 0xc9, 0x91, + 0x3a, 0x67, 0x94, 0x1f, 0x24, 0x41, 0x69, 0x9a, 0xb9, 0xdf, 0xb0, 0xd4, 0x32, 0xab, 0xba, 0xe8, + 0x55, 0x98, 0x10, 0x6e, 0x62, 0x3c, 0x74, 0x04, 0x7b, 0x2b, 0xd3, 0xfc, 0x00, 0x1a, 0x3a, 0x10, + 0x9b, 0xb8, 0xe8, 0xf3, 0x30, 0x49, 0xa9, 0x24, 0x07, 0xdb, 0xec, 0xe9, 0x41, 0x4e, 0x44, 0x2d, + 0xa9, 0x88, 0x5e, 0x19, 0xa7, 0x88, 0x21, 0x17, 0xce, 0x39, 0x9d, 0x38, 0x60, 0x7a, 0x7b, 0x73, + 0xfd, 0xaf, 0x07, 0x3b, 0xc4, 0x67, 0x4f, 0x66, 0xa5, 0xc5, 0x8b, 0x0f, 0x0e, 0x2a, 0xe7, 
0x16, + 0x7a, 0xe0, 0xe1, 0x9e, 0x54, 0xd0, 0x6d, 0x18, 0x8b, 0x83, 0x16, 0xb3, 0xc8, 0xa7, 0x17, 0xe2, + 0x63, 0xf9, 0x71, 0x87, 0xd6, 0x15, 0x9a, 0xae, 0xb3, 0x52, 0x55, 0xb1, 0x4e, 0x07, 0xad, 0xf3, + 0x3d, 0xc6, 0x22, 0xb2, 0x92, 0x68, 0xf6, 0xf1, 0xfc, 0x81, 0x51, 0x81, 0x5b, 0xcd, 0x2d, 0x28, + 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x06, 0x33, 0xed, 0xd0, 0x0b, 0xd8, 0xc2, 0x56, 0x6f, 0x26, 0xb3, + 0x66, 0x1e, 0x89, 0x7a, 0x1a, 0x01, 0x77, 0xd7, 0xa1, 0x32, 0xad, 0x2c, 0x9c, 0x3d, 0xcb, 0x93, + 0x92, 0x71, 0x3e, 0x9f, 0x97, 0x61, 0x05, 0x45, 0xab, 0xec, 0x5c, 0xe6, 0xd2, 0xe7, 0xec, 0x5c, + 0x7e, 0x70, 0x09, 0x5d, 0x4a, 0xe5, 0xec, 0x99, 0xfa, 0x8b, 0x13, 0x0a, 0xf4, 0xde, 0x88, 0xb6, + 0x9d, 0x90, 0xd4, 0xc3, 0xa0, 0x49, 0x22, 0x2d, 0x08, 0xf4, 0x13, 0x3c, 0x70, 0x24, 0xbd, 0x37, + 0x1a, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x72, 0xb5, 0x5c, 0xdc, 0x94, 0xeb, 0x8d, 0x66, 0xcf, 0xf5, + 0xb0, 0x6f, 0x4a, 0xb1, 0xc8, 0xc9, 0x5a, 0x34, 0x8a, 0x23, 0x9c, 0xa2, 0x89, 0xbe, 0x0d, 0xa6, + 0x45, 0x9c, 0xa5, 0x64, 0xdc, 0xcf, 0x27, 0x86, 0x93, 0x38, 0x05, 0xc3, 0x5d, 0xd8, 0x3c, 0xf4, + 0xb5, 0xb3, 0xd1, 0x22, 0x62, 0x11, 0xde, 0xf4, 0xfc, 0x9d, 0x68, 0xf6, 0x02, 0xfb, 0x6a, 0x11, + 0xfa, 0x3a, 0x0d, 0xc5, 0x19, 0x35, 0xe6, 0xbe, 0x15, 0x66, 0xba, 0x6e, 0xae, 0x23, 0x85, 0x8b, + 0xff, 0x93, 0x61, 0x28, 0xab, 0x37, 0x00, 0x74, 0xc5, 0x7c, 0xda, 0x39, 0x9b, 0x7e, 0xda, 0x29, + 0x51, 0x51, 0x44, 0x7f, 0xcd, 0x59, 0x37, 0xec, 0x02, 0x0b, 0xf9, 0xc9, 0xd9, 0x74, 0x61, 0xa2, + 0xaf, 0x8f, 0xa1, 0xa6, 0xd2, 0x29, 0x0e, 0xfc, 0x46, 0x34, 0xd4, 0x53, 0x4b, 0x34, 0x60, 0x6e, + 0x64, 0xf4, 0x14, 0x95, 0xc7, 0xdc, 0x5a, 0x3d, 0x9d, 0x2c, 0xb4, 0x4e, 0x0b, 0x31, 0x87, 0x31, + 0xb9, 0x95, 0xb2, 0x59, 0x4c, 0x6e, 0x1d, 0x7d, 0x48, 0xb9, 0x55, 0x12, 0xc0, 0x09, 0x2d, 0xd4, + 0x82, 0x99, 0xa6, 0x99, 0xe7, 0x55, 0xf9, 0x15, 0x3e, 0xd5, 0x37, 0xe3, 0x6a, 0x47, 0x4b, 0xaa, + 0xb7, 0x94, 0xa6, 0x82, 0xbb, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x6e, 0x10, 0xb1, 0x45, 0x29, 0x78, + 0x0d, 0xe9, 0x7f, 0x55, 0x7a, 0xf3, 0x56, 0x83, 0x95, 0x1f, 0x1e, 0x54, 0xc6, 0xea, 0x81, 0x2b, + 0xff, 0x62, 0x55, 0x01, 0xdd, 0x87, 0x33, 0xc6, 0x09, 0xad, 0xba, 0x0b, 0x83, 0x77, 0xf7, 0xbc, + 0x68, 0xee, 0x4c, 0x2d, 0x8b, 0x12, 0xce, 0x6e, 0x80, 0x1e, 0x7b, 0x7e, 0x20, 0x72, 0x24, 0x4b, + 0x7e, 0x86, 0xb1, 0x2d, 0x65, 0xdd, 0xfb, 0x3e, 0x85, 0x80, 0xbb, 0xeb, 0xd8, 0xbf, 0xcc, 0x9f, + 0x4c, 0x84, 0x62, 0x95, 0x44, 0x9d, 0xd6, 0x49, 0xa4, 0xe0, 0x5a, 0x36, 0x74, 0xbe, 0x0f, 0xfd, + 0x2c, 0xf7, 0x6b, 0x16, 0x7b, 0x96, 0x5b, 0x27, 0xbb, 0xed, 0x16, 0x15, 0xef, 0x1f, 0x7d, 0xc7, + 0xdf, 0x84, 0x52, 0x2c, 0x5a, 0xeb, 0x95, 0x35, 0x4c, 0xeb, 0x14, 0x7b, 0x9a, 0x54, 0x9c, 0x8e, + 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0xcf, 0xf9, 0x0c, 0x48, 0xc8, 0x09, 0xe8, 0xdf, 0xaa, 0xa6, 0xfe, + 0xad, 0xd2, 0xe7, 0x0b, 0x72, 0xf4, 0x70, 0xff, 0xcc, 0xec, 0x37, 0x13, 0x2a, 0x3f, 0xec, 0xef, + 0xc1, 0xf6, 0x0f, 0x59, 0x70, 0x3a, 0xcb, 0x80, 0x8a, 0x72, 0xa7, 0x5c, 0xa4, 0x55, 0xef, 0xe3, + 0x6a, 0x04, 0xef, 0x88, 0x72, 0xac, 0x30, 0x06, 0x4e, 0xc8, 0x71, 0xb4, 0x00, 0x75, 0xb7, 0x60, + 0xa2, 0x1e, 0x12, 0xed, 0x0e, 0x78, 0x9d, 0x3b, 0xf2, 0xf1, 0xfe, 0x3c, 0x77, 0x64, 0x27, 0x3e, + 0xfb, 0xa7, 0x0b, 0x70, 0x9a, 0x3f, 0x70, 0x2d, 0xec, 0x05, 0x9e, 0x5b, 0x0f, 0x5c, 0x91, 0x4c, + 0xe5, 0x2d, 0x18, 0x6f, 0x6b, 0x7a, 0x88, 0x5e, 0x21, 0xb2, 0x74, 0x7d, 0x45, 0x22, 0x0f, 0xea, + 0xa5, 0xd8, 0xa0, 0x85, 0x5c, 0x18, 0x27, 0x7b, 0x5e, 0x53, 0xbd, 0x92, 0x14, 0x8e, 0x7c, 0x37, + 0xa8, 0x56, 0x96, 0x35, 0x3a, 0xd8, 0xa0, 0xfa, 0x08, 0xf2, 0xeb, 0xd9, 0x3f, 0x6c, 0xc1, 0xe3, + 0x39, 0x01, 0xb5, 
0x68, 0x73, 0xf7, 0xd8, 0x53, 0xa2, 0x48, 0xd5, 0xa5, 0x9a, 0xe3, 0x0f, 0x8c, + 0x58, 0x40, 0xd1, 0x67, 0x01, 0xf8, 0x03, 0x21, 0x15, 0x8f, 0xfa, 0x45, 0x1e, 0x32, 0x82, 0xa6, + 0x68, 0xc1, 0x2e, 0x64, 0x7d, 0xac, 0xd1, 0xb2, 0x7f, 0xa2, 0x08, 0xc3, 0xec, 0x41, 0x0a, 0xad, + 0xc0, 0xe8, 0x36, 0x0f, 0x31, 0x3d, 0x48, 0x34, 0xeb, 0x44, 0xce, 0xe4, 0x05, 0x58, 0x56, 0x46, + 0xab, 0x70, 0x8a, 0x87, 0xe8, 0x6e, 0x55, 0x49, 0xcb, 0xd9, 0x97, 0xea, 0x0a, 0x9e, 0xde, 0x4a, + 0x05, 0xee, 0xa8, 0x75, 0xa3, 0xe0, 0xac, 0x7a, 0xe8, 0x75, 0x98, 0xa4, 0xfc, 0x5d, 0xd0, 0x89, + 0x25, 0x25, 0x1e, 0x9c, 0x5b, 0x31, 0x94, 0xeb, 0x06, 0x14, 0xa7, 0xb0, 0xa9, 0xe0, 0xd5, 0xee, + 0x52, 0xcc, 0x0c, 0x27, 0x82, 0x97, 0xa9, 0x8c, 0x31, 0x71, 0x99, 0xe5, 0x54, 0x87, 0xd9, 0x89, + 0xad, 0x6f, 0x87, 0x24, 0xda, 0x0e, 0x5a, 0xae, 0xc8, 0x8e, 0x9e, 0x58, 0x4e, 0xa5, 0xe0, 0xb8, + 0xab, 0x06, 0xa5, 0xb2, 0xe9, 0x78, 0xad, 0x4e, 0x48, 0x12, 0x2a, 0x23, 0x26, 0x95, 0x95, 0x14, + 0x1c, 0x77, 0xd5, 0xa0, 0xeb, 0xe8, 0x8c, 0x48, 0x57, 0x2e, 0xc3, 0x09, 0x28, 0x73, 0xb8, 0x51, + 0xe9, 0x58, 0xd5, 0x23, 0x9e, 0x8e, 0x30, 0x18, 0x52, 0x09, 0xcf, 0x35, 0xf5, 0xa5, 0x70, 0xa9, + 0x92, 0x54, 0x1e, 0x26, 0x69, 0xf6, 0xf7, 0x17, 0xe0, 0x54, 0x86, 0xd9, 0x2d, 0x3f, 0xaa, 0xb6, + 0xbc, 0x28, 0x56, 0x29, 0x7c, 0xb4, 0xa3, 0x8a, 0x97, 0x63, 0x85, 0x41, 0xf7, 0x03, 0x3f, 0x0c, + 0xd3, 0x07, 0xa0, 0x30, 0x6b, 0x13, 0xd0, 0x23, 0x26, 0xc3, 0xb9, 0x08, 0x43, 0x9d, 0x88, 0xc8, + 0x48, 0x58, 0xea, 0xfc, 0x66, 0x0a, 0x6d, 0x06, 0xa1, 0xac, 0xe9, 0x96, 0xd2, 0x25, 0x6b, 0xac, + 0x29, 0x57, 0x10, 0x73, 0x18, 0xed, 0x5c, 0x4c, 0x7c, 0xc7, 0x8f, 0x05, 0x03, 0x9b, 0xc4, 0x6f, + 0x61, 0xa5, 0x58, 0x40, 0xed, 0xaf, 0x14, 0xe1, 0x6c, 0xae, 0x21, 0x3e, 0xed, 0xfa, 0x6e, 0xe0, + 0x7b, 0x71, 0xa0, 0x1e, 0x45, 0x79, 0xcc, 0x16, 0xd2, 0xde, 0x5e, 0x15, 0xe5, 0x58, 0x61, 0xa0, + 0x4b, 0x32, 0xc1, 0x7e, 0x3a, 0x99, 0xd1, 0x62, 0xd5, 0xc8, 0xb1, 0x3f, 0x68, 0xa2, 0xb8, 0xa7, + 0x60, 0xa8, 0x1d, 0x04, 0xad, 0xf4, 0xa1, 0x45, 0xbb, 0x1b, 0x04, 0x2d, 0xcc, 0x80, 0xe8, 0x63, + 0x62, 0xbc, 0x52, 0xaf, 0x80, 0xd8, 0x71, 0x83, 0x48, 0x1b, 0xb4, 0x67, 0x60, 0x74, 0x87, 0xec, + 0x87, 0x9e, 0xbf, 0x95, 0x7e, 0x1d, 0xbe, 0xc1, 0x8b, 0xb1, 0x84, 0x9b, 0xa9, 0x2d, 0x46, 0x8f, + 0x3b, 0xc3, 0x5b, 0xa9, 0xef, 0x15, 0xf8, 0x03, 0x45, 0x98, 0xc2, 0x8b, 0xd5, 0x6f, 0x4e, 0xc4, + 0xed, 0xee, 0x89, 0x38, 0xee, 0x0c, 0x6f, 0xfd, 0x67, 0xe3, 0x17, 0x2c, 0x98, 0x62, 0xe1, 0x9f, + 0x45, 0xa4, 0x10, 0x2f, 0xf0, 0x4f, 0x80, 0xc5, 0x7b, 0x0a, 0x86, 0x43, 0xda, 0x68, 0x3a, 0x8b, + 0x11, 0xeb, 0x09, 0xe6, 0x30, 0x74, 0x0e, 0x86, 0x58, 0x17, 0xe8, 0xe4, 0x8d, 0xf3, 0x04, 0x10, + 0x55, 0x27, 0x76, 0x30, 0x2b, 0x65, 0xee, 0xef, 0x98, 0xb4, 0x5b, 0x1e, 0xef, 0x74, 0xf2, 0x04, + 0xf2, 0xe1, 0x70, 0x7f, 0xcf, 0xec, 0xda, 0xfb, 0x73, 0x7f, 0xcf, 0x26, 0xd9, 0x5b, 0x7c, 0xfa, + 0xc3, 0x02, 0x5c, 0xc8, 0xac, 0x37, 0xb0, 0xfb, 0x7b, 0xef, 0xda, 0xc7, 0x63, 0xe4, 0x93, 0x6d, + 0x7b, 0x53, 0x3c, 0x41, 0xdb, 0x9b, 0xa1, 0x41, 0x39, 0xcc, 0xe1, 0x01, 0xbc, 0xd2, 0x33, 0x87, + 0xec, 0x43, 0xe2, 0x95, 0x9e, 0xd9, 0xb7, 0x1c, 0xf1, 0xef, 0xcf, 0x0a, 0x39, 0xdf, 0xc2, 0x04, + 0xc1, 0xcb, 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xe3, 0xfc, 0x8c, 0xe1, 0x65, 0x58, 0x41, + 0x91, 0xa7, 0xf9, 0x77, 0x17, 0xf2, 0x93, 0x7a, 0xe6, 0x36, 0x35, 0x6f, 0xbe, 0x58, 0xa9, 0x21, + 0xc8, 0xf0, 0xf5, 0x5e, 0xd5, 0x84, 0xf7, 0xe2, 0xe0, 0xc2, 0xfb, 0x78, 0xb6, 0xe0, 0x8e, 0x16, + 0x60, 0x6a, 0xd7, 0xf3, 0xe9, 0xb1, 0xb9, 0x6f, 0xb2, 0xac, 0x2a, 0xdc, 0xc9, 0xaa, 0x09, 0xc6, + 0x69, 0xfc, 0xb9, 0x57, 0x61, 0xe2, 0xe1, 
0xd5, 0x96, 0xdf, 0x28, 0xc2, 0x13, 0x3d, 0xb6, 0x3d, + 0x3f, 0xeb, 0x8d, 0x39, 0xd0, 0xce, 0xfa, 0xae, 0x79, 0xa8, 0xc3, 0xe9, 0xcd, 0x4e, 0xab, 0xb5, + 0xcf, 0xcc, 0x5b, 0x89, 0x2b, 0x31, 0x04, 0x4f, 0x79, 0x4e, 0xa6, 0xdc, 0x58, 0xc9, 0xc0, 0xc1, + 0x99, 0x35, 0xd1, 0x1b, 0x80, 0x02, 0x91, 0x51, 0xf8, 0x1a, 0xf1, 0xc5, 0x3b, 0x00, 0x1b, 0xf8, + 0x62, 0xb2, 0x19, 0x6f, 0x75, 0x61, 0xe0, 0x8c, 0x5a, 0x54, 0x38, 0xa0, 0xb7, 0xd2, 0xbe, 0xea, + 0x56, 0x4a, 0x38, 0xc0, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x1a, 0xcc, 0x38, 0x7b, 0x8e, 0xc7, 0xc3, + 0x00, 0x4a, 0x02, 0x5c, 0x3a, 0x50, 0xca, 0xb2, 0x85, 0x34, 0x02, 0xee, 0xae, 0x93, 0xf2, 0x00, + 0x1f, 0xc9, 0xf7, 0x00, 0xef, 0x7d, 0x2e, 0xf6, 0xd3, 0xfd, 0xda, 0xff, 0xd5, 0xa2, 0xd7, 0x97, + 0x96, 0xe8, 0x5f, 0x3d, 0xb6, 0xbe, 0xca, 0x0c, 0x58, 0xb8, 0x32, 0x50, 0x73, 0xc6, 0x3e, 0xa3, + 0x19, 0xb0, 0x24, 0x40, 0x6c, 0xe2, 0xf2, 0x05, 0x11, 0x25, 0x3e, 0x40, 0x06, 0x8b, 0x2f, 0x82, + 0x39, 0x28, 0x0c, 0xf4, 0x39, 0x18, 0x75, 0xbd, 0x3d, 0x2f, 0x0a, 0x42, 0xb1, 0x59, 0x8e, 0xe8, + 0x49, 0x91, 0x9c, 0x83, 0x55, 0x4e, 0x06, 0x4b, 0x7a, 0xf6, 0x0f, 0x14, 0x60, 0x42, 0xb6, 0xf8, + 0x66, 0x27, 0x88, 0x9d, 0x13, 0xb8, 0x96, 0xaf, 0x19, 0xd7, 0xf2, 0xc7, 0x7a, 0x45, 0xb4, 0x60, + 0x5d, 0xca, 0xbd, 0x8e, 0x6f, 0xa5, 0xae, 0xe3, 0xa7, 0xfb, 0x93, 0xea, 0x7d, 0x0d, 0xff, 0x0b, + 0x0b, 0x66, 0x0c, 0xfc, 0x13, 0xb8, 0x0d, 0x56, 0xcc, 0xdb, 0xe0, 0xc9, 0xbe, 0xdf, 0x90, 0x73, + 0x0b, 0x7c, 0x6f, 0x31, 0xd5, 0x77, 0x76, 0xfa, 0xbf, 0x0b, 0x43, 0xdb, 0x4e, 0xe8, 0xf6, 0x8a, + 0x9c, 0xdb, 0x55, 0x69, 0xfe, 0xba, 0x13, 0xba, 0xfc, 0x0c, 0x7f, 0x4e, 0xa5, 0xe5, 0x74, 0x42, + 0xb7, 0xaf, 0xcb, 0x1b, 0x6b, 0x0a, 0xbd, 0x02, 0x23, 0x51, 0x33, 0x68, 0x2b, 0x83, 0xd4, 0x8b, + 0x3c, 0x65, 0x27, 0x2d, 0x39, 0x3c, 0xa8, 0x20, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x0b, + 0x26, 0xd8, 0x2f, 0x65, 0x29, 0x51, 0xcc, 0xcf, 0xd7, 0xd0, 0xd0, 0x11, 0xb9, 0xc1, 0x8d, 0x51, + 0x84, 0x4d, 0x52, 0x73, 0x5b, 0x50, 0x56, 0x9f, 0xf5, 0x48, 0x5d, 0x95, 0xfe, 0x53, 0x11, 0x4e, + 0x65, 0xac, 0x39, 0x14, 0x19, 0x33, 0xf1, 0xc2, 0x80, 0x4b, 0xf5, 0x7d, 0xce, 0x45, 0xc4, 0xa4, + 0x21, 0x57, 0xac, 0xad, 0x81, 0x1b, 0xbd, 0x1d, 0x91, 0x74, 0xa3, 0xb4, 0xa8, 0x7f, 0xa3, 0xb4, + 0xb1, 0x13, 0x1b, 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x23, 0x9d, 0xd3, 0x3f, 0x2e, 0xc2, 0xe9, 0xac, + 0x20, 0x3b, 0xe8, 0x3b, 0x53, 0xb9, 0x7b, 0x5e, 0x1a, 0x34, 0x3c, 0x0f, 0x4f, 0xe8, 0x23, 0x52, + 0x6f, 0xcf, 0x9b, 0xd9, 0x7c, 0xfa, 0x0e, 0xb3, 0x68, 0x93, 0xf9, 0xb7, 0x86, 0x3c, 0xe7, 0x92, + 0x3c, 0x3e, 0x3e, 0x39, 0x70, 0x07, 0x44, 0xb2, 0xa6, 0x28, 0xe5, 0xdf, 0x2a, 0x8b, 0xfb, 0xfb, + 0xb7, 0xca, 0x96, 0xe7, 0x3c, 0x18, 0xd3, 0xbe, 0xe6, 0x91, 0xce, 0xf8, 0x0e, 0xbd, 0xad, 0xb4, + 0x7e, 0x3f, 0xd2, 0x59, 0xff, 0x61, 0x0b, 0x52, 0xd6, 0x9f, 0x4a, 0x2d, 0x66, 0xe5, 0xaa, 0xc5, + 0x2e, 0xc2, 0x50, 0x18, 0xb4, 0x48, 0x3a, 0x55, 0x0e, 0x0e, 0x5a, 0x04, 0x33, 0x08, 0xc5, 0x88, + 0x13, 0x65, 0xc7, 0xb8, 0x2e, 0xc8, 0x09, 0x11, 0xed, 0x29, 0x18, 0x6e, 0x91, 0x3d, 0xd2, 0x4a, + 0xc7, 0xa1, 0xbf, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x30, 0x04, 0xe7, 0x7b, 0x7a, 0x88, 0x53, + 0x71, 0x68, 0xcb, 0x89, 0xc9, 0x3d, 0x67, 0x3f, 0x1d, 0x30, 0xfa, 0x1a, 0x2f, 0xc6, 0x12, 0xce, + 0x0c, 0xe2, 0x79, 0x80, 0xc8, 0x94, 0x12, 0x51, 0xc4, 0x85, 0x14, 0x50, 0x53, 0x29, 0x55, 0x3c, + 0x0e, 0xa5, 0xd4, 0x55, 0x80, 0x28, 0x6a, 0x71, 0xfb, 0x02, 0x57, 0x58, 0xda, 0x27, 0x81, 0x44, + 0x1b, 0x37, 0x05, 0x04, 0x6b, 0x58, 0xa8, 0x0a, 0xd3, 0xed, 0x30, 0x88, 0xb9, 0x4e, 0xb6, 0xca, + 0x0d, 0x93, 0x86, 0x4d, 0xe7, 0xdc, 0x7a, 0x0a, 0x8e, 0xbb, 0x6a, 
0xa0, 0x97, 0x61, 0x4c, 0x38, + 0xec, 0xd6, 0x83, 0xa0, 0x25, 0xd4, 0x40, 0xca, 0xcc, 0xa5, 0x91, 0x80, 0xb0, 0x8e, 0xa7, 0x55, + 0x63, 0x8a, 0xde, 0xd1, 0xcc, 0x6a, 0x5c, 0xd9, 0xab, 0xe1, 0xa5, 0x02, 0x6e, 0x95, 0x06, 0x0a, + 0xb8, 0x95, 0x28, 0xc6, 0xca, 0x03, 0xbf, 0x6d, 0x41, 0x5f, 0x55, 0xd2, 0xcf, 0x0e, 0xc1, 0x29, + 0xb1, 0x70, 0x1e, 0xf5, 0x72, 0xb9, 0xdd, 0xbd, 0x5c, 0x8e, 0x43, 0x75, 0xf6, 0xcd, 0x35, 0x73, + 0xd2, 0x6b, 0xe6, 0x07, 0x2d, 0x30, 0xd9, 0x2b, 0xf4, 0x97, 0x72, 0x23, 0xee, 0xbf, 0x9c, 0xcb, + 0xae, 0xb9, 0xf2, 0x02, 0x79, 0x9f, 0xb1, 0xf7, 0xed, 0xff, 0x62, 0xc1, 0x93, 0x7d, 0x29, 0xa2, + 0x65, 0x28, 0x33, 0x1e, 0x50, 0x93, 0xce, 0x9e, 0x56, 0x86, 0x8b, 0x12, 0x90, 0xc3, 0x92, 0x26, + 0x35, 0xd1, 0x72, 0x57, 0x6a, 0x83, 0x67, 0x32, 0x52, 0x1b, 0x9c, 0x31, 0x86, 0xe7, 0x21, 0x73, + 0x1b, 0xfc, 0x72, 0x11, 0x46, 0xf8, 0x8a, 0x3f, 0x01, 0x31, 0x6c, 0x45, 0xe8, 0x6d, 0x7b, 0x84, + 0xdc, 0xe2, 0x7d, 0x99, 0xaf, 0x3a, 0xb1, 0xc3, 0xd9, 0x04, 0x75, 0x5b, 0x25, 0x1a, 0x5e, 0x34, + 0x6f, 0xdc, 0x67, 0x73, 0x29, 0xc5, 0x24, 0x70, 0x1a, 0xda, 0xed, 0xf6, 0x05, 0x80, 0x28, 0x0e, + 0x3d, 0x7f, 0x8b, 0xd2, 0x10, 0xc1, 0xdb, 0x3e, 0xde, 0xa3, 0xf5, 0x86, 0x42, 0xe6, 0x7d, 0x48, + 0x76, 0xba, 0x02, 0x60, 0x8d, 0xe2, 0xdc, 0xa7, 0xa0, 0xac, 0x90, 0xfb, 0x69, 0x71, 0xc6, 0x75, + 0xe6, 0xe2, 0x33, 0x30, 0x95, 0x6a, 0xeb, 0x48, 0x4a, 0xa0, 0x5f, 0xb4, 0x60, 0x8a, 0x77, 0x79, + 0xd9, 0xdf, 0x13, 0x67, 0xea, 0x7b, 0x70, 0xba, 0x95, 0x71, 0xb6, 0x89, 0x19, 0x1d, 0xfc, 0x2c, + 0x54, 0x4a, 0x9f, 0x2c, 0x28, 0xce, 0x6c, 0x03, 0x5d, 0xa6, 0xeb, 0x96, 0x9e, 0x5d, 0x4e, 0x4b, + 0x38, 0x57, 0x8d, 0xf3, 0x35, 0xcb, 0xcb, 0xb0, 0x82, 0xda, 0xbf, 0x65, 0xc1, 0x0c, 0xef, 0xf9, + 0x0d, 0xb2, 0xaf, 0x76, 0xf8, 0x07, 0xd9, 0x77, 0x91, 0x6d, 0xa4, 0x90, 0x93, 0x6d, 0x44, 0xff, + 0xb4, 0x62, 0xcf, 0x4f, 0xfb, 0x69, 0x0b, 0xc4, 0x0a, 0x3c, 0x01, 0x51, 0xfe, 0x5b, 0x4d, 0x51, + 0x7e, 0x2e, 0x7f, 0x51, 0xe7, 0xc8, 0xf0, 0x7f, 0x6a, 0xc1, 0x34, 0x47, 0x48, 0xde, 0x9c, 0x3f, + 0xd0, 0x79, 0x18, 0x24, 0x6d, 0xa0, 0xca, 0x25, 0x9e, 0xfd, 0x51, 0xc6, 0x64, 0x0d, 0xf5, 0x9c, + 0x2c, 0x57, 0x6e, 0xa0, 0x23, 0xa4, 0xcc, 0x3c, 0x72, 0xd4, 0x6e, 0xfb, 0x0f, 0x2c, 0x40, 0xbc, + 0x19, 0x83, 0xfd, 0xa1, 0x4c, 0x05, 0x2b, 0xd5, 0xae, 0x8b, 0xe4, 0xa8, 0x51, 0x10, 0xac, 0x61, + 0x1d, 0xcb, 0xf0, 0xa4, 0x0c, 0x07, 0x8a, 0xfd, 0x0d, 0x07, 0x8e, 0x30, 0xa2, 0xbf, 0x3f, 0x0c, + 0x69, 0xf7, 0x03, 0x74, 0x07, 0xc6, 0x9b, 0x4e, 0xdb, 0xd9, 0xf0, 0x5a, 0x5e, 0xec, 0x91, 0xa8, + 0x97, 0xc5, 0xd1, 0x92, 0x86, 0x27, 0x9e, 0x7a, 0xb5, 0x12, 0x6c, 0xd0, 0x41, 0xf3, 0x00, 0xed, + 0xd0, 0xdb, 0xf3, 0x5a, 0x64, 0x8b, 0x69, 0x1c, 0x98, 0x3b, 0x27, 0x37, 0xa3, 0x91, 0xa5, 0x58, + 0xc3, 0xc8, 0xf0, 0xcc, 0x2b, 0x3e, 0x3a, 0xcf, 0xbc, 0xa1, 0x23, 0x7a, 0xe6, 0x0d, 0x0f, 0xe4, + 0x99, 0x87, 0xe1, 0x31, 0xc9, 0x22, 0xd1, 0xff, 0x2b, 0x5e, 0x8b, 0x08, 0xbe, 0x98, 0x3b, 0x79, + 0xce, 0x3d, 0x38, 0xa8, 0x3c, 0x86, 0x33, 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x59, 0x98, 0x75, 0x5a, + 0xad, 0xe0, 0x9e, 0x1a, 0xb5, 0xe5, 0xa8, 0xe9, 0xb4, 0xb8, 0xc6, 0x7e, 0x94, 0x51, 0x3d, 0xf7, + 0xe0, 0xa0, 0x32, 0xbb, 0x90, 0x83, 0x83, 0x73, 0x6b, 0xa7, 0x1c, 0xfb, 0x4a, 0x7d, 0x1d, 0xfb, + 0x5e, 0x83, 0x72, 0x3b, 0x0c, 0x9a, 0xab, 0x9a, 0xf7, 0xcf, 0x05, 0x96, 0x90, 0x5f, 0x16, 0x1e, + 0x1e, 0x54, 0x26, 0xd4, 0x1f, 0x76, 0xc3, 0x27, 0x15, 0x32, 0xfc, 0xf9, 0xe0, 0x51, 0xfa, 0xf3, + 0xed, 0xc0, 0xa9, 0x06, 0x09, 0x3d, 0x96, 0x59, 0xd4, 0x4d, 0xce, 0x8f, 0x75, 0x28, 0x87, 0xa9, + 0x13, 0x73, 0xa0, 0x30, 0x55, 0x5a, 0xf4, 0x64, 0x79, 0x42, 0x26, 0x84, 0xec, 0x3f, 0xb1, 
0x60, + 0x54, 0x18, 0xbe, 0x9f, 0x00, 0xa3, 0xb6, 0x60, 0xe8, 0xcb, 0x2b, 0xd9, 0xb7, 0x0a, 0xeb, 0x4c, + 0xae, 0xa6, 0xbc, 0x96, 0xd2, 0x94, 0x3f, 0xd9, 0x8b, 0x48, 0x6f, 0x1d, 0xf9, 0xdf, 0x2d, 0xc2, + 0xa4, 0xe9, 0xab, 0x72, 0x02, 0x43, 0xb0, 0x06, 0xa3, 0x91, 0x70, 0x8c, 0x2a, 0xe4, 0x1b, 0x74, + 0xa7, 0x27, 0x31, 0xb1, 0xd6, 0x12, 0xae, 0x50, 0x92, 0x48, 0xa6, 0xc7, 0x55, 0xf1, 0x11, 0x7a, + 0x5c, 0xf5, 0x73, 0x17, 0x1a, 0x3a, 0x0e, 0x77, 0x21, 0xfb, 0x6b, 0xec, 0x66, 0xd3, 0xcb, 0x4f, + 0x80, 0xe9, 0xb9, 0x66, 0xde, 0x81, 0x76, 0x8f, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xfc, 0xfc, 0xbc, + 0x05, 0xe7, 0x33, 0xbe, 0x4a, 0xe3, 0x84, 0x9e, 0x83, 0x92, 0xd3, 0x71, 0x3d, 0xb5, 0x97, 0xb5, + 0x57, 0xb3, 0x05, 0x51, 0x8e, 0x15, 0x06, 0x5a, 0x82, 0x19, 0x72, 0xbf, 0xed, 0xf1, 0x67, 0x4b, + 0xdd, 0xa4, 0xb2, 0xc8, 0x43, 0xf7, 0x2e, 0xa7, 0x81, 0xb8, 0x1b, 0x5f, 0x39, 0xb7, 0x17, 0x73, + 0x9d, 0xdb, 0xff, 0xb1, 0x05, 0x63, 0xca, 0x09, 0xe6, 0x91, 0x8f, 0xf6, 0xb7, 0x99, 0xa3, 0xfd, + 0x44, 0x8f, 0xd1, 0xce, 0x19, 0xe6, 0xbf, 0x5f, 0x50, 0xfd, 0xad, 0x07, 0x61, 0x3c, 0x00, 0x87, + 0xf5, 0x0a, 0x94, 0xda, 0x61, 0x10, 0x07, 0xcd, 0xa0, 0x25, 0x18, 0xac, 0x73, 0x49, 0xec, 0x05, + 0x5e, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x5e, 0x10, 0xc6, 0x82, 0xa9, 0x49, 0x46, 0x2f, + 0x08, 0x63, 0xcc, 0x20, 0xc8, 0x05, 0x88, 0x9d, 0x70, 0x8b, 0xc4, 0xb4, 0x4c, 0x84, 0x71, 0xc9, + 0x3f, 0x3c, 0x3a, 0xb1, 0xd7, 0x9a, 0xf7, 0xfc, 0x38, 0x8a, 0xc3, 0xf9, 0x9a, 0x1f, 0xdf, 0x0a, + 0xb9, 0xbc, 0xa6, 0x05, 0x53, 0x50, 0xb4, 0xb0, 0x46, 0x57, 0xba, 0xa0, 0xb2, 0x36, 0x86, 0xcd, + 0xf7, 0xf7, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x29, 0x76, 0x95, 0xb0, 0x01, 0x3a, 0x5a, 0x9c, + 0x83, 0xaf, 0x97, 0xd4, 0xd0, 0xb2, 0xc7, 0xb7, 0xaa, 0x1e, 0x4d, 0xa1, 0xf7, 0xc9, 0x4d, 0x1b, + 0xd6, 0xdd, 0x7b, 0x92, 0x90, 0x0b, 0xe8, 0xdb, 0xbb, 0xcc, 0x32, 0x9e, 0xef, 0x73, 0x05, 0x1c, + 0xc1, 0x10, 0x83, 0x85, 0x13, 0x67, 0xc1, 0x96, 0x6b, 0x75, 0xb1, 0xc8, 0xb5, 0x70, 0xe2, 0x02, + 0x80, 0x13, 0x1c, 0x74, 0x45, 0x48, 0xfb, 0x43, 0x46, 0x52, 0x41, 0x29, 0xed, 0xcb, 0xcf, 0xd7, + 0xc4, 0xfd, 0x17, 0x60, 0x4c, 0x25, 0x17, 0xac, 0xf3, 0x1c, 0x6d, 0x22, 0xa8, 0xcd, 0x72, 0x52, + 0x8c, 0x75, 0x1c, 0xb4, 0x0e, 0x53, 0x11, 0x57, 0xf5, 0xa8, 0xd8, 0x85, 0x5c, 0x65, 0xf6, 0x71, + 0x69, 0xce, 0xd1, 0x30, 0xc1, 0x87, 0xac, 0x88, 0x1f, 0x1d, 0xd2, 0x8f, 0x34, 0x4d, 0x02, 0xbd, + 0x0e, 0x93, 0x2d, 0x3d, 0x8d, 0x7f, 0x5d, 0x68, 0xd4, 0x94, 0x55, 0xb4, 0x91, 0xe4, 0xbf, 0x8e, + 0x53, 0xd8, 0x94, 0x31, 0xd3, 0x4b, 0x44, 0xbc, 0x4d, 0xc7, 0xdf, 0x22, 0x91, 0x48, 0x8d, 0xc6, + 0x18, 0xb3, 0x9b, 0x39, 0x38, 0x38, 0xb7, 0x36, 0x7a, 0x05, 0xc6, 0xe5, 0xe7, 0x6b, 0x5e, 0xd2, + 0x89, 0xed, 0xbd, 0x06, 0xc3, 0x06, 0x26, 0xba, 0x07, 0x67, 0xe4, 0xff, 0xf5, 0xd0, 0xd9, 0xdc, + 0xf4, 0x9a, 0xc2, 0x49, 0x9d, 0x3b, 0x20, 0x2d, 0x48, 0x8f, 0xa6, 0xe5, 0x2c, 0xa4, 0xc3, 0x83, + 0xca, 0x45, 0x31, 0x6a, 0x99, 0x70, 0x36, 0x89, 0xd9, 0xf4, 0xd1, 0x2a, 0x9c, 0xda, 0x26, 0x4e, + 0x2b, 0xde, 0x5e, 0xda, 0x26, 0xcd, 0x1d, 0xb9, 0x89, 0x98, 0xef, 0xb5, 0x66, 0xb1, 0x7e, 0xbd, + 0x1b, 0x05, 0x67, 0xd5, 0x43, 0x6f, 0xc3, 0x6c, 0xbb, 0xb3, 0xd1, 0xf2, 0xa2, 0xed, 0xb5, 0x20, + 0x66, 0x16, 0x24, 0x2a, 0x37, 0x9f, 0x70, 0xd2, 0x56, 0x7e, 0xe7, 0xf5, 0x1c, 0x3c, 0x9c, 0x4b, + 0x01, 0xbd, 0x07, 0x67, 0x52, 0x8b, 0x41, 0xb8, 0x8c, 0x4e, 0xe6, 0x47, 0x2f, 0x6e, 0x64, 0x55, + 0x10, 0x2e, 0xa0, 0x59, 0x20, 0x9c, 0xdd, 0xc4, 0xfb, 0xb3, 0x2b, 0x7a, 0x97, 0x56, 0xd6, 0x98, + 0x32, 0xf4, 0x45, 0x18, 0xd7, 0x57, 0x91, 0xb8, 0x60, 0x2e, 0x65, 0xf3, 0x2c, 0xda, 0x6a, 0xe3, + 0x2c, 0x9d, 0x5a, 
0x51, 0x3a, 0x0c, 0x1b, 0x14, 0x6d, 0x02, 0xd9, 0xdf, 0x87, 0x6e, 0x42, 0xa9, + 0xd9, 0xf2, 0x88, 0x1f, 0xd7, 0xea, 0xbd, 0x42, 0xa8, 0x2c, 0x09, 0x1c, 0x31, 0x60, 0x22, 0xdc, + 0x2b, 0x2f, 0xc3, 0x8a, 0x82, 0xfd, 0xab, 0x05, 0xa8, 0xf4, 0x89, 0x1d, 0x9c, 0x52, 0x7f, 0x5b, + 0x03, 0xa9, 0xbf, 0x17, 0x64, 0xa6, 0xc1, 0xb5, 0x94, 0x4e, 0x20, 0x95, 0x45, 0x30, 0xd1, 0x0c, + 0xa4, 0xf1, 0x07, 0x36, 0x47, 0xd6, 0x35, 0xe8, 0x43, 0x7d, 0x0d, 0xea, 0x8d, 0x97, 0xb3, 0xe1, + 0xc1, 0x05, 0x91, 0xdc, 0x57, 0x10, 0xfb, 0x6b, 0x05, 0x38, 0xa3, 0x86, 0xf0, 0x2f, 0xee, 0xc0, + 0xdd, 0xee, 0x1e, 0xb8, 0x63, 0x78, 0x43, 0xb2, 0x6f, 0xc1, 0x08, 0x0f, 0x41, 0x33, 0x00, 0x03, + 0xf4, 0x94, 0x19, 0xaf, 0x4c, 0x5d, 0xd3, 0x46, 0xcc, 0xb2, 0xbf, 0x66, 0xc1, 0xd4, 0xfa, 0x52, + 0xbd, 0x11, 0x34, 0x77, 0x48, 0xbc, 0xc0, 0x19, 0x56, 0x2c, 0xf8, 0x1f, 0xeb, 0x21, 0xf9, 0x9a, + 0x2c, 0x8e, 0xe9, 0x22, 0x0c, 0x6d, 0x07, 0x51, 0x9c, 0x7e, 0x60, 0xbe, 0x1e, 0x44, 0x31, 0x66, + 0x10, 0xfb, 0xb7, 0x2d, 0x18, 0x66, 0xf9, 0x71, 0xfb, 0x25, 0x6d, 0x1e, 0xe4, 0xbb, 0xd0, 0xcb, + 0x30, 0x42, 0x36, 0x37, 0x49, 0x33, 0x16, 0xb3, 0x2a, 0xbd, 0x64, 0x47, 0x96, 0x59, 0x29, 0xbd, + 0xf4, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x2e, 0x94, 0x63, 0x6f, 0x97, 0x2c, 0xb8, 0xae, + 0x78, 0xa2, 0x7b, 0x08, 0xa7, 0xe4, 0x75, 0x49, 0x00, 0x27, 0xb4, 0xec, 0xaf, 0x14, 0x00, 0x92, + 0xc8, 0x06, 0xfd, 0x3e, 0x71, 0xb1, 0xeb, 0xf1, 0xe6, 0x52, 0xc6, 0xe3, 0x0d, 0x4a, 0x08, 0x66, + 0xbc, 0xdc, 0xa8, 0x61, 0x2a, 0x0e, 0x34, 0x4c, 0x43, 0x47, 0x19, 0xa6, 0x25, 0x98, 0x49, 0x22, + 0x33, 0x98, 0x61, 0x6a, 0x98, 0x90, 0xb2, 0x9e, 0x06, 0xe2, 0x6e, 0x7c, 0x9b, 0xc0, 0x45, 0x19, + 0x9f, 0x54, 0xde, 0x35, 0xcc, 0x02, 0xf4, 0x08, 0xf9, 0xbb, 0x93, 0xd7, 0xa9, 0x42, 0xee, 0xeb, + 0xd4, 0x8f, 0x59, 0x70, 0x3a, 0xdd, 0x0e, 0x73, 0xc9, 0xfb, 0xb2, 0x05, 0x67, 0xd8, 0x1b, 0x1d, + 0x6b, 0xb5, 0xfb, 0x45, 0xf0, 0xa5, 0xec, 0x88, 0x15, 0xbd, 0x7b, 0x9c, 0xb8, 0x63, 0xaf, 0x66, + 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0x5f, 0xb6, 0xe0, 0x6c, 0x6e, 0x5a, 0x26, 0x74, 0x19, 0x4a, 0x4e, + 0xdb, 0xe3, 0x0a, 0x30, 0xb1, 0xdf, 0x99, 0xf4, 0x58, 0xaf, 0x71, 0xf5, 0x97, 0x82, 0xaa, 0x74, + 0x91, 0x85, 0xdc, 0x74, 0x91, 0x7d, 0xb3, 0x3f, 0xda, 0xdf, 0x67, 0x81, 0xf0, 0xc2, 0x1a, 0xe0, + 0x90, 0x79, 0x4b, 0x66, 0xdb, 0x35, 0x42, 0xc3, 0x5f, 0xcc, 0x77, 0x4b, 0x13, 0x01, 0xe1, 0xd5, + 0xa5, 0x6e, 0x84, 0x81, 0x37, 0x68, 0xd9, 0x2e, 0x08, 0x68, 0x95, 0x30, 0x9d, 0x55, 0xff, 0xde, + 0x5c, 0x05, 0x70, 0x19, 0xae, 0x96, 0x73, 0x53, 0x5d, 0x21, 0x55, 0x05, 0xc1, 0x1a, 0x96, 0xfd, + 0x1f, 0x0a, 0x30, 0x26, 0x43, 0x91, 0x77, 0xfc, 0x41, 0x24, 0xcb, 0x23, 0xe5, 0x26, 0x62, 0x49, + 0x6a, 0x29, 0xe1, 0x7a, 0x22, 0x90, 0x27, 0x49, 0x6a, 0x25, 0x00, 0x27, 0x38, 0xe8, 0x19, 0x18, + 0x8d, 0x3a, 0x1b, 0x0c, 0x3d, 0xe5, 0x33, 0xd4, 0xe0, 0xc5, 0x58, 0xc2, 0xd1, 0x67, 0x61, 0x9a, + 0xd7, 0x0b, 0x83, 0xb6, 0xb3, 0xc5, 0xb5, 0xad, 0xc3, 0xca, 0xd9, 0x77, 0x7a, 0x35, 0x05, 0x3b, + 0x3c, 0xa8, 0x9c, 0x4e, 0x97, 0x31, 0x3d, 0x7d, 0x17, 0x15, 0xf6, 0xf6, 0xcf, 0x1b, 0xa1, 0xcb, + 0xb4, 0xcb, 0x64, 0x20, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0x22, 0xa0, 0xee, 0xa0, 0xec, 0xe8, 0x0d, + 0x6e, 0xf0, 0xe5, 0x85, 0xc4, 0xed, 0xa5, 0xb7, 0xd7, 0x5d, 0x5a, 0xa5, 0xb9, 0x3f, 0xaf, 0x85, + 0x55, 0x7d, 0xfb, 0x6f, 0x14, 0x61, 0x3a, 0xed, 0xe0, 0x88, 0xae, 0xc3, 0x08, 0xbf, 0x23, 0x05, + 0xf9, 0x1e, 0xcf, 0xc2, 0x9a, 0x5b, 0x24, 0x3b, 0x2d, 0xc4, 0x35, 0x2b, 0xea, 0xa3, 0xb7, 0x61, + 0xcc, 0x0d, 0xee, 0xf9, 0xf7, 0x9c, 0xd0, 0x5d, 0xa8, 0xd7, 0xc4, 0x72, 0xce, 0x64, 0xb5, 0xab, + 0x09, 0x9a, 0xee, 0x6a, 0xc9, 0x9e, 0x40, 
0x12, 0x10, 0xd6, 0xc9, 0xa1, 0x75, 0x16, 0x68, 0x72, + 0xd3, 0xdb, 0x5a, 0x75, 0xda, 0xbd, 0xac, 0x7f, 0x97, 0x24, 0x92, 0x46, 0x79, 0x42, 0x44, 0xa3, + 0xe4, 0x00, 0x9c, 0x10, 0x42, 0xdf, 0x09, 0xa7, 0xa2, 0x1c, 0xed, 0x5c, 0x5e, 0x8e, 0x8e, 0x5e, + 0x0a, 0xab, 0xc5, 0xc7, 0xa9, 0x10, 0x94, 0xa5, 0xc7, 0xcb, 0x6a, 0xc6, 0xfe, 0xb5, 0x53, 0x60, + 0x6c, 0x62, 0x23, 0x65, 0x93, 0x75, 0x4c, 0x29, 0x9b, 0x30, 0x94, 0xc8, 0x6e, 0x3b, 0xde, 0xaf, + 0x7a, 0x61, 0xaf, 0x94, 0x82, 0xcb, 0x02, 0xa7, 0x9b, 0xa6, 0x84, 0x60, 0x45, 0x27, 0x3b, 0xaf, + 0x56, 0xf1, 0x03, 0xcc, 0xab, 0x35, 0x74, 0x82, 0x79, 0xb5, 0xd6, 0x60, 0x74, 0xcb, 0x8b, 0x31, + 0x69, 0x07, 0x82, 0x3b, 0xcd, 0x5c, 0x87, 0xd7, 0x38, 0x4a, 0x77, 0x06, 0x17, 0x01, 0xc0, 0x92, + 0x08, 0x7a, 0x43, 0xed, 0xc0, 0x91, 0x7c, 0xe1, 0xae, 0xfb, 0xfd, 0x32, 0x73, 0x0f, 0x8a, 0xec, + 0x59, 0xa3, 0x0f, 0x9b, 0x3d, 0x6b, 0x45, 0xe6, 0xbc, 0x2a, 0xe5, 0x9b, 0xea, 0xb3, 0x94, 0x56, + 0x7d, 0x32, 0x5d, 0xdd, 0xd1, 0xf3, 0x84, 0x95, 0xf3, 0x4f, 0x02, 0x95, 0x02, 0x6c, 0xc0, 0xec, + 0x60, 0xdf, 0x67, 0xc1, 0x99, 0x76, 0x56, 0xca, 0x3c, 0xf1, 0xd6, 0xf4, 0xf2, 0xc0, 0x39, 0x01, + 0x8d, 0x06, 0x99, 0x94, 0x9f, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, 0x6e, 0xb8, 0x22, 0xbd, + 0xd5, 0x53, 0x39, 0x69, 0xc6, 0x7a, 0x24, 0x17, 0x5b, 0xcf, 0x48, 0x69, 0xf5, 0xd1, 0xbc, 0x94, + 0x56, 0x03, 0x27, 0xb2, 0x7a, 0x43, 0x25, 0x18, 0x9b, 0xc8, 0x5f, 0x4a, 0x3c, 0x7d, 0x58, 0xdf, + 0xb4, 0x62, 0x6f, 0xa8, 0xb4, 0x62, 0x3d, 0x22, 0xe0, 0xf1, 0xa4, 0x61, 0x7d, 0x93, 0x89, 0x69, + 0x09, 0xc1, 0xa6, 0x8e, 0x27, 0x21, 0x98, 0x71, 0xd5, 0xf0, 0x9c, 0x54, 0xcf, 0xf6, 0xb9, 0x6a, + 0x0c, 0xba, 0xbd, 0x2f, 0x1b, 0x9e, 0xfc, 0x6c, 0xe6, 0xa1, 0x92, 0x9f, 0xdd, 0xd1, 0x93, 0x89, + 0xa1, 0x3e, 0xd9, 0xb2, 0x28, 0xd2, 0x80, 0x29, 0xc4, 0xee, 0xe8, 0x17, 0xe0, 0xa9, 0x7c, 0xba, + 0xea, 0x9e, 0xeb, 0xa6, 0x9b, 0x79, 0x05, 0x76, 0xa5, 0x26, 0x3b, 0x7d, 0x32, 0xa9, 0xc9, 0xce, + 0x1c, 0x7b, 0x6a, 0xb2, 0xc7, 0x4e, 0x20, 0x35, 0xd9, 0xe3, 0x1f, 0x68, 0x6a, 0xb2, 0xd9, 0x47, + 0x90, 0x9a, 0x6c, 0x2d, 0x49, 0x4d, 0x76, 0x36, 0x7f, 0x4a, 0x32, 0xec, 0x87, 0x73, 0x12, 0x92, + 0xdd, 0x61, 0x46, 0x04, 0x3c, 0x02, 0x87, 0x08, 0xd1, 0x97, 0x9d, 0x86, 0x39, 0x2b, 0x4c, 0x07, + 0x9f, 0x12, 0x05, 0xc2, 0x09, 0x29, 0x4a, 0x37, 0x49, 0x50, 0xf6, 0x44, 0x0f, 0x3d, 0x6e, 0x96, + 0x86, 0xac, 0x47, 0x5a, 0xb2, 0xd7, 0x79, 0x5a, 0xb2, 0x73, 0xf9, 0x27, 0x79, 0xfa, 0xba, 0x33, + 0x93, 0x91, 0x7d, 0x7f, 0x01, 0x2e, 0xf4, 0xde, 0x17, 0x89, 0x7a, 0xae, 0x9e, 0x3c, 0x27, 0xa5, + 0xd4, 0x73, 0x5c, 0xb6, 0x4a, 0xb0, 0x06, 0x0e, 0x73, 0x74, 0x0d, 0x66, 0x94, 0xe1, 0x71, 0xcb, + 0x6b, 0xee, 0x6b, 0xe9, 0x9d, 0x95, 0x83, 0x65, 0x23, 0x8d, 0x80, 0xbb, 0xeb, 0xa0, 0x05, 0x98, + 0x32, 0x0a, 0x6b, 0x55, 0x21, 0x43, 0x29, 0x7d, 0x60, 0xc3, 0x04, 0xe3, 0x34, 0xbe, 0xfd, 0x53, + 0x16, 0x3c, 0x9e, 0x93, 0xf5, 0x63, 0xe0, 0x28, 0x3e, 0x9b, 0x30, 0xd5, 0x36, 0xab, 0xf6, 0x09, + 0xf6, 0x65, 0xe4, 0x16, 0x51, 0x7d, 0x4d, 0x01, 0x70, 0x9a, 0xa8, 0x5d, 0x81, 0xf3, 0xbd, 0x6d, + 0x50, 0x2e, 0xff, 0xc6, 0xef, 0x5e, 0xf8, 0xc8, 0x6f, 0xfe, 0xee, 0x85, 0x8f, 0xfc, 0xd6, 0xef, + 0x5e, 0xf8, 0xc8, 0x77, 0x3f, 0xb8, 0x60, 0xfd, 0xc6, 0x83, 0x0b, 0xd6, 0x6f, 0x3e, 0xb8, 0x60, + 0xfd, 0xd6, 0x83, 0x0b, 0xd6, 0xef, 0x3c, 0xb8, 0x60, 0x7d, 0xe5, 0xf7, 0x2e, 0x7c, 0xe4, 0xad, + 0xc2, 0xde, 0x0b, 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x04, 0xe1, 0xa5, 0xbc, 0x39, 0xec, 0x00, + 0x00, } diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 020f1183bbd..ff865d5d574 100644 --- 
a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -218,6 +218,15 @@ message CSIPersistentVolumeSource { // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference controllerExpandSecretRef = 9; } // Represents a source location of a volume to mount, managed by an external CSI driver diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 30125a26144..a1ffef0cb2e 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -1692,6 +1692,15 @@ type CSIPersistentVolumeSource struct { // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` } // Represents a source location of a volume to mount, managed by an external CSI driver diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 8f651d1484e..4a533d18f29 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -126,6 +126,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go index 2672a930dd1..2d7741cca81 100644 --- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -237,6 +237,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } From 121e4741463043eac188bb4eed51f07122262d69 Mon Sep 17 00:00:00 2001 From: Mike Spreitzer Date: Thu, 9 May 2019 01:34:02 -0400 Subject: [PATCH 067/194] Made the comment on SharedInformer give a complete description This comment formerly contained only a contrast with "standard informer", but there is no longer such a thing so the comment lacked much important information. --- .../client-go/tools/cache/shared_informer.go | 80 +++++++++++++++---- 1 file changed, 66 insertions(+), 14 deletions(-) diff --git a/staging/src/k8s.io/client-go/tools/cache/shared_informer.go b/staging/src/k8s.io/client-go/tools/cache/shared_informer.go index 9715d344ecc..d76ade8e612 100644 --- a/staging/src/k8s.io/client-go/tools/cache/shared_informer.go +++ b/staging/src/k8s.io/client-go/tools/cache/shared_informer.go @@ -31,31 +31,83 @@ import ( "k8s.io/klog" ) -// SharedInformer has a shared data cache and is capable of distributing notifications for changes -// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is -// one behavior change compared to a standard Informer. When you receive a notification, the cache -// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend -// on the contents of the cache exactly matching the notification you've received in handler -// functions. If there was a create, followed by a delete, the cache may NOT have your item. This -// has advantages over the broadcaster since it allows us to share a common cache across many -// controllers. Extending the broadcaster would have required us keep duplicate caches for each -// watch. +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace, and name. One SharedInformer provides linkage to objects +// of a particular API group and kind/resource. The linked object +// collection of a SharedInformer may be further restricted to one +// namespace and/or by label selector and/or field selector. +// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// A state is either "absent" or present with a ResourceVersion and +// other appropriate content. 
+// +// A SharedInformer maintains a local cache, exposed by Store(), of +// the state of each relevant object. This cache is eventually +// consistent with the authoritative state. This means that, unless +// prevented by persistent communication problems, if ever a +// particular object ID X is authoritatively associated with a state S +// then for every SharedInformer I whose collection includes (X, S) +// eventually either (1) I's cache associates X with S or a later +// state of X, (2) I is stopped, or (3) the authoritative state +// service for X terminates. To be formally complete, we say that the +// absent state meets any restriction by label selector or field +// selector. +// +// As a simple example, if a collection of objects is henceforth +// unchanging and a SharedInformer is created that links to that +// collection then that SharedInformer's cache eventually holds an +// exact copy of that collection (unless it is stopped too soon, the +// authoritative state service ends, or communication problems between +// the two persistently thwart achievement). +// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in Store() are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client, +// eventually either the SharedInformer is stopped or the client is +// notified of the update. These notifications happen after the +// corresponding cache update and, in the case of a +// SharedIndexInformer, after the corresponding index updates. It is +// possible that additional cache and index updates happen before such +// a prescribed notification. For a given SharedInformer and client, +// all notifications are delivered sequentially. For a given +// SharedInformer, client, and object ID, the notifications are +// delivered in order. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. type SharedInformer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. AddEventHandler(handler ResourceEventHandler) - // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is - // no coordination between different handlers. + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer using the specified resync period. The resync + // operation consists of delivering to the handler a create + // notification for every object in the informer's local cache; it + // does not add any interactions with the authoritative storage. AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) - // GetStore returns the Store. 
+ // GetStore returns the informer's local cache as a Store. GetStore() Store // GetController gives back a synthetic interface that "votes" to start the informer GetController() Controller // Run starts the shared informer, which will be stopped when stopCh is closed. Run(stopCh <-chan struct{}) - // HasSynced returns true if the shared informer's store has synced. + // HasSynced returns true if the shared informer's store has been + // informed by at least one full LIST of the authoritative state + // of the informer's object collection. This is unrelated to "resync". HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not From beba9921aa1a73431bf322ffa17df6b2aa314b0c Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Thu, 9 May 2019 11:25:12 -0700 Subject: [PATCH 068/194] Bump cluster-proportional-autoscaler to 1.6.0 --- .../typha-horizontal-autoscaler-deployment.yaml | 2 +- .../dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml index 85c477a685b..82c5a935db9 100644 --- a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml @@ -24,7 +24,7 @@ spec: supplementalGroups: [ 65534 ] fsGroup: 65534 containers: - - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.6.0 name: autoscaler command: - /cluster-proportional-autoscaler diff --git a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index 338756aa1c4..49fd35e76a7 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -85,7 +85,7 @@ spec: fsGroup: 65534 containers: - name: autoscaler - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0 + image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.6.0 resources: requests: cpu: "20m" From 2576713a403d2d05a5280dd93d24d3a9d2e8a83c Mon Sep 17 00:00:00 2001 From: Jake Sanders Date: Wed, 8 May 2019 09:21:09 -0700 Subject: [PATCH 069/194] when disabled, don't create the API server's insecure port mapping --- cluster/gce/gci/apiserver_manifest_test.go | 1 + cluster/gce/gci/configure-helper.sh | 8 ++++++++ cluster/gce/manifests/kube-apiserver.manifest | 10 ++++------ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/cluster/gce/gci/apiserver_manifest_test.go b/cluster/gce/gci/apiserver_manifest_test.go index f2b91cde61f..3c7ce508428 100644 --- a/cluster/gce/gci/apiserver_manifest_test.go +++ b/cluster/gce/gci/apiserver_manifest_test.go @@ -49,6 +49,7 @@ readonly APISERVER_SERVER_KEY_PATH=/foo/bar readonly APISERVER_CLIENT_CERT_PATH=/foo/bar readonly CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," readonly CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," +readonly INSECURE_PORT_MAPPING="{ \"name\": \"local\", \"containerPort\": 8080, \"hostPort\": 
8080}," readonly DOCKER_REGISTRY="k8s.gcr.io" readonly ENABLE_LEGACY_ABAC=false readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 94cc3b70e0d..12ac4efbec4 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1498,6 +1498,7 @@ function start-etcd-servers { # DOCKER_REGISTRY # FLEXVOLUME_HOSTPATH_MOUNT # FLEXVOLUME_HOSTPATH_VOLUME +# INSECURE_PORT_MAPPING function compute-master-manifest-variables { CLOUD_CONFIG_OPT="" CLOUD_CONFIG_VOLUME="" @@ -1518,6 +1519,11 @@ function compute-master-manifest-variables { FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true}," FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}}," fi + + INSECURE_PORT_MAPPING="" + if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then + INSECURE_PORT_MAPPING="{ \"name\": \"local\", \"containerPort\": 8080, \"hostPort\": 8080}," + fi } # A helper function that bind mounts kubelet dirs for running mount in a chroot @@ -1542,6 +1548,7 @@ function prepare-mounter-rootfs { # CLOUD_CONFIG_VOLUME # CLOUD_CONFIG_MOUNT # DOCKER_REGISTRY +# INSECURE_PORT_MAPPING function start-kube-apiserver { echo "Start kubernetes api-server" prepare-log-file "${KUBE_API_SERVER_LOG_PATH:-/var/log/kube-apiserver.log}" @@ -1858,6 +1865,7 @@ function start-kube-apiserver { sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}" sed -i -e "s@{{secure_port}}@443@g" "${src_file}" + sed -i -e "s@{{insecure_port_mapping}}@${INSECURE_PORT_MAPPING}@g" "${src_file}" sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}" diff --git a/cluster/gce/manifests/kube-apiserver.manifest b/cluster/gce/manifests/kube-apiserver.manifest index 636f10f4588..2b93941b8ba 100644 --- a/cluster/gce/manifests/kube-apiserver.manifest +++ b/cluster/gce/manifests/kube-apiserver.manifest @@ -53,13 +53,11 @@ "timeoutSeconds": 15 }, "ports":[ + {{insecure_port_mapping}} { "name": "https", - "containerPort": {{secure_port}}, - "hostPort": {{secure_port}}},{ - "name": "local", - "containerPort": 8080, - "hostPort": 8080} - ], + "containerPort": {{secure_port}}, + "hostPort": {{secure_port}}} + ], "volumeMounts": [ {{kms_socket_mount}} {{encryption_provider_mount}} From 2a7202b71f1dd2030d0ebc3b380f38584d62bc98 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Thu, 9 May 2019 19:11:09 +0000 Subject: [PATCH 070/194] Use framework.ExpectNoError() for e2e/ The e2e test framework has ExpectNoError() for readable test code. This replaces Expect(err).NotTo(HaveOccurred()) with it. --- test/e2e/examples.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 950691b4dbe..5fad6423868 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -34,7 +34,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework/testfiles" . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" ) const ( @@ -77,7 +76,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { passed := true checkRestart := func(podName string, timeout time.Duration) { err := framework.WaitForPodNameRunningInNamespace(c, podName, ns) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) @@ -123,11 +122,11 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("checking if secret was read correctly") _, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) }) @@ -141,13 +140,13 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { By("creating the pod") framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("checking if name and namespace were passed correctly") _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) }) }) From 8542b24e6af6e550021c6a6564e392cabc1ac05b Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Mon, 29 Apr 2019 14:27:54 +0200 Subject: [PATCH 071/194] apiextensions: add NonStructuralSchema condition controller --- .../pkg/apiserver/apiserver.go | 3 + .../nonstructuralschema_controller.go | 236 ++++++++++++++++++ 2 files changed, 239 insertions(+) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go index 4084910359c..1a1496ec8f6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go @@ -30,6 +30,7 @@ import ( internalinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" "k8s.io/apiextensions-apiserver/pkg/controller/establish" "k8s.io/apiextensions-apiserver/pkg/controller/finalizer" + "k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema" openapicontroller "k8s.io/apiextensions-apiserver/pkg/controller/openapi" "k8s.io/apiextensions-apiserver/pkg/controller/status" apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" @@ -195,6 +196,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) crdController := NewDiscoveryController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), versionDiscoveryHandler, groupDiscoveryHandler) namingController := status.NewNamingConditionController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), crdClient.Apiextensions()) + nonStructuralSchemaController := nonstructuralschema.NewConditionController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), crdClient.Apiextensions()) finalizingController := finalizer.NewCRDFinalizer( s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), crdClient.Apiextensions(), @@ -217,6 +219,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) go crdController.Run(context.StopCh) go namingController.Run(context.StopCh) go establishingController.Run(context.StopCh) + go nonStructuralSchemaController.Run(5, context.StopCh) go finalizingController.Run(5, context.StopCh) return nil }) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go new file mode 100644 index 00000000000..1986cedd2cc --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go @@ -0,0 +1,236 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nonstructuralschema + +import ( + "fmt" + "sort" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" + client "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion" + informers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion" + listers "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion" +) + +// ConditionController is maintaining the NonStructuralSchema condition. +type ConditionController struct { + crdClient client.CustomResourceDefinitionsGetter + + crdLister listers.CustomResourceDefinitionLister + crdSynced cache.InformerSynced + + // To allow injection for testing. + syncFn func(key string) error + + queue workqueue.RateLimitingInterface +} + +// NewConditionController constructs a non-structural schema condition controller. +func NewConditionController( + crdInformer informers.CustomResourceDefinitionInformer, + crdClient client.CustomResourceDefinitionsGetter, +) *ConditionController { + c := &ConditionController{ + crdClient: crdClient, + crdLister: crdInformer.Lister(), + crdSynced: crdInformer.Informer().HasSynced, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "non_structural_schema_condition_controller"), + } + + crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addCustomResourceDefinition, + UpdateFunc: c.updateCustomResourceDefinition, + DeleteFunc: nil, + }) + + c.syncFn = c.sync + + return c +} + +func calculateCondition(in *apiextensions.CustomResourceDefinition) *apiextensions.CustomResourceDefinitionCondition { + cond := &apiextensions.CustomResourceDefinitionCondition{ + Type: apiextensions.NonStructuralSchema, + Status: apiextensions.ConditionUnknown, + } + + allErrs := field.ErrorList{} + + if in.Spec.Validation != nil && in.Spec.Validation.OpenAPIV3Schema != nil { + s, err := schema.NewStructural(in.Spec.Validation.OpenAPIV3Schema) + if err != nil { + cond.Reason = "StructuralError" + cond.Message = fmt.Sprintf("failed to check global validation schema: %v", err) + return cond + } + + pth := field.NewPath("spec", "validation", "openAPIV3Schema") + + allErrs = append(allErrs, schema.ValidateStructural(s, pth)...) + } + + for _, v := range in.Spec.Versions { + if v.Schema == nil || v.Schema.OpenAPIV3Schema == nil { + continue + } + + s, err := schema.NewStructural(v.Schema.OpenAPIV3Schema) + if err != nil { + cond.Reason = "StructuralError" + cond.Message = fmt.Sprintf("failed to check validation schema for version %s: %v", v.Name, err) + return cond + } + + pth := field.NewPath("spec", "version").Key(v.Name).Child("schema", "openAPIV3Schema") + + allErrs = append(allErrs, schema.ValidateStructural(s, pth)...) + } + + if len(allErrs) == 0 { + return nil + } + + // sort error messages. Otherwise, the condition message will change every sync due to + // randomized map iteration. 
+ sort.Slice(allErrs, func(i, j int) bool { + return allErrs[i].Error() < allErrs[j].Error() + }) + + cond.Status = apiextensions.ConditionTrue + cond.Reason = "Violations" + cond.Message = allErrs.ToAggregate().Error() + + return cond +} + +func (c *ConditionController) sync(key string) error { + inCustomResourceDefinition, err := c.crdLister.Get(key) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + // check old condition + cond := calculateCondition(inCustomResourceDefinition) + old := apiextensions.FindCRDCondition(inCustomResourceDefinition, apiextensions.NonStructuralSchema) + + if cond == nil && old == nil { + return nil + } + if cond != nil && old != nil && old.Status == cond.Status && old.Reason == cond.Reason && old.Message == cond.Message { + return nil + } + + // update condition + crd := inCustomResourceDefinition.DeepCopy() + if cond == nil { + apiextensions.RemoveCRDCondition(crd, apiextensions.NonStructuralSchema) + } else { + cond.LastTransitionTime = metav1.NewTime(time.Now()) + apiextensions.SetCRDCondition(crd, *cond) + } + + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { + // deleted or changed in the meantime, we'll get called again + return nil + } + if err != nil { + return err + } + + return nil +} + +// Run starts the controller. +func (c *ConditionController) Run(threadiness int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting NonStructuralSchemaConditionController") + defer klog.Infof("Shutting down NonStructuralSchemaConditionController") + + if !cache.WaitForCacheSync(stopCh, c.crdSynced) { + return + } + + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *ConditionController) runWorker() { + for c.processNextWorkItem() { + } +} + +// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit. +func (c *ConditionController) processNextWorkItem() bool { + key, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(key) + + err := c.syncFn(key.(string)) + if err == nil { + c.queue.Forget(key) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", key, err)) + c.queue.AddRateLimited(key) + + return true +} + +func (c *ConditionController) enqueue(obj *apiextensions.CustomResourceDefinition) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", obj, err)) + return + } + + c.queue.Add(key) +} + +func (c *ConditionController) addCustomResourceDefinition(obj interface{}) { + castObj := obj.(*apiextensions.CustomResourceDefinition) + klog.V(4).Infof("Adding %s", castObj.Name) + c.enqueue(castObj) +} + +func (c *ConditionController) updateCustomResourceDefinition(obj, _ interface{}) { + castObj := obj.(*apiextensions.CustomResourceDefinition) + klog.V(4).Infof("Updating %s", castObj.Name) + c.enqueue(castObj) +} From 9581919eaf0e5bf7e6208645c07ac33a2a15a172 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Thu, 9 May 2019 10:43:45 +0200 Subject: [PATCH 072/194] apiextensions: add scructural schema validation tests --- .../test/integration/validation_test.go | 788 ++++++++++++++++++ 1 file changed, 788 insertions(+) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index 0874a1ee572..e1f379763e1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -22,10 +22,12 @@ import ( "testing" "time" + clientschema "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/yaml" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" @@ -539,6 +541,792 @@ func TestForbiddenFieldsInSchema(t *testing.T) { } } +func TestNonStructuralSchemaConditionUpdate(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, apiextensionsfeatures.CustomResourceWebhookConversion, true)() + + tearDown, apiExtensionClient, _, err := fixtures.StartDefaultServerWithClients(t) + if err != nil { + t.Fatal(err) + } + defer tearDown() + + manifest := ` +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: foos.tests.apiextensions.k8s.io +spec: + group: tests.apiextensions.k8s.io + version: v1beta1 + names: + plural: foos + singular: foo + kind: Foo + listKind: Foolist + scope: Namespaced + validation: + openAPIV3Schema: + type: object + properties: + a: {} + versions: + - name: v1beta1 + served: true + storage: true +` + + // decode CRD manifest + obj, _, err := clientschema.Codecs.UniversalDeserializer().Decode([]byte(manifest), nil, nil) + if err != nil { + t.Fatalf("failed decoding of: %v\n\n%s", err, manifest) + } + crd := obj.(*apiextensionsv1beta1.CustomResourceDefinition) + name := crd.Name + + // save schema for later + origSchema := crd.Spec.Validation.OpenAPIV3Schema + + // create CRDs + t.Logf("Creating CRD %s", crd.Name) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + if err != nil { + t.Fatalf("unexpected create error: %v", err) + } + + // wait for condition with violations + t.Log("Waiting for NonStructuralSchema condition") + var cond *apiextensionsv1beta1.CustomResourceDefinitionCondition + err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + cond = findCRDCondition(obj, apiextensionsv1beta1.NonStructuralSchema) + return cond != nil, nil + }) + if err != nil { + t.Fatalf("unexpected error waiting for NonStructuralSchema condition: %v", cond) + } + if v := "spec.validation.openAPIV3Schema.properties[a].type: Required value: must not be empty for specified object fields"; !strings.Contains(cond.Message, v) { + t.Fatalf("expected violation %q, but got: %v", v, cond.Message) + } + + // remove schema + t.Log("Remove schema") + for retry := 0; retry < 5; retry++ { + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, 
metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected get error: %v", err) + } + crd.Spec.Validation = nil + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + if apierrors.IsConflict(err) { + continue + } + if err != nil { + t.Fatalf("unexpected update error: %v", err) + } + } + if err != nil { + t.Fatalf("unexpected update error: %v", err) + } + + // wait for condition to go away + t.Log("Wait for condition to disappear") + err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + cond = findCRDCondition(obj, apiextensionsv1beta1.NonStructuralSchema) + return cond == nil, nil + }) + if err != nil { + t.Fatalf("unexpected error waiting for NonStructuralSchema condition: %v", cond) + } + + // readd schema + t.Log("Readd schema") + for retry := 0; retry < 5; retry++ { + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected get error: %v", err) + } + crd.Spec.Validation = &apiextensionsv1beta1.CustomResourceValidation{OpenAPIV3Schema: origSchema} + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + if apierrors.IsConflict(err) { + continue + } + if err != nil { + t.Fatalf("unexpected update error: %v", err) + } + } + if err != nil { + t.Fatalf("unexpected update error: %v", err) + } + + // wait for condition with violations + t.Log("Wait for condition to reappear") + err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + cond = findCRDCondition(obj, apiextensionsv1beta1.NonStructuralSchema) + return cond != nil, nil + }) + if err != nil { + t.Fatalf("unexpected error waiting for NonStructuralSchema condition: %v", cond) + } + if v := "spec.validation.openAPIV3Schema.properties[a].type: Required value: must not be empty for specified object fields"; !strings.Contains(cond.Message, v) { + t.Fatalf("expected violation %q, but got: %v", v, cond.Message) + } +} + +func TestNonStructuralSchemaCondition(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, apiextensionsfeatures.CustomResourceWebhookConversion, true)() + + tearDown, apiExtensionClient, _, err := fixtures.StartDefaultServerWithClients(t) + if err != nil { + t.Fatal(err) + } + defer tearDown() + + tmpl := ` +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +spec: + version: v1beta1 + names: + plural: foos + singular: foo + kind: Foo + listKind: Foolist + scope: Namespaced + validation: GLOBAL_SCHEMA + versions: + - name: v1beta1 + served: true + storage: true + schema: V1BETA1_SCHEMA + - name: v1 + served: true + schema: V1_SCHEMA +` + + type Test struct { + desc string + globalSchema, v1Schema, v1beta1Schema string + expectedCreateError bool + expectedViolations []string + unexpectedViolations []string + } + tests := []Test{ + {"empty", "", "", "", false, nil, nil}, + { + desc: "int-or-string and preserve-unknown-fields true", + globalSchema: ` +x-kubernetes-preserve-unknown-fields: true +x-kubernetes-int-or-string: true +`, + expectedViolations: []string{ + 
"spec.validation.openAPIV3Schema.x-kubernetes-preserve-unknown-fields: Invalid value: true: must be false if x-kubernetes-int-or-string is true", + }, + }, + { + desc: "int-or-string and embedded-resource true", + globalSchema: ` +type: object +x-kubernetes-embedded-resource: true +x-kubernetes-int-or-string: true +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.x-kubernetes-embedded-resource: Invalid value: true: must be false if x-kubernetes-int-or-string is true", + }, + }, + { + desc: "embedded-resource without preserve-unknown-fields", + globalSchema: ` +type: object +x-kubernetes-embedded-resource: true +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties: Required value: must not be empty if x-kubernetes-embedded-resource is true without x-kubernetes-preserve-unknown-fields", + }, + }, + { + desc: "embedded-resource without preserve-unknown-fields, but properties", + globalSchema: ` +type: object +x-kubernetes-embedded-resource: true +properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object +`, + expectedViolations: []string{}, + }, + { + desc: "embedded-resource with preserve-unknown-fields", + globalSchema: ` +type: object +x-kubernetes-embedded-resource: true +x-kubernetes-preserve-unknown-fields: true +`, + expectedViolations: []string{}, + }, + { + desc: "embedded-resource with wrong type", + globalSchema: ` +type: array +x-kubernetes-embedded-resource: true +x-kubernetes-preserve-unknown-fields: true +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.type: Invalid value: \"array\": must be object if x-kubernetes-embedded-resource is true", + }, + }, + { + desc: "embedded-resource with empty type", + globalSchema: ` +type: "" +x-kubernetes-embedded-resource: true +x-kubernetes-preserve-unknown-fields: true +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.type: Required value: must be object if x-kubernetes-embedded-resource is true", + }, + }, + { + desc: "no top-level type", + globalSchema: ` +type: "" +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.type: Required value: must not be empty at the root", + }, + }, + { + desc: "non-object top-level type", + globalSchema: ` +type: "integer" +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.type: Invalid value: \"integer\": must be object at the root", + }, + }, + { + desc: "forbidden in nested value validation", + globalSchema: ` +type: object +properties: + foo: + type: string +not: + type: string + additionalProperties: true + title: hello + description: world + nullable: true +allOf: +- properties: + foo: + type: string + additionalProperties: true + title: hello + description: world + nullable: true +anyOf: +- items: + type: string + additionalProperties: true + title: hello + description: world + nullable: true +oneOf: +- properties: + foo: + type: string + additionalProperties: true + title: hello + description: world + nullable: true +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.anyOf[0].items.type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.anyOf[0].items.additionalProperties: Forbidden: must be undefined to be structural", + "spec.validation.openAPIV3Schema.anyOf[0].items.title: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.anyOf[0].items.description: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.anyOf[0].items.nullable: 
Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[foo].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[foo].additionalProperties: Forbidden: must be undefined to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[foo].title: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[foo].description: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[foo].nullable: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[foo].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[foo].additionalProperties: Forbidden: must be undefined to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[foo].title: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[foo].description: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[foo].nullable: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.not.type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.not.additionalProperties: Forbidden: must be undefined to be structural", + "spec.validation.openAPIV3Schema.not.title: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.not.description: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.not.nullable: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.items: Required value: because it is defined in spec.validation.openAPIV3Schema.anyOf[0].items", + }, + unexpectedViolations: []string{ + "spec.validation.openAPIV3Schema.not.default", + }, + }, + { + desc: "forbidden vendor extensions in nested value validation", + globalSchema: ` +type: object +properties: + int-or-string: + x-kubernetes-int-or-string: true + embedded-resource: + type: object + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true +not: + properties: + int-or-string: + x-kubernetes-int-or-string: true + embedded-resource: + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true +allOf: +- properties: + int-or-string: + x-kubernetes-int-or-string: true + embedded-resource: + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true +anyOf: +- properties: + int-or-string: + x-kubernetes-int-or-string: true + embedded-resource: + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true +oneOf: +- properties: + int-or-string: + x-kubernetes-int-or-string: true + embedded-resource: + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.allOf[0].properties[embedded-resource].x-kubernetes-preserve-unknown-fields: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[embedded-resource].x-kubernetes-embedded-resource: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.allOf[0].properties[int-or-string].x-kubernetes-int-or-string: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.anyOf[0].properties[embedded-resource].x-kubernetes-preserve-unknown-fields: Forbidden: must be false to be 
structural", + "spec.validation.openAPIV3Schema.anyOf[0].properties[embedded-resource].x-kubernetes-embedded-resource: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.anyOf[0].properties[int-or-string].x-kubernetes-int-or-string: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[embedded-resource].x-kubernetes-preserve-unknown-fields: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[embedded-resource].x-kubernetes-embedded-resource: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.oneOf[0].properties[int-or-string].x-kubernetes-int-or-string: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.not.properties[embedded-resource].x-kubernetes-preserve-unknown-fields: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.not.properties[embedded-resource].x-kubernetes-embedded-resource: Forbidden: must be false to be structural", + "spec.validation.openAPIV3Schema.not.properties[int-or-string].x-kubernetes-int-or-string: Forbidden: must be false to be structural", + }, + }, + { + desc: "missing types", + globalSchema: ` +properties: + foo: + properties: + a: {} + bar: + items: + additionalProperties: + properties: + a: {} + items: {} + abc: + additionalProperties: + properties: + a: + items: + additionalProperties: + items: + json: + x-kubernetes-preserve-unknown-fields: true + properties: + a: {} + int-or-string: + x-kubernetes-int-or-string: true + properties: + a: {} +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties[foo].properties[a].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[foo].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[int-or-string].properties[a].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[json].properties[a].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[abc].additionalProperties.properties[a].items.additionalProperties.type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[abc].additionalProperties.properties[a].items.type: Required value: must not be empty for specified array items", + "spec.validation.openAPIV3Schema.properties[abc].additionalProperties.properties[a].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[abc].additionalProperties.type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[abc].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[bar].items.additionalProperties.items.type: Required value: must not be empty for specified array items", + "spec.validation.openAPIV3Schema.properties[bar].items.additionalProperties.properties[a].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[bar].items.additionalProperties.type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[bar].items.type: Required value: must not be empty for specified array items", + 
"spec.validation.openAPIV3Schema.properties[bar].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.type: Required value: must not be empty at the root", + }, + }, + { + desc: "int-or-string variants", + globalSchema: ` +type: object +properties: + a: + x-kubernetes-int-or-string: true + b: + x-kubernetes-int-or-string: true + anyOf: + - type: integer + - type: string + allOf: + - pattern: abc + c: + x-kubernetes-int-or-string: true + allOf: + - anyOf: + - type: integer + - type: string + - pattern: abc + - pattern: abc + d: + x-kubernetes-int-or-string: true + anyOf: + - type: integer + - type: string + pattern: abc + e: + x-kubernetes-int-or-string: true + allOf: + - anyOf: + - type: integer + - type: string + pattern: abc + - pattern: abc + f: + x-kubernetes-int-or-string: true + anyOf: + - type: integer + - type: string + - pattern: abc + g: + x-kubernetes-int-or-string: true + anyOf: + - type: string + - type: integer +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties[d].anyOf[0].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[d].anyOf[1].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[e].allOf[0].anyOf[0].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[e].allOf[0].anyOf[1].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[f].anyOf[0].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[f].anyOf[1].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[g].anyOf[0].type: Forbidden: must be empty to be structural", + "spec.validation.openAPIV3Schema.properties[g].anyOf[1].type: Forbidden: must be empty to be structural", + }, + unexpectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties[a]", + "spec.validation.openAPIV3Schema.properties[b]", + "spec.validation.openAPIV3Schema.properties[c]", + }, + }, + { + desc: "structural incomplete", + globalSchema: ` +type: object +properties: + b: + type: object + properties: + b: + type: array + c: + type: array + items: + type: object + d: + type: array +not: + properties: + a: {} + b: + not: + properties: + a: {} + b: + items: {} + c: + items: + not: + items: + properties: + a: {} + d: + items: {} +allOf: +- properties: + e: {} +anyOf: +- properties: + f: {} +oneOf: +- properties: + g: {} +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties[d].items: Required value: because it is defined in spec.validation.openAPIV3Schema.not.properties[d].items", + "spec.validation.openAPIV3Schema.properties[a]: Required value: because it is defined in spec.validation.openAPIV3Schema.not.properties[a]", + "spec.validation.openAPIV3Schema.properties[b].properties[a]: Required value: because it is defined in spec.validation.openAPIV3Schema.not.properties[b].not.properties[a]", + "spec.validation.openAPIV3Schema.properties[b].properties[b].items: Required value: because it is defined in spec.validation.openAPIV3Schema.not.properties[b].not.properties[b].items", + "spec.validation.openAPIV3Schema.properties[c].items.items: Required value: because it is defined in spec.validation.openAPIV3Schema.not.properties[c].items.not.items", + "spec.validation.openAPIV3Schema.properties[e]: Required value: because it is defined in 
spec.validation.openAPIV3Schema.allOf[0].properties[e]", + "spec.validation.openAPIV3Schema.properties[f]: Required value: because it is defined in spec.validation.openAPIV3Schema.anyOf[0].properties[f]", + "spec.validation.openAPIV3Schema.properties[g]: Required value: because it is defined in spec.validation.openAPIV3Schema.oneOf[0].properties[g]", + }, + }, + { + desc: "structural complete", + globalSchema: ` +type: object +properties: + a: + type: string + b: + type: object + properties: + a: + type: string + b: + type: array + items: + type: string + c: + type: array + items: + type: array + items: + type: object + properties: + a: + type: string + d: + type: array + items: + type: string + e: + type: string + f: + type: string + g: + type: string +not: + properties: + a: {} + b: + not: + properties: + a: {} + b: + items: {} + c: + items: + not: + items: + properties: + a: {} + d: + items: {} +allOf: +- properties: + e: {} +anyOf: +- properties: + f: {} +oneOf: +- properties: + g: {} +`, + expectedViolations: nil, + }, + { + desc: "invalid v1beta1 schema", + v1beta1Schema: ` +type: object +properties: + a: {} +not: + properties: + b: {} +`, + v1Schema: ` +type: object +properties: + a: + type: string +`, + expectedViolations: []string{ + "spec.version[v1beta1].schema.openAPIV3Schema.properties[a].type: Required value: must not be empty for specified object fields", + "spec.version[v1beta1].schema.openAPIV3Schema.properties[b]: Required value: because it is defined in spec.version[v1beta1].schema.openAPIV3Schema.not.properties[b]", + }, + }, + { + desc: "invalid v1beta1 and v1 schemas", + v1beta1Schema: ` +type: object +properties: + a: {} +not: + properties: + b: {} +`, + v1Schema: ` +type: object +properties: + c: {} +not: + properties: + d: {} +`, + expectedViolations: []string{ + "spec.version[v1beta1].schema.openAPIV3Schema.properties[a].type: Required value: must not be empty for specified object fields", + "spec.version[v1beta1].schema.openAPIV3Schema.properties[b]: Required value: because it is defined in spec.version[v1beta1].schema.openAPIV3Schema.not.properties[b]", + "spec.version[v1].schema.openAPIV3Schema.properties[c].type: Required value: must not be empty for specified object fields", + "spec.version[v1].schema.openAPIV3Schema.properties[d]: Required value: because it is defined in spec.version[v1].schema.openAPIV3Schema.not.properties[d]", + }, + }, + } + + for i := range tests { + tst := tests[i] + t.Run(tst.desc, func(t *testing.T) { + // plug in schemas + manifest := strings.NewReplacer( + "GLOBAL_SCHEMA", toValidationJSON(tst.globalSchema), + "V1BETA1_SCHEMA", toValidationJSON(tst.v1beta1Schema), + "V1_SCHEMA", toValidationJSON(tst.v1Schema), + ).Replace(tmpl) + + // decode CRD manifest + obj, _, err := clientschema.Codecs.UniversalDeserializer().Decode([]byte(manifest), nil, nil) + if err != nil { + t.Fatalf("failed decoding of: %v\n\n%s", err, manifest) + } + crd := obj.(*apiextensionsv1beta1.CustomResourceDefinition) + crd.Spec.Group = fmt.Sprintf("tests-%d.apiextension.k8s.io", i) + crd.Name = fmt.Sprintf("foos.%s", crd.Spec.Group) + + // create CRDs + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + if tst.expectedCreateError && err == nil { + t.Fatalf("expected error, got none") + } else if !tst.expectedCreateError && err != nil { + t.Fatalf("unexpected create error: %v", err) + } + if err != nil { + return + } + + if len(tst.expectedViolations) == 0 { + // wait for condition to not appear + var cond 
*apiextensionsv1beta1.CustomResourceDefinitionCondition + err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + cond = findCRDCondition(obj, apiextensionsv1beta1.NonStructuralSchema) + if cond == nil { + return false, nil + } + return true, nil + }) + if err != wait.ErrWaitTimeout { + t.Fatalf("expected no NonStructuralSchema condition, but got one: %v", cond) + } + return + } + + // wait for condition to appear with the given violations + var cond *apiextensionsv1beta1.CustomResourceDefinitionCondition + err = wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + cond = findCRDCondition(obj, apiextensionsv1beta1.NonStructuralSchema) + if cond != nil { + return true, nil + } + return false, nil + }) + if err != nil { + t.Fatalf("unexpected error waiting for violations in NonStructuralSchema condition: %v", err) + } + + // check that the condition looks good + if cond.Reason != "Violations" { + t.Errorf("expected reason Violations, got: %v", cond.Reason) + } + if cond.Status != apiextensionsv1beta1.ConditionTrue { + t.Errorf("expected reason True, got: %v", cond.Status) + } + + // check that we got all violations + t.Logf("Got violations: %q", cond.Message) + for _, v := range tst.expectedViolations { + if strings.Index(cond.Message, v) == -1 { + t.Errorf("expected violation %q, but didn't get it", v) + } + } + for _, v := range tst.unexpectedViolations { + if strings.Index(cond.Message, v) != -1 { + t.Errorf("unexpected violation %q", v) + } + } + }) + } +} + +// findCRDCondition returns the condition you're looking for or nil. +func findCRDCondition(crd *apiextensionsv1beta1.CustomResourceDefinition, conditionType apiextensionsv1beta1.CustomResourceDefinitionConditionType) *apiextensionsv1beta1.CustomResourceDefinitionCondition { + for i := range crd.Status.Conditions { + if crd.Status.Conditions[i].Type == conditionType { + return &crd.Status.Conditions[i] + } + } + + return nil +} + +func toValidationJSON(yml string) string { + if len(yml) == 0 { + return "null" + } + bs, err := yaml.ToJSON([]byte(yml)) + if err != nil { + panic(err) + } + return fmt.Sprintf("{\"openAPIV3Schema\": %s}", string(bs)) +} + func float64Ptr(f float64) *float64 { return &f } From c3996213e0b3fbb39277fd5a5eaecb3212aa6228 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 9 May 2019 13:11:30 +0200 Subject: [PATCH 073/194] apiextensions: disallow additionalProperties at the root --- .../pkg/apiserver/schema/validation.go | 8 ++++++-- .../test/integration/validation_test.go | 10 ++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go index f0bc9fa62bf..1e59ee18303 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go @@ -55,6 +55,7 @@ const ( // - ... zero or more // // * every specified field or array in s is also specified outside of value validation. +// * additionalProperties at the root is not allowed. 
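The structural-schema rules listed in the comment above can be exercised directly with the helpers this series introduces. The following sketch mirrors the NewStructural/ValidateStructural sequence used by the NonStructuralSchema condition controller earlier in this series; the standalone program around it, the deliberately incomplete schema (a property "a" with no type), and the commented output are assumptions for illustration, not code from the patch.

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// A deliberately non-structural schema: property "a" carries no type,
	// which the validator reports as a violation.
	spec := &apiextensions.JSONSchemaProps{
		Type: "object",
		Properties: map[string]apiextensions.JSONSchemaProps{
			"a": {},
		},
	}

	// Same conversion-then-validation sequence the condition controller uses.
	s, err := schema.NewStructural(spec)
	if err != nil {
		fmt.Println("structural conversion error:", err)
		return
	}

	pth := field.NewPath("spec", "validation", "openAPIV3Schema")
	for _, e := range schema.ValidateStructural(s, pth) {
		fmt.Println(e.Error())
	}
	// Expected to report something like:
	//   spec.validation.openAPIV3Schema.properties[a].type: Required value:
	//   must not be empty for specified object fields
}

In the controller itself the resulting ErrorList is sorted and aggregated into the NonStructuralSchema condition message, which is what the integration tests in this series assert on.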
func ValidateStructural(s *Structural, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -76,7 +77,7 @@ func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) for k, v := range s.Properties { allErrs = append(allErrs, validateStructuralInvariants(&v, fieldLevel, fldPath.Child("properties").Key(k))...) } - allErrs = append(allErrs, validateGeneric(&s.Generic, fldPath)...) + allErrs = append(allErrs, validateGeneric(&s.Generic, lvl, fldPath)...) allErrs = append(allErrs, validateExtensions(&s.Extensions, fldPath)...) // detect the two IntOrString exceptions: @@ -129,7 +130,7 @@ func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) } // validateGeneric checks the generic fields of a structural schema. -func validateGeneric(g *Generic, fldPath *field.Path) field.ErrorList { +func validateGeneric(g *Generic, lvl level, fldPath *field.Path) field.ErrorList { if g == nil { return nil } @@ -137,6 +138,9 @@ func validateGeneric(g *Generic, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if g.AdditionalProperties != nil { + if lvl == rootLevel { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must not be used at the root")) + } if g.AdditionalProperties.Structural != nil { allErrs = append(allErrs, validateStructuralInvariants(g.AdditionalProperties.Structural, fieldLevel, fldPath.Child("additionalProperties"))...) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index e1f379763e1..59b0f22870f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -1048,6 +1048,16 @@ properties: "spec.validation.openAPIV3Schema.properties[c]", }, }, + { + desc: "forbidden additionalProperties at the root", + globalSchema: ` +type: object +additionalProperties: false +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.additionalProperties: Forbidden: must not be used at the root", + }, + }, { desc: "structural incomplete", globalSchema: ` From c836a2518913c90d266fdd3919fabc55b6893b63 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Mon, 29 Apr 2019 14:44:54 +0200 Subject: [PATCH 074/194] Update generated files --- api/openapi-spec/swagger.json | 12 + .../src/k8s.io/apiextensions-apiserver/BUILD | 1 + .../apiextensions/v1beta1/generated.pb.go | 454 +++++++++++------- .../apiextensions/v1beta1/generated.proto | 31 ++ .../v1beta1/zz_generated.conversion.go | 6 + .../pkg/apiserver/BUILD | 2 + .../pkg/apiserver/schema/BUILD | 44 ++ .../apiserver/schema/zz_generated.deepcopy.go | 245 ++++++++++ .../pkg/controller/nonstructuralschema/BUILD | 38 ++ .../test/integration/BUILD | 2 + vendor/modules.txt | 2 + 11 files changed, 661 insertions(+), 176 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/BUILD diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 5fad24c09f2..e5a88859755 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -16808,6 +16808,18 @@ }, "uniqueItems": { "type": "boolean" + }, + "x-kubernetes-embedded-resource": { + "description": "x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).", + "type": "boolean" + }, + "x-kubernetes-int-or-string": { + "description": "x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:\n\n1) anyOf:\n - type: integer\n - type: string\n2) allOf:\n - anyOf:\n - type: integer\n - type: string\n - ... zero or more", + "type": "boolean" + }, + "x-kubernetes-preserve-unknown-fields": { + "description": "x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. 
This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema.", + "type": "boolean" } }, "type": "object" diff --git a/staging/src/k8s.io/apiextensions-apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/BUILD index a52a7a2b2f0..946698df4d5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/BUILD @@ -47,6 +47,7 @@ filegroup( "//staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer:all-srcs", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/crdserverscheme:all-srcs", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 58cc3b64c05..c928a97d2a6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -1431,6 +1431,36 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0 } i++ + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x2 + i++ + if m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x2 + i++ + if m.XEmbeddedResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x2 + i++ + if m.XIntOrString { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -2080,6 +2110,9 @@ func (m *JSONSchemaProps) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } n += 3 + n += 3 + n += 3 + n += 3 return n } @@ -2478,6 +2511,9 @@ func (this *JSONSchemaProps) String() string { `ExternalDocs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalDocs), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, `Example:` + strings.Replace(fmt.Sprintf("%v", this.Example), "JSON", "JSON", 1) + `,`, `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + fmt.Sprintf("%v", this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, `}`, }, "") return s @@ -6603,6 +6639,66 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } } m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.XPreserveUnknownFields = bool(v != 0) + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7365,180 +7461,186 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2796 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4b, 0x6f, 0x23, 0xc7, - 0xf1, 0xdf, 0x21, 0x45, 0x89, 0x6a, 0x49, 0x2b, 0xa9, 0xd7, 0x5a, 0xcf, 0xea, 0x2f, 0x93, 0x12, - 0xfd, 0xb7, 0xa1, 0x38, 0xbb, 0x94, 0xbd, 0xb1, 0x63, 0xc7, 0x40, 0x0e, 0x22, 0x25, 0x1b, 0x72, - 0x56, 0x8f, 0x34, 0x77, 0x6d, 0x27, 0x7e, 0xb6, 0x86, 0x4d, 0x6a, 0x56, 0xf3, 0xda, 0xe9, 0x19, - 0x4a, 0x82, 0x93, 0x20, 0x0f, 0x18, 0x09, 0x82, 0x24, 0x0e, 0xe2, 0xbd, 0x04, 0x49, 0x0e, 0x4e, - 0x90, 0x4b, 0x0e, 0xc9, 0x21, 0xb9, 0x25, 0x1f, 0x60, 0x8f, 0x46, 0x4e, 0x46, 0x10, 0x10, 0x59, - 0xfa, 0x2b, 0x04, 0x08, 0xa0, 0x53, 0xd0, 0x8f, 0xe9, 0x19, 0x0e, 0xc9, 0x5d, 0xc1, 0x4b, 0x7a, - 0x73, 0xd3, 0x54, 0x55, 0xd7, 0xaf, 0xba, 0xba, 0xaa, 0xba, 0xba, 0x28, 0xd0, 0x38, 0x7c, 0x81, - 0x96, 0x4d, 0x77, 0xed, 0x30, 0xdc, 0x27, 0xbe, 0x43, 0x02, 0x42, 0xd7, 0x5a, 0xc4, 0xa9, 0xbb, - 0xfe, 0x9a, 0x64, 0x60, 0xcf, 0x24, 0xc7, 0x01, 0x71, 0xa8, 0xe9, 0x3a, 0xf4, 0x0a, 0xf6, 0x4c, - 0x4a, 0xfc, 0x16, 0xf1, 0xd7, 0xbc, 0xc3, 0x26, 0xe3, 0xd1, 0x6e, 0x81, 0xb5, 0xd6, 0x33, 0xfb, - 0x24, 0xc0, 0xcf, 0xac, 0x35, 0x89, 0x43, 0x7c, 0x1c, 0x90, 0x7a, 0xd9, 0xf3, 0xdd, 0xc0, 0x85, - 0x5f, 0x15, 0xea, 0xca, 0x5d, 0xd2, 0xef, 0x28, 0x75, 0x65, 0xef, 0xb0, 0xc9, 0x78, 0xb4, 0x5b, - 0xa0, 0x2c, 0xd5, 0x2d, 0x5e, 0x69, 0x9a, 0xc1, 0x41, 0xb8, 0x5f, 0x36, 0x5c, 0x7b, 0xad, 0xe9, - 0x36, 0xdd, 0x35, 0xae, 0x75, 0x3f, 0x6c, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0xcf, - 0xc6, 0xc6, 0xdb, 0xd8, 0x38, 0x30, 0x1d, 0xe2, 0x9f, 0xc4, 0x16, 0xdb, 0x24, 0xc0, 0x6b, 0xad, - 0x1e, 0x1b, 0x17, 0xd7, 0x06, 0xad, 0xf2, 0x43, 0x27, 0x30, 0x6d, 0xd2, 0xb3, 0xe0, 0xcb, 0xf7, - 0x5b, 0x40, 0x8d, 0x03, 0x62, 0xe3, 0xf4, 0xba, 0xd2, 0xa9, 0x06, 0xe6, 0xab, 0xae, 0xd3, 0x22, - 0x3e, 0xdb, 0x25, 0x22, 0xb7, 0x42, 0x42, 0x03, 0x58, 0x01, 0xd9, 0xd0, 0xac, 0xeb, 0xda, 0xb2, - 0xb6, 0x3a, 0x59, 0x79, 0xfa, 0x4e, 0xbb, 0x78, 0xae, 0xd3, 0x2e, 0x66, 0x6f, 0x6c, 0x6d, 0x9c, - 0xb6, 0x8b, 0x2b, 0x83, 0x90, 0x82, 0x13, 0x8f, 0xd0, 0xf2, 0x8d, 0xad, 0x0d, 0xc4, 0x16, 0xc3, - 0x97, 0xc1, 0x7c, 0x9d, 0x50, 0xd3, 0x27, 0xf5, 0xf5, 0xbd, 0xad, 0x57, 0x85, 0x7e, 0x3d, 0xc3, - 0x35, 0x5e, 0x92, 0x1a, 0xe7, 0x37, 0xd2, 0x02, 0xa8, 0x77, 0x0d, 0x7c, 0x1d, 0x4c, 0xb8, 0xfb, - 0x37, 0x89, 0x11, 0x50, 0x3d, 0xbb, 0x9c, 0x5d, 0x9d, 0xba, 0x7a, 0xa5, 0x1c, 0x9f, 0xa0, 0x32, - 0x81, 0x1f, 0x9b, 0xdc, 0x6c, 0x19, 0xe1, 0xa3, 0xcd, 0xe8, 0xe4, 0x2a, 0xb3, 0x12, 0x6d, 0x62, - 0x57, 0x68, 0x41, 0x91, 0xba, 0xd2, 0xef, 0x32, 0x00, 0x26, 0x37, 0x4f, 0x3d, 0xd7, 0xa1, 0x64, - 0x28, 0xbb, 0xa7, 0x60, 0xce, 0xe0, 0x9a, 0x03, 0x52, 0x97, 0xb8, 0x7a, 0xe6, 0xb3, 0x58, 0xaf, - 0x4b, 0xfc, 0xb9, 0x6a, 0x4a, 0x1d, 0xea, 0x01, 0x80, 0xd7, 0xc1, 0xb8, 0x4f, 0x68, 0x68, 0x05, - 0x7a, 0x76, 0x59, 0x5b, 0x9d, 0xba, 0x7a, 0x79, 0x20, 0x14, 0x8f, 0x6f, 0x16, 0x7c, 0xe5, 0xd6, - 0x33, 0xe5, 0x5a, 0x80, 
0x83, 0x90, 0x56, 0xce, 0x4b, 0xa4, 0x71, 0xc4, 0x75, 0x20, 0xa9, 0xab, - 0xf4, 0xa3, 0x0c, 0x98, 0x4b, 0x7a, 0xa9, 0x65, 0x92, 0x23, 0x78, 0x04, 0x26, 0x7c, 0x11, 0x2c, - 0xdc, 0x4f, 0x53, 0x57, 0xf7, 0xca, 0x0f, 0x94, 0x56, 0xe5, 0x9e, 0x20, 0xac, 0x4c, 0xb1, 0x33, - 0x93, 0x1f, 0x28, 0x42, 0x83, 0xef, 0x81, 0xbc, 0x2f, 0x0f, 0x8a, 0x47, 0xd3, 0xd4, 0xd5, 0xaf, - 0x0f, 0x11, 0x59, 0x28, 0xae, 0x4c, 0x77, 0xda, 0xc5, 0x7c, 0xf4, 0x85, 0x14, 0x60, 0xe9, 0xc3, - 0x0c, 0x28, 0x54, 0x43, 0x1a, 0xb8, 0x36, 0x22, 0xd4, 0x0d, 0x7d, 0x83, 0x54, 0x5d, 0x2b, 0xb4, - 0x9d, 0x0d, 0xd2, 0x30, 0x1d, 0x33, 0x60, 0xd1, 0xba, 0x0c, 0xc6, 0x1c, 0x6c, 0x13, 0x19, 0x3d, - 0xd3, 0xd2, 0xa7, 0x63, 0x3b, 0xd8, 0x26, 0x88, 0x73, 0x98, 0x04, 0x0b, 0x16, 0x99, 0x0b, 0x4a, - 0xe2, 0xfa, 0x89, 0x47, 0x10, 0xe7, 0xc0, 0x27, 0xc1, 0x78, 0xc3, 0xf5, 0x6d, 0x2c, 0xce, 0x71, - 0x32, 0x3e, 0x99, 0x97, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0e, 0x4c, 0xd5, 0x09, 0x35, 0x7c, 0xd3, - 0x63, 0xd0, 0xfa, 0x18, 0x17, 0xbe, 0x20, 0x85, 0xa7, 0x36, 0x62, 0x16, 0x4a, 0xca, 0xc1, 0xcb, - 0x20, 0xef, 0xf9, 0xa6, 0xeb, 0x9b, 0xc1, 0x89, 0x9e, 0x5b, 0xd6, 0x56, 0x73, 0x95, 0x39, 0xb9, - 0x26, 0xbf, 0x27, 0xe9, 0x48, 0x49, 0xc0, 0x65, 0x90, 0x7f, 0xa5, 0xb6, 0xbb, 0xb3, 0x87, 0x83, - 0x03, 0x7d, 0x9c, 0x23, 0x8c, 0x31, 0x69, 0x94, 0xbf, 0x29, 0xa9, 0xa5, 0x7f, 0x66, 0x80, 0x9e, - 0xf6, 0x4a, 0xe4, 0x52, 0xf8, 0x12, 0xc8, 0xd3, 0x80, 0x55, 0x9c, 0xe6, 0x89, 0xf4, 0xc9, 0x53, - 0x11, 0x58, 0x4d, 0xd2, 0x4f, 0xdb, 0xc5, 0x8b, 0xf1, 0x8a, 0x88, 0xca, 0xfd, 0xa1, 0xd6, 0xc2, - 0xdf, 0x68, 0xe0, 0xc2, 0x11, 0xd9, 0x3f, 0x70, 0xdd, 0xc3, 0xaa, 0x65, 0x12, 0x27, 0xa8, 0xba, - 0x4e, 0xc3, 0x6c, 0xca, 0x18, 0x40, 0x0f, 0x18, 0x03, 0xaf, 0xf5, 0x6a, 0xae, 0x3c, 0xda, 0x69, - 0x17, 0x2f, 0xf4, 0x61, 0xa0, 0x7e, 0x76, 0xc0, 0xd7, 0x81, 0x6e, 0xa4, 0x92, 0x44, 0x16, 0x30, - 0x51, 0xb6, 0x26, 0x2b, 0x4b, 0x9d, 0x76, 0x51, 0xaf, 0x0e, 0x90, 0x41, 0x03, 0x57, 0x97, 0x7e, - 0x90, 0x4d, 0xbb, 0x37, 0x11, 0x6e, 0xef, 0x82, 0x3c, 0x4b, 0xe3, 0x3a, 0x0e, 0xb0, 0x4c, 0xc4, - 0xa7, 0xcf, 0x96, 0xf4, 0xa2, 0x66, 0x6c, 0x93, 0x00, 0x57, 0xa0, 0x3c, 0x10, 0x10, 0xd3, 0x90, - 0xd2, 0x0a, 0xbf, 0x0d, 0xc6, 0xa8, 0x47, 0x0c, 0xe9, 0xe8, 0x37, 0x1e, 0x34, 0xd9, 0x06, 0x6c, - 0xa4, 0xe6, 0x11, 0x23, 0xce, 0x05, 0xf6, 0x85, 0x38, 0x2c, 0x7c, 0x5f, 0x03, 0xe3, 0x94, 0x17, - 0x28, 0x59, 0xd4, 0xde, 0x1a, 0x95, 0x05, 0xa9, 0x2a, 0x28, 0xbe, 0x91, 0x04, 0x2f, 0xfd, 0x3b, - 0x03, 0x56, 0x06, 0x2d, 0xad, 0xba, 0x4e, 0x5d, 0x1c, 0xc7, 0x96, 0xcc, 0x6d, 0x11, 0xe9, 0xcf, - 0x25, 0x73, 0xfb, 0xb4, 0x5d, 0x7c, 0xe2, 0xbe, 0x0a, 0x12, 0x45, 0xe0, 0x2b, 0x6a, 0xdf, 0xa2, - 0x50, 0xac, 0x74, 0x1b, 0x76, 0xda, 0x2e, 0xce, 0xaa, 0x65, 0xdd, 0xb6, 0xc2, 0x16, 0x80, 0x16, - 0xa6, 0xc1, 0x75, 0x1f, 0x3b, 0x54, 0xa8, 0x35, 0x6d, 0x22, 0xdd, 0xf7, 0xd4, 0xd9, 0xc2, 0x83, - 0xad, 0xa8, 0x2c, 0x4a, 0x48, 0x78, 0xad, 0x47, 0x1b, 0xea, 0x83, 0xc0, 0xea, 0x96, 0x4f, 0x30, - 0x55, 0xa5, 0x28, 0x71, 0xa3, 0x30, 0x2a, 0x92, 0x5c, 0xf8, 0x05, 0x30, 0x61, 0x13, 0x4a, 0x71, - 0x93, 0xf0, 0xfa, 0x33, 0x19, 0x5f, 0xd1, 0xdb, 0x82, 0x8c, 0x22, 0x3e, 0xeb, 0x4f, 0x96, 0x06, - 0x79, 0xed, 0x9a, 0x49, 0x03, 0xf8, 0x66, 0x4f, 0x02, 0x94, 0xcf, 0xb6, 0x43, 0xb6, 0x9a, 0x87, - 0xbf, 0x2a, 0x7e, 0x11, 0x25, 0x11, 0xfc, 0xdf, 0x02, 0x39, 0x33, 0x20, 0x76, 0x74, 0x77, 0xbf, - 0x36, 0xa2, 0xd8, 0xab, 0xcc, 0x48, 0x1b, 0x72, 0x5b, 0x0c, 0x0d, 0x09, 0xd0, 0xd2, 0xef, 0x33, - 0xe0, 0xb1, 0x41, 0x4b, 0xd8, 0x85, 0x42, 0x99, 0xc7, 0x3d, 0x2b, 0xf4, 0xb1, 0x25, 0x23, 0x4e, - 0x79, 0x7c, 0x8f, 0x53, 0x91, 0xe4, 0xb2, 0x92, 
0x4f, 0x4d, 0xa7, 0x19, 0x5a, 0xd8, 0x97, 0xe1, - 0xa4, 0x76, 0x5d, 0x93, 0x74, 0xa4, 0x24, 0x60, 0x19, 0x00, 0x7a, 0xe0, 0xfa, 0x01, 0xc7, 0x90, - 0xd5, 0xeb, 0x3c, 0x2b, 0x10, 0x35, 0x45, 0x45, 0x09, 0x09, 0x76, 0xa3, 0x1d, 0x9a, 0x4e, 0x5d, - 0x9e, 0xba, 0xca, 0xe2, 0xaf, 0x99, 0x4e, 0x1d, 0x71, 0x0e, 0xc3, 0xb7, 0x4c, 0x1a, 0x30, 0x8a, - 0x3c, 0xf2, 0x2e, 0xaf, 0x73, 0x49, 0x25, 0xc1, 0xf0, 0x0d, 0x56, 0xf5, 0x5d, 0xdf, 0x24, 0x54, - 0x1f, 0x8f, 0xf1, 0xab, 0x8a, 0x8a, 0x12, 0x12, 0xa5, 0x5f, 0xe5, 0x07, 0x07, 0x09, 0x2b, 0x25, - 0xf0, 0x71, 0x90, 0x6b, 0xfa, 0x6e, 0xe8, 0x49, 0x2f, 0x29, 0x6f, 0xbf, 0xcc, 0x88, 0x48, 0xf0, - 0x58, 0x54, 0xb6, 0xba, 0xda, 0x54, 0x15, 0x95, 0x51, 0x73, 0x1a, 0xf1, 0xe1, 0xf7, 0x34, 0x90, - 0x73, 0xa4, 0x73, 0x58, 0xc8, 0xbd, 0x39, 0xa2, 0xb8, 0xe0, 0xee, 0x8d, 0xcd, 0x15, 0x9e, 0x17, - 0xc8, 0xf0, 0x59, 0x90, 0xa3, 0x86, 0xeb, 0x11, 0xe9, 0xf5, 0x42, 0x24, 0x54, 0x63, 0xc4, 0xd3, - 0x76, 0x71, 0x26, 0x52, 0xc7, 0x09, 0x48, 0x08, 0xc3, 0x1f, 0x6a, 0x00, 0xb4, 0xb0, 0x65, 0xd6, - 0x31, 0x6f, 0x19, 0x72, 0xdc, 0xfc, 0xe1, 0x86, 0xf5, 0xab, 0x4a, 0xbd, 0x38, 0xb4, 0xf8, 0x1b, - 0x25, 0xa0, 0xe1, 0x07, 0x1a, 0x98, 0xa6, 0xe1, 0xbe, 0x2f, 0x57, 0x51, 0xde, 0x5c, 0x4c, 0x5d, - 0xfd, 0xc6, 0x50, 0x6d, 0xa9, 0x25, 0x00, 0x2a, 0x73, 0x9d, 0x76, 0x71, 0x3a, 0x49, 0x41, 0x5d, - 0x06, 0xc0, 0x9f, 0x68, 0x20, 0xdf, 0x8a, 0xee, 0xec, 0x09, 0x9e, 0xf0, 0x6f, 0x8f, 0xe8, 0x60, - 0x65, 0x44, 0xc5, 0x59, 0xa0, 0xfa, 0x00, 0x65, 0x01, 0xfc, 0xab, 0x06, 0x74, 0x5c, 0x17, 0x05, - 0x1e, 0x5b, 0x7b, 0xbe, 0xe9, 0x04, 0xc4, 0x17, 0xfd, 0x26, 0xd5, 0xf3, 0xdc, 0xbc, 0xe1, 0xde, - 0x85, 0xe9, 0x5e, 0xb6, 0xb2, 0x2c, 0xad, 0xd3, 0xd7, 0x07, 0x98, 0x81, 0x06, 0x1a, 0xc8, 0x03, - 0x2d, 0x6e, 0x69, 0xf4, 0xc9, 0x11, 0x04, 0x5a, 0xdc, 0x4b, 0xc9, 0xea, 0x10, 0x77, 0x50, 0x09, - 0xe8, 0xd2, 0x07, 0xd9, 0x74, 0xd3, 0x9e, 0xbe, 0xf4, 0xe1, 0x6d, 0x61, 0xac, 0xd8, 0x0a, 0xd5, - 0x35, 0xee, 0xdc, 0x77, 0x47, 0x74, 0xf6, 0xea, 0xd6, 0x8e, 0x1b, 0x2f, 0x45, 0xa2, 0x28, 0x61, - 0x07, 0xfc, 0xa5, 0x06, 0x66, 0xb0, 0x61, 0x10, 0x2f, 0x20, 0x75, 0x51, 0x8b, 0x33, 0x9f, 0x43, - 0xb9, 0x59, 0x90, 0x56, 0xcd, 0xac, 0x27, 0xa1, 0x51, 0xb7, 0x25, 0xf0, 0x45, 0x70, 0x9e, 0x06, - 0xae, 0x4f, 0xea, 0xa9, 0x2e, 0x17, 0x76, 0xda, 0xc5, 0xf3, 0xb5, 0x2e, 0x0e, 0x4a, 0x49, 0x96, - 0x3e, 0x1d, 0x03, 0xc5, 0xfb, 0x64, 0xc6, 0x19, 0xde, 0x51, 0x4f, 0x82, 0x71, 0xbe, 0xdd, 0x3a, - 0xf7, 0x4a, 0x3e, 0xd1, 0xb9, 0x71, 0x2a, 0x92, 0x5c, 0x56, 0xd7, 0x19, 0x3e, 0xeb, 0x36, 0xb2, - 0x5c, 0x50, 0xd5, 0xf5, 0x9a, 0x20, 0xa3, 0x88, 0x0f, 0xdf, 0x03, 0xe3, 0x62, 0x4e, 0xc2, 0x8b, - 0xea, 0x08, 0x0b, 0x23, 0xe0, 0x76, 0x72, 0x28, 0x24, 0x21, 0x7b, 0x0b, 0x62, 0xee, 0x61, 0x17, - 0xc4, 0x7b, 0x56, 0xa0, 0xf1, 0xff, 0xf1, 0x0a, 0x54, 0xfa, 0x8f, 0x96, 0xce, 0xfb, 0xc4, 0x56, - 0x6b, 0x06, 0xb6, 0x08, 0xdc, 0x00, 0x73, 0xec, 0x91, 0x81, 0x88, 0x67, 0x99, 0x06, 0xa6, 0xfc, - 0x8d, 0x2b, 0x02, 0x4e, 0x8d, 0x5d, 0x6a, 0x29, 0x3e, 0xea, 0x59, 0x01, 0x5f, 0x01, 0x50, 0x34, - 0xde, 0x5d, 0x7a, 0x44, 0x0f, 0xa1, 0x5a, 0xe8, 0x5a, 0x8f, 0x04, 0xea, 0xb3, 0x0a, 0x56, 0xc1, - 0xbc, 0x85, 0xf7, 0x89, 0x55, 0x23, 0x16, 0x31, 0x02, 0xd7, 0xe7, 0xaa, 0xc4, 0x14, 0x60, 0xa1, - 0xd3, 0x2e, 0xce, 0x5f, 0x4b, 0x33, 0x51, 0xaf, 0x7c, 0x69, 0x25, 0x9d, 0x5e, 0xc9, 0x8d, 0x8b, - 0xe7, 0xcc, 0x47, 0x19, 0xb0, 0x38, 0x38, 0x32, 0xe0, 0xf7, 0xe3, 0x57, 0x97, 0x68, 0xaa, 0xdf, - 0x1e, 0x55, 0x14, 0xca, 0x67, 0x17, 0xe8, 0x7d, 0x72, 0xc1, 0xef, 0xb0, 0x0e, 0x07, 0x5b, 0xd1, - 0x9c, 0xe7, 0xad, 0x91, 0x99, 0xc0, 0x40, 0x2a, 0x93, 0xa2, 0x79, 0xc2, 
0x16, 0xef, 0x95, 0xb0, - 0x45, 0x4a, 0x7f, 0xd0, 0xd2, 0x0f, 0xef, 0x38, 0x83, 0xe1, 0x4f, 0x35, 0x30, 0xeb, 0x7a, 0xc4, - 0x59, 0xdf, 0xdb, 0x7a, 0xf5, 0x4b, 0x22, 0x93, 0xa5, 0xab, 0x76, 0x1e, 0xd0, 0xce, 0x57, 0x6a, - 0xbb, 0x3b, 0x42, 0xe1, 0x9e, 0xef, 0x7a, 0xb4, 0x72, 0xa1, 0xd3, 0x2e, 0xce, 0xee, 0x76, 0x43, - 0xa1, 0x34, 0x76, 0xc9, 0x06, 0x0b, 0x9b, 0xc7, 0x01, 0xf1, 0x1d, 0x6c, 0x6d, 0xb8, 0x46, 0x68, - 0x13, 0x27, 0x10, 0x86, 0xa6, 0x86, 0x44, 0xda, 0x19, 0x87, 0x44, 0x8f, 0x81, 0x6c, 0xe8, 0x5b, - 0x32, 0x8a, 0xa7, 0xd4, 0x10, 0x14, 0x5d, 0x43, 0x8c, 0x5e, 0x5a, 0x01, 0x63, 0xcc, 0x4e, 0x78, - 0x09, 0x64, 0x7d, 0x7c, 0xc4, 0xb5, 0x4e, 0x57, 0x26, 0x98, 0x08, 0xc2, 0x47, 0x88, 0xd1, 0x4a, - 0xff, 0x58, 0x02, 0xb3, 0xa9, 0xbd, 0xc0, 0x45, 0x90, 0x51, 0x93, 0x55, 0x20, 0x95, 0x66, 0xb6, - 0x36, 0x50, 0xc6, 0xac, 0xc3, 0xe7, 0x55, 0xf1, 0x15, 0xa0, 0x45, 0x55, 0xcf, 0x39, 0x95, 0xb5, - 0xb4, 0xb1, 0x3a, 0x66, 0x48, 0x54, 0x38, 0x99, 0x0d, 0xa4, 0x21, 0xb3, 0x44, 0xd8, 0x40, 0x1a, - 0x88, 0xd1, 0x3e, 0xeb, 0x84, 0x2c, 0x1a, 0xd1, 0xe5, 0xce, 0x30, 0xa2, 0x1b, 0xbf, 0xe7, 0x88, - 0xee, 0x71, 0x90, 0x0b, 0xcc, 0xc0, 0x22, 0xfa, 0x44, 0xf7, 0xcb, 0xe3, 0x3a, 0x23, 0x22, 0xc1, - 0x83, 0x37, 0xc1, 0x44, 0x9d, 0x34, 0x70, 0x68, 0x05, 0x7a, 0x9e, 0x87, 0x50, 0x75, 0x08, 0x21, - 0x24, 0xe6, 0xa7, 0x1b, 0x42, 0x2f, 0x8a, 0x00, 0xe0, 0x13, 0x60, 0xc2, 0xc6, 0xc7, 0xa6, 0x1d, - 0xda, 0xbc, 0x27, 0xd3, 0x84, 0xd8, 0xb6, 0x20, 0xa1, 0x88, 0xc7, 0x2a, 0x23, 0x39, 0x36, 0xac, - 0x90, 0x9a, 0x2d, 0x22, 0x99, 0x3a, 0xe0, 0xb7, 0xa7, 0xaa, 0x8c, 0x9b, 0x29, 0x3e, 0xea, 0x59, - 0xc1, 0xc1, 0x4c, 0x87, 0x2f, 0x9e, 0x4a, 0x80, 0x09, 0x12, 0x8a, 0x78, 0xdd, 0x60, 0x52, 0x7e, - 0x7a, 0x10, 0x98, 0x5c, 0xdc, 0xb3, 0x02, 0x7e, 0x11, 0x4c, 0xda, 0xf8, 0xf8, 0x1a, 0x71, 0x9a, - 0xc1, 0x81, 0x3e, 0xb3, 0xac, 0xad, 0x66, 0x2b, 0x33, 0x9d, 0x76, 0x71, 0x72, 0x3b, 0x22, 0xa2, - 0x98, 0xcf, 0x85, 0x4d, 0x47, 0x0a, 0x9f, 0x4f, 0x08, 0x47, 0x44, 0x14, 0xf3, 0x59, 0x07, 0xe1, - 0xe1, 0x80, 0x25, 0x97, 0x3e, 0xdb, 0xfd, 0x32, 0xdc, 0x13, 0x64, 0x14, 0xf1, 0xe1, 0x2a, 0xc8, - 0xdb, 0xf8, 0x98, 0xbf, 0xe2, 0xf5, 0x39, 0xae, 0x96, 0xcf, 0x92, 0xb7, 0x25, 0x0d, 0x29, 0x2e, - 0x97, 0x34, 0x1d, 0x21, 0x39, 0x9f, 0x90, 0x94, 0x34, 0xa4, 0xb8, 0x2c, 0x88, 0x43, 0xc7, 0xbc, - 0x15, 0x12, 0x21, 0x0c, 0xb9, 0x67, 0x54, 0x10, 0xdf, 0x88, 0x59, 0x28, 0x29, 0xc7, 0x5e, 0xd1, - 0x76, 0x68, 0x05, 0xa6, 0x67, 0x91, 0xdd, 0x86, 0x7e, 0x81, 0xfb, 0x9f, 0xf7, 0xc9, 0xdb, 0x8a, - 0x8a, 0x12, 0x12, 0x90, 0x80, 0x31, 0xe2, 0x84, 0xb6, 0xfe, 0x08, 0xbf, 0xd8, 0x87, 0x12, 0x82, - 0x2a, 0x73, 0x36, 0x9d, 0xd0, 0x46, 0x5c, 0x3d, 0x7c, 0x1e, 0xcc, 0xd8, 0xf8, 0x98, 0x95, 0x03, - 0xe2, 0x07, 0xec, 0x7d, 0xbf, 0xc0, 0x37, 0x3f, 0xcf, 0x3a, 0xce, 0xed, 0x24, 0x03, 0x75, 0xcb, - 0xf1, 0x85, 0xa6, 0x93, 0x58, 0x78, 0x31, 0xb1, 0x30, 0xc9, 0x40, 0xdd, 0x72, 0xcc, 0xd3, 0x3e, - 0xb9, 0x15, 0x9a, 0x3e, 0xa9, 0xeb, 0x8f, 0xf2, 0x26, 0x55, 0xce, 0xf7, 0x05, 0x0d, 0x29, 0x2e, - 0x6c, 0x45, 0xe3, 0x1e, 0x9d, 0xa7, 0xe1, 0x8d, 0xe1, 0x56, 0xf2, 0x5d, 0x7f, 0xdd, 0xf7, 0xf1, - 0x89, 0xb8, 0x69, 0x92, 0x83, 0x1e, 0x48, 0x41, 0x0e, 0x5b, 0xd6, 0x6e, 0x43, 0xbf, 0xc4, 0x7d, - 0x3f, 0xec, 0x1b, 0x44, 0x55, 0x9d, 0x75, 0x06, 0x82, 0x04, 0x16, 0x03, 0x75, 0x1d, 0x16, 0x1a, - 0x8b, 0xa3, 0x05, 0xdd, 0x65, 0x20, 0x48, 0x60, 0xf1, 0x9d, 0x3a, 0x27, 0xbb, 0x0d, 0xfd, 0xff, - 0x46, 0xbc, 0x53, 0x06, 0x82, 0x04, 0x16, 0x34, 0x41, 0xd6, 0x71, 0x03, 0x7d, 0x69, 0x24, 0xd7, - 0x33, 0xbf, 0x70, 0x76, 0xdc, 0x00, 0x31, 0x0c, 0xf8, 0x0b, 0x0d, 0x00, 0x2f, 0x0e, 0xd1, 0xc7, - 
0x86, 0x32, 0x45, 0x48, 0x41, 0x96, 0xe3, 0xd8, 0xde, 0x74, 0x02, 0xff, 0x24, 0x7e, 0x47, 0x26, - 0x72, 0x20, 0x61, 0x05, 0xfc, 0xad, 0x06, 0x1e, 0x49, 0xb6, 0xc9, 0xca, 0xbc, 0x02, 0xf7, 0xc8, - 0xf5, 0x61, 0x87, 0x79, 0xc5, 0x75, 0xad, 0x8a, 0xde, 0x69, 0x17, 0x1f, 0x59, 0xef, 0x83, 0x8a, - 0xfa, 0xda, 0x02, 0xff, 0xa8, 0x81, 0x79, 0x59, 0x45, 0x13, 0x16, 0x16, 0xb9, 0x03, 0xc9, 0xb0, - 0x1d, 0x98, 0xc6, 0x11, 0x7e, 0x54, 0xbf, 0x4b, 0xf7, 0xf0, 0x51, 0xaf, 0x69, 0xf0, 0x2f, 0x1a, - 0x98, 0xae, 0x13, 0x8f, 0x38, 0x75, 0xe2, 0x18, 0xcc, 0xd6, 0xe5, 0xa1, 0x8c, 0x0d, 0xd2, 0xb6, - 0x6e, 0x24, 0x20, 0x84, 0x99, 0x65, 0x69, 0xe6, 0x74, 0x92, 0x75, 0xda, 0x2e, 0x5e, 0x8c, 0x97, - 0x26, 0x39, 0xa8, 0xcb, 0x4a, 0xf8, 0xa1, 0x06, 0x66, 0xe3, 0x03, 0x10, 0x57, 0xca, 0xca, 0x08, - 0xe3, 0x80, 0xb7, 0xaf, 0xeb, 0xdd, 0x80, 0x28, 0x6d, 0x01, 0xfc, 0x93, 0xc6, 0x3a, 0xb5, 0xe8, - 0xdd, 0x47, 0xf5, 0x12, 0xf7, 0xe5, 0x3b, 0x43, 0xf7, 0xa5, 0x42, 0x10, 0xae, 0xbc, 0x1c, 0xb7, - 0x82, 0x8a, 0x73, 0xda, 0x2e, 0x2e, 0x24, 0x3d, 0xa9, 0x18, 0x28, 0x69, 0x21, 0xfc, 0xb1, 0x06, - 0xa6, 0x49, 0xdc, 0x71, 0x53, 0xfd, 0xf1, 0xa1, 0x38, 0xb1, 0x6f, 0x13, 0x2f, 0x5e, 0xea, 0x09, - 0x16, 0x45, 0x5d, 0xd8, 0xac, 0x83, 0x24, 0xc7, 0xd8, 0xf6, 0x2c, 0xa2, 0xff, 0xff, 0x90, 0x3b, - 0xc8, 0x4d, 0xa1, 0x17, 0x45, 0x00, 0xf0, 0x32, 0xc8, 0x3b, 0xa1, 0x65, 0xe1, 0x7d, 0x8b, 0xe8, - 0x4f, 0xf0, 0x5e, 0x44, 0x4d, 0x31, 0x77, 0x24, 0x1d, 0x29, 0x89, 0x45, 0xf6, 0x4e, 0x4a, 0xe5, - 0x19, 0x9c, 0x03, 0xd9, 0x43, 0x22, 0x7f, 0x0e, 0x46, 0xec, 0x4f, 0x58, 0x07, 0xb9, 0x16, 0xb6, - 0xc2, 0xe8, 0xa9, 0x37, 0xe4, 0x1a, 0x8d, 0x84, 0xf2, 0x17, 0x33, 0x2f, 0x68, 0x8b, 0xb7, 0x35, - 0x70, 0xb1, 0x7f, 0xfa, 0x3f, 0x54, 0xb3, 0x7e, 0xad, 0x81, 0xf9, 0x9e, 0x4c, 0xef, 0x63, 0xd1, - 0xad, 0x6e, 0x8b, 0xde, 0x18, 0x76, 0xca, 0xd6, 0x02, 0xdf, 0x74, 0x9a, 0xbc, 0x4f, 0x49, 0x9a, - 0xf7, 0x33, 0x0d, 0xcc, 0xa5, 0x93, 0xe7, 0x61, 0xfa, 0xab, 0x74, 0x3b, 0x03, 0x2e, 0xf6, 0x6f, - 0xaf, 0xa0, 0xaf, 0xde, 0x91, 0xa3, 0x79, 0x8f, 0xf7, 0x9b, 0xdd, 0xbd, 0xaf, 0x81, 0xa9, 0x9b, - 0x4a, 0x2e, 0xfa, 0xb9, 0x70, 0xe8, 0x93, 0x80, 0xa8, 0x5a, 0xc5, 0x0c, 0x8a, 0x92, 0xb8, 0xa5, - 0x3f, 0x6b, 0x60, 0xa1, 0x6f, 0x19, 0x66, 0x0f, 0x56, 0x6c, 0x59, 0xee, 0x91, 0x18, 0xe8, 0x24, - 0xa6, 0xa5, 0xeb, 0x9c, 0x8a, 0x24, 0x37, 0xe1, 0xbd, 0xcc, 0xe7, 0xe5, 0xbd, 0xd2, 0xdf, 0x34, - 0xb0, 0x74, 0xaf, 0x48, 0x7c, 0x28, 0x47, 0xba, 0x0a, 0xf2, 0xb2, 0x85, 0x3a, 0xe1, 0xc7, 0x29, - 0x5f, 0x0d, 0xb2, 0x68, 0xf0, 0xff, 0x90, 0x11, 0x7f, 0x95, 0x3e, 0xd2, 0xc0, 0x5c, 0x8d, 0xf8, - 0x2d, 0xd3, 0x20, 0x88, 0x34, 0x88, 0x4f, 0x1c, 0x83, 0xc0, 0x35, 0x30, 0xc9, 0x7f, 0xa7, 0xf3, - 0xb0, 0x11, 0x0d, 0xb1, 0xe7, 0xa5, 0xcb, 0x27, 0x77, 0x22, 0x06, 0x8a, 0x65, 0xd4, 0xc0, 0x3b, - 0x33, 0x70, 0xe0, 0xbd, 0x04, 0xc6, 0xbc, 0x78, 0x1c, 0x98, 0x67, 0x5c, 0x3e, 0x01, 0xe4, 0x54, - 0xce, 0x75, 0xfd, 0x80, 0xcf, 0x38, 0x72, 0x92, 0xeb, 0xfa, 0x01, 0xe2, 0xd4, 0xd2, 0xdf, 0x35, - 0xd0, 0xef, 0x7f, 0x59, 0x60, 0x0b, 0x4c, 0x50, 0x61, 0xba, 0x74, 0xed, 0xee, 0x03, 0xba, 0x36, - 0xed, 0x08, 0x71, 0x89, 0x44, 0xd4, 0x08, 0x8c, 0x79, 0xd7, 0xc0, 0x95, 0xd0, 0xa9, 0xcb, 0xf1, - 0xde, 0xb4, 0xf0, 0x6e, 0x75, 0x5d, 0xd0, 0x90, 0xe2, 0xc2, 0x4b, 0x62, 0x10, 0x95, 0x98, 0xee, - 0x44, 0x43, 0xa8, 0xca, 0x95, 0x3b, 0x77, 0x0b, 0xe7, 0x3e, 0xbe, 0x5b, 0x38, 0xf7, 0xc9, 0xdd, - 0xc2, 0xb9, 0xef, 0x76, 0x0a, 0xda, 0x9d, 0x4e, 0x41, 0xfb, 0xb8, 0x53, 0xd0, 0x3e, 0xe9, 0x14, - 0xb4, 0x7f, 0x75, 0x0a, 0xda, 0xcf, 0x3f, 0x2d, 0x9c, 0xfb, 0xe6, 0x84, 0x34, 0xed, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 
0xd1, 0xda, 0xdd, 0xd0, 0x61, 0x2a, 0x00, 0x00, + // 2884 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, + 0x15, 0xf7, 0xac, 0xb4, 0xd2, 0xaa, 0x25, 0x59, 0x52, 0xdb, 0x56, 0xc6, 0x8a, 0xb3, 0x2b, 0x6f, + 0x48, 0x10, 0xc1, 0x5e, 0x25, 0x26, 0x21, 0x21, 0x55, 0x1c, 0xb4, 0x92, 0x92, 0x52, 0x62, 0x7d, + 0xd0, 0x6b, 0x27, 0x86, 0x7c, 0xb6, 0x76, 0x5a, 0xab, 0xb1, 0xe6, 0xcb, 0xdd, 0x33, 0x2b, 0xa9, + 0x02, 0x14, 0x1f, 0x95, 0x82, 0xa2, 0x80, 0x50, 0x24, 0x17, 0x0a, 0x38, 0x04, 0x8a, 0x0b, 0x07, + 0x38, 0xc0, 0x0d, 0xfe, 0x80, 0x1c, 0x53, 0x9c, 0x72, 0xa0, 0xb6, 0xf0, 0xe6, 0x2f, 0xa0, 0x8a, + 0x2a, 0xaa, 0x74, 0xa2, 0xfa, 0x63, 0x7a, 0x66, 0x67, 0x77, 0x6d, 0x57, 0xbc, 0x1b, 0x73, 0xd3, + 0xbc, 0xaf, 0xdf, 0xeb, 0xd7, 0xaf, 0x5f, 0xbf, 0x7e, 0x2b, 0xb0, 0x77, 0xf0, 0x1c, 0xab, 0xd8, + 0xfe, 0xf2, 0x41, 0xb4, 0x4b, 0xa8, 0x47, 0x42, 0xc2, 0x96, 0x9b, 0xc4, 0xb3, 0x7c, 0xba, 0xac, + 0x18, 0x38, 0xb0, 0xc9, 0x51, 0x48, 0x3c, 0x66, 0xfb, 0x1e, 0xbb, 0x8c, 0x03, 0x9b, 0x11, 0xda, + 0x24, 0x74, 0x39, 0x38, 0x68, 0x70, 0x1e, 0xeb, 0x14, 0x58, 0x6e, 0x3e, 0xb5, 0x4b, 0x42, 0xfc, + 0xd4, 0x72, 0x83, 0x78, 0x84, 0xe2, 0x90, 0x58, 0x95, 0x80, 0xfa, 0xa1, 0x0f, 0xbf, 0x2e, 0xcd, + 0x55, 0x3a, 0xa4, 0xdf, 0xd2, 0xe6, 0x2a, 0xc1, 0x41, 0x83, 0xf3, 0x58, 0xa7, 0x40, 0x45, 0x99, + 0x5b, 0xb8, 0xdc, 0xb0, 0xc3, 0xfd, 0x68, 0xb7, 0x52, 0xf7, 0xdd, 0xe5, 0x86, 0xdf, 0xf0, 0x97, + 0x85, 0xd5, 0xdd, 0x68, 0x4f, 0x7c, 0x89, 0x0f, 0xf1, 0x97, 0x44, 0x5b, 0x78, 0x3a, 0x71, 0xde, + 0xc5, 0xf5, 0x7d, 0xdb, 0x23, 0xf4, 0x38, 0xf1, 0xd8, 0x25, 0x21, 0x5e, 0x6e, 0x76, 0xf9, 0xb8, + 0xb0, 0xdc, 0x4f, 0x8b, 0x46, 0x5e, 0x68, 0xbb, 0xa4, 0x4b, 0xe1, 0xab, 0x77, 0x53, 0x60, 0xf5, + 0x7d, 0xe2, 0xe2, 0xac, 0x5e, 0xf9, 0xc4, 0x00, 0x73, 0xab, 0xbe, 0xd7, 0x24, 0x94, 0xaf, 0x12, + 0x91, 0x5b, 0x11, 0x61, 0x21, 0xac, 0x82, 0x91, 0xc8, 0xb6, 0x4c, 0x63, 0xd1, 0x58, 0x9a, 0xa8, + 0x3e, 0xf9, 0x51, 0xab, 0x74, 0xaa, 0xdd, 0x2a, 0x8d, 0x5c, 0xdf, 0x58, 0x3b, 0x69, 0x95, 0x2e, + 0xf6, 0x43, 0x0a, 0x8f, 0x03, 0xc2, 0x2a, 0xd7, 0x37, 0xd6, 0x10, 0x57, 0x86, 0x2f, 0x82, 0x39, + 0x8b, 0x30, 0x9b, 0x12, 0x6b, 0x65, 0x67, 0xe3, 0x15, 0x69, 0xdf, 0xcc, 0x09, 0x8b, 0xe7, 0x95, + 0xc5, 0xb9, 0xb5, 0xac, 0x00, 0xea, 0xd6, 0x81, 0x37, 0xc0, 0xb8, 0xbf, 0x7b, 0x93, 0xd4, 0x43, + 0x66, 0x8e, 0x2c, 0x8e, 0x2c, 0x4d, 0x5e, 0xb9, 0x5c, 0x49, 0x76, 0x50, 0xbb, 0x20, 0xb6, 0x4d, + 0x2d, 0xb6, 0x82, 0xf0, 0xe1, 0x7a, 0xbc, 0x73, 0xd5, 0x19, 0x85, 0x36, 0xbe, 0x2d, 0xad, 0xa0, + 0xd8, 0x5c, 0xf9, 0xf7, 0x39, 0x00, 0xd3, 0x8b, 0x67, 0x81, 0xef, 0x31, 0x32, 0x90, 0xd5, 0x33, + 0x30, 0x5b, 0x17, 0x96, 0x43, 0x62, 0x29, 0x5c, 0x33, 0xf7, 0x59, 0xbc, 0x37, 0x15, 0xfe, 0xec, + 0x6a, 0xc6, 0x1c, 0xea, 0x02, 0x80, 0xd7, 0xc0, 0x18, 0x25, 0x2c, 0x72, 0x42, 0x73, 0x64, 0xd1, + 0x58, 0x9a, 0xbc, 0x72, 0xa9, 0x2f, 0x94, 0xc8, 0x6f, 0x9e, 0x7c, 0x95, 0xe6, 0x53, 0x95, 0x5a, + 0x88, 0xc3, 0x88, 0x55, 0x4f, 0x2b, 0xa4, 0x31, 0x24, 0x6c, 0x20, 0x65, 0xab, 0xfc, 0xe3, 0x1c, + 0x98, 0x4d, 0x47, 0xa9, 0x69, 0x93, 0x43, 0x78, 0x08, 0xc6, 0xa9, 0x4c, 0x16, 0x11, 0xa7, 0xc9, + 0x2b, 0x3b, 0x95, 0xfb, 0x3a, 0x56, 0x95, 0xae, 0x24, 0xac, 0x4e, 0xf2, 0x3d, 0x53, 0x1f, 0x28, + 0x46, 0x83, 0xef, 0x80, 0x02, 0x55, 0x1b, 0x25, 0xb2, 0x69, 0xf2, 0xca, 0x37, 0x06, 0x88, 0x2c, + 0x0d, 0x57, 0xa7, 0xda, 0xad, 0x52, 0x21, 0xfe, 0x42, 0x1a, 0xb0, 0xfc, 0x7e, 0x0e, 0x14, 0x57, + 0x23, 0x16, 0xfa, 0x2e, 0x22, 0xcc, 0x8f, 0x68, 0x9d, 0xac, 0xfa, 0x4e, 0xe4, 0x7a, 0x6b, 0x64, + 0xcf, 0xf6, 0xec, 0x90, 
0x67, 0xeb, 0x22, 0x18, 0xf5, 0xb0, 0x4b, 0x54, 0xf6, 0x4c, 0xa9, 0x98, + 0x8e, 0x6e, 0x61, 0x97, 0x20, 0xc1, 0xe1, 0x12, 0x3c, 0x59, 0xd4, 0x59, 0xd0, 0x12, 0xd7, 0x8e, + 0x03, 0x82, 0x04, 0x07, 0x3e, 0x0e, 0xc6, 0xf6, 0x7c, 0xea, 0x62, 0xb9, 0x8f, 0x13, 0xc9, 0xce, + 0xbc, 0x20, 0xa8, 0x48, 0x71, 0xe1, 0x33, 0x60, 0xd2, 0x22, 0xac, 0x4e, 0xed, 0x80, 0x43, 0x9b, + 0xa3, 0x42, 0xf8, 0x8c, 0x12, 0x9e, 0x5c, 0x4b, 0x58, 0x28, 0x2d, 0x07, 0x2f, 0x81, 0x42, 0x40, + 0x6d, 0x9f, 0xda, 0xe1, 0xb1, 0x99, 0x5f, 0x34, 0x96, 0xf2, 0xd5, 0x59, 0xa5, 0x53, 0xd8, 0x51, + 0x74, 0xa4, 0x25, 0xe0, 0x22, 0x28, 0xbc, 0x54, 0xdb, 0xde, 0xda, 0xc1, 0xe1, 0xbe, 0x39, 0x26, + 0x10, 0x46, 0xb9, 0x34, 0x2a, 0xdc, 0x54, 0xd4, 0xf2, 0x3f, 0x73, 0xc0, 0xcc, 0x46, 0x25, 0x0e, + 0x29, 0x7c, 0x01, 0x14, 0x58, 0xc8, 0x2b, 0x4e, 0xe3, 0x58, 0xc5, 0xe4, 0x89, 0x18, 0xac, 0xa6, + 0xe8, 0x27, 0xad, 0xd2, 0x7c, 0xa2, 0x11, 0x53, 0x45, 0x3c, 0xb4, 0x2e, 0xfc, 0xad, 0x01, 0xce, + 0x1c, 0x92, 0xdd, 0x7d, 0xdf, 0x3f, 0x58, 0x75, 0x6c, 0xe2, 0x85, 0xab, 0xbe, 0xb7, 0x67, 0x37, + 0x54, 0x0e, 0xa0, 0xfb, 0xcc, 0x81, 0x57, 0xbb, 0x2d, 0x57, 0x1f, 0x6a, 0xb7, 0x4a, 0x67, 0x7a, + 0x30, 0x50, 0x2f, 0x3f, 0xe0, 0x0d, 0x60, 0xd6, 0x33, 0x87, 0x44, 0x15, 0x30, 0x59, 0xb6, 0x26, + 0xaa, 0x17, 0xda, 0xad, 0x92, 0xb9, 0xda, 0x47, 0x06, 0xf5, 0xd5, 0x2e, 0xff, 0x70, 0x24, 0x1b, + 0xde, 0x54, 0xba, 0xbd, 0x0d, 0x0a, 0xfc, 0x18, 0x5b, 0x38, 0xc4, 0xea, 0x20, 0x3e, 0x79, 0x6f, + 0x87, 0x5e, 0xd6, 0x8c, 0x4d, 0x12, 0xe2, 0x2a, 0x54, 0x1b, 0x02, 0x12, 0x1a, 0xd2, 0x56, 0xe1, + 0x77, 0xc0, 0x28, 0x0b, 0x48, 0x5d, 0x05, 0xfa, 0xb5, 0xfb, 0x3d, 0x6c, 0x7d, 0x16, 0x52, 0x0b, + 0x48, 0x3d, 0x39, 0x0b, 0xfc, 0x0b, 0x09, 0x58, 0xf8, 0xae, 0x01, 0xc6, 0x98, 0x28, 0x50, 0xaa, + 0xa8, 0xbd, 0x31, 0x2c, 0x0f, 0x32, 0x55, 0x50, 0x7e, 0x23, 0x05, 0x5e, 0xfe, 0x4f, 0x0e, 0x5c, + 0xec, 0xa7, 0xba, 0xea, 0x7b, 0x96, 0xdc, 0x8e, 0x0d, 0x75, 0xb6, 0x65, 0xa6, 0x3f, 0x93, 0x3e, + 0xdb, 0x27, 0xad, 0xd2, 0x63, 0x77, 0x35, 0x90, 0x2a, 0x02, 0x5f, 0xd3, 0xeb, 0x96, 0x85, 0xe2, + 0x62, 0xa7, 0x63, 0x27, 0xad, 0xd2, 0x8c, 0x56, 0xeb, 0xf4, 0x15, 0x36, 0x01, 0x74, 0x30, 0x0b, + 0xaf, 0x51, 0xec, 0x31, 0x69, 0xd6, 0x76, 0x89, 0x0a, 0xdf, 0x13, 0xf7, 0x96, 0x1e, 0x5c, 0xa3, + 0xba, 0xa0, 0x20, 0xe1, 0xd5, 0x2e, 0x6b, 0xa8, 0x07, 0x02, 0xaf, 0x5b, 0x94, 0x60, 0xa6, 0x4b, + 0x51, 0xea, 0x46, 0xe1, 0x54, 0xa4, 0xb8, 0xf0, 0x4b, 0x60, 0xdc, 0x25, 0x8c, 0xe1, 0x06, 0x11, + 0xf5, 0x67, 0x22, 0xb9, 0xa2, 0x37, 0x25, 0x19, 0xc5, 0x7c, 0xde, 0x9f, 0x5c, 0xe8, 0x17, 0xb5, + 0xab, 0x36, 0x0b, 0xe1, 0xeb, 0x5d, 0x07, 0xa0, 0x72, 0x6f, 0x2b, 0xe4, 0xda, 0x22, 0xfd, 0x75, + 0xf1, 0x8b, 0x29, 0xa9, 0xe4, 0xff, 0x36, 0xc8, 0xdb, 0x21, 0x71, 0xe3, 0xbb, 0xfb, 0xd5, 0x21, + 0xe5, 0x5e, 0x75, 0x5a, 0xf9, 0x90, 0xdf, 0xe0, 0x68, 0x48, 0x82, 0x96, 0xff, 0x90, 0x03, 0x8f, + 0xf4, 0x53, 0xe1, 0x17, 0x0a, 0xe3, 0x11, 0x0f, 0x9c, 0x88, 0x62, 0x47, 0x65, 0x9c, 0x8e, 0xf8, + 0x8e, 0xa0, 0x22, 0xc5, 0xe5, 0x25, 0x9f, 0xd9, 0x5e, 0x23, 0x72, 0x30, 0x55, 0xe9, 0xa4, 0x57, + 0x5d, 0x53, 0x74, 0xa4, 0x25, 0x60, 0x05, 0x00, 0xb6, 0xef, 0xd3, 0x50, 0x60, 0xa8, 0xea, 0x75, + 0x9a, 0x17, 0x88, 0x9a, 0xa6, 0xa2, 0x94, 0x04, 0xbf, 0xd1, 0x0e, 0x6c, 0xcf, 0x52, 0xbb, 0xae, + 0x4f, 0xf1, 0xcb, 0xb6, 0x67, 0x21, 0xc1, 0xe1, 0xf8, 0x8e, 0xcd, 0x42, 0x4e, 0x51, 0x5b, 0xde, + 0x11, 0x75, 0x21, 0xa9, 0x25, 0x38, 0x7e, 0x9d, 0x57, 0x7d, 0x9f, 0xda, 0x84, 0x99, 0x63, 0x09, + 0xfe, 0xaa, 0xa6, 0xa2, 0x94, 0x44, 0xf9, 0xd7, 0x85, 0xfe, 0x49, 0xc2, 0x4b, 0x09, 0x7c, 0x14, + 0xe4, 0x1b, 0xd4, 0x8f, 0x02, 0x15, 0x25, 0x1d, 
0xed, 0x17, 0x39, 0x11, 0x49, 0x1e, 0xcf, 0xca, + 0x66, 0x47, 0x9b, 0xaa, 0xb3, 0x32, 0x6e, 0x4e, 0x63, 0x3e, 0xfc, 0xbe, 0x01, 0xf2, 0x9e, 0x0a, + 0x0e, 0x4f, 0xb9, 0xd7, 0x87, 0x94, 0x17, 0x22, 0xbc, 0x89, 0xbb, 0x32, 0xf2, 0x12, 0x19, 0x3e, + 0x0d, 0xf2, 0xac, 0xee, 0x07, 0x44, 0x45, 0xbd, 0x18, 0x0b, 0xd5, 0x38, 0xf1, 0xa4, 0x55, 0x9a, + 0x8e, 0xcd, 0x09, 0x02, 0x92, 0xc2, 0xf0, 0x47, 0x06, 0x00, 0x4d, 0xec, 0xd8, 0x16, 0x16, 0x2d, + 0x43, 0x5e, 0xb8, 0x3f, 0xd8, 0xb4, 0x7e, 0x45, 0x9b, 0x97, 0x9b, 0x96, 0x7c, 0xa3, 0x14, 0x34, + 0x7c, 0xcf, 0x00, 0x53, 0x2c, 0xda, 0xa5, 0x4a, 0x8b, 0x89, 0xe6, 0x62, 0xf2, 0xca, 0x37, 0x07, + 0xea, 0x4b, 0x2d, 0x05, 0x50, 0x9d, 0x6d, 0xb7, 0x4a, 0x53, 0x69, 0x0a, 0xea, 0x70, 0x00, 0xfe, + 0xd4, 0x00, 0x85, 0x66, 0x7c, 0x67, 0x8f, 0x8b, 0x03, 0xff, 0xe6, 0x90, 0x36, 0x56, 0x65, 0x54, + 0x72, 0x0a, 0x74, 0x1f, 0xa0, 0x3d, 0x80, 0x7f, 0x33, 0x80, 0x89, 0x2d, 0x59, 0xe0, 0xb1, 0xb3, + 0x43, 0x6d, 0x2f, 0x24, 0x54, 0xf6, 0x9b, 0xcc, 0x2c, 0x08, 0xf7, 0x06, 0x7b, 0x17, 0x66, 0x7b, + 0xd9, 0xea, 0xa2, 0xf2, 0xce, 0x5c, 0xe9, 0xe3, 0x06, 0xea, 0xeb, 0xa0, 0x48, 0xb4, 0xa4, 0xa5, + 0x31, 0x27, 0x86, 0x90, 0x68, 0x49, 0x2f, 0xa5, 0xaa, 0x43, 0xd2, 0x41, 0xa5, 0xa0, 0xcb, 0xef, + 0x8d, 0x64, 0x9b, 0xf6, 0xec, 0xa5, 0x0f, 0x3f, 0x90, 0xce, 0xca, 0xa5, 0x30, 0xd3, 0x10, 0xc1, + 0x7d, 0x7b, 0x48, 0x7b, 0xaf, 0x6f, 0xed, 0xa4, 0xf1, 0xd2, 0x24, 0x86, 0x52, 0x7e, 0xc0, 0x5f, + 0x19, 0x60, 0x1a, 0xd7, 0xeb, 0x24, 0x08, 0x89, 0x25, 0x6b, 0x71, 0xee, 0x73, 0x28, 0x37, 0xe7, + 0x94, 0x57, 0xd3, 0x2b, 0x69, 0x68, 0xd4, 0xe9, 0x09, 0x7c, 0x1e, 0x9c, 0x66, 0xa1, 0x4f, 0x89, + 0x95, 0xe9, 0x72, 0x61, 0xbb, 0x55, 0x3a, 0x5d, 0xeb, 0xe0, 0xa0, 0x8c, 0x64, 0xf9, 0xd3, 0x51, + 0x50, 0xba, 0xcb, 0xc9, 0xb8, 0x87, 0x77, 0xd4, 0xe3, 0x60, 0x4c, 0x2c, 0xd7, 0x12, 0x51, 0x29, + 0xa4, 0x3a, 0x37, 0x41, 0x45, 0x8a, 0xcb, 0xeb, 0x3a, 0xc7, 0xe7, 0xdd, 0xc6, 0x88, 0x10, 0xd4, + 0x75, 0xbd, 0x26, 0xc9, 0x28, 0xe6, 0xc3, 0x77, 0xc0, 0x98, 0x9c, 0x93, 0x88, 0xa2, 0x3a, 0xc4, + 0xc2, 0x08, 0x84, 0x9f, 0x02, 0x0a, 0x29, 0xc8, 0xee, 0x82, 0x98, 0x7f, 0xd0, 0x05, 0xf1, 0x8e, + 0x15, 0x68, 0xec, 0xff, 0xbc, 0x02, 0x95, 0xff, 0x6b, 0x64, 0xcf, 0x7d, 0x6a, 0xa9, 0xb5, 0x3a, + 0x76, 0x08, 0x5c, 0x03, 0xb3, 0xfc, 0x91, 0x81, 0x48, 0xe0, 0xd8, 0x75, 0xcc, 0xc4, 0x1b, 0x57, + 0x26, 0x9c, 0x1e, 0xbb, 0xd4, 0x32, 0x7c, 0xd4, 0xa5, 0x01, 0x5f, 0x02, 0x50, 0x36, 0xde, 0x1d, + 0x76, 0x64, 0x0f, 0xa1, 0x5b, 0xe8, 0x5a, 0x97, 0x04, 0xea, 0xa1, 0x05, 0x57, 0xc1, 0x9c, 0x83, + 0x77, 0x89, 0x53, 0x23, 0x0e, 0xa9, 0x87, 0x3e, 0x15, 0xa6, 0xe4, 0x14, 0xe0, 0x5c, 0xbb, 0x55, + 0x9a, 0xbb, 0x9a, 0x65, 0xa2, 0x6e, 0xf9, 0xf2, 0xc5, 0xec, 0xf1, 0x4a, 0x2f, 0x5c, 0x3e, 0x67, + 0x3e, 0xcc, 0x81, 0x85, 0xfe, 0x99, 0x01, 0x7f, 0x90, 0xbc, 0xba, 0x64, 0x53, 0xfd, 0xe6, 0xb0, + 0xb2, 0x50, 0x3d, 0xbb, 0x40, 0xf7, 0x93, 0x0b, 0x7e, 0x97, 0x77, 0x38, 0xd8, 0x89, 0xe7, 0x3c, + 0x6f, 0x0c, 0xcd, 0x05, 0x0e, 0x52, 0x9d, 0x90, 0xcd, 0x13, 0x76, 0x44, 0xaf, 0x84, 0x1d, 0x52, + 0xfe, 0xa3, 0x91, 0x7d, 0x78, 0x27, 0x27, 0x18, 0xfe, 0xcc, 0x00, 0x33, 0x7e, 0x40, 0xbc, 0x95, + 0x9d, 0x8d, 0x57, 0xbe, 0x22, 0x4f, 0xb2, 0x0a, 0xd5, 0xd6, 0x7d, 0xfa, 0xf9, 0x52, 0x6d, 0x7b, + 0x4b, 0x1a, 0xdc, 0xa1, 0x7e, 0xc0, 0xaa, 0x67, 0xda, 0xad, 0xd2, 0xcc, 0x76, 0x27, 0x14, 0xca, + 0x62, 0x97, 0x5d, 0x70, 0x6e, 0xfd, 0x28, 0x24, 0xd4, 0xc3, 0xce, 0x9a, 0x5f, 0x8f, 0x5c, 0xe2, + 0x85, 0xd2, 0xd1, 0xcc, 0x90, 0xc8, 0xb8, 0xc7, 0x21, 0xd1, 0x23, 0x60, 0x24, 0xa2, 0x8e, 0xca, + 0xe2, 0x49, 0x3d, 0x04, 0x45, 0x57, 0x11, 0xa7, 0x97, 0x2f, 0x82, 0x51, 
0xee, 0x27, 0x3c, 0x0f, + 0x46, 0x28, 0x3e, 0x14, 0x56, 0xa7, 0xaa, 0xe3, 0x5c, 0x04, 0xe1, 0x43, 0xc4, 0x69, 0xe5, 0x7f, + 0x17, 0xc1, 0x4c, 0x66, 0x2d, 0x70, 0x01, 0xe4, 0xf4, 0x64, 0x15, 0x28, 0xa3, 0xb9, 0x8d, 0x35, + 0x94, 0xb3, 0x2d, 0xf8, 0xac, 0x2e, 0xbe, 0x12, 0xb4, 0xa4, 0xeb, 0xb9, 0xa0, 0xf2, 0x96, 0x36, + 0x31, 0xc7, 0x1d, 0x89, 0x0b, 0x27, 0xf7, 0x81, 0xec, 0xa9, 0x53, 0x22, 0x7d, 0x20, 0x7b, 0x88, + 0xd3, 0x3e, 0xeb, 0x84, 0x2c, 0x1e, 0xd1, 0xe5, 0xef, 0x61, 0x44, 0x37, 0x76, 0xc7, 0x11, 0xdd, + 0xa3, 0x20, 0x1f, 0xda, 0xa1, 0x43, 0xcc, 0xf1, 0xce, 0x97, 0xc7, 0x35, 0x4e, 0x44, 0x92, 0x07, + 0x6f, 0x82, 0x71, 0x8b, 0xec, 0xe1, 0xc8, 0x09, 0xcd, 0x82, 0x48, 0xa1, 0xd5, 0x01, 0xa4, 0x90, + 0x9c, 0x9f, 0xae, 0x49, 0xbb, 0x28, 0x06, 0x80, 0x8f, 0x81, 0x71, 0x17, 0x1f, 0xd9, 0x6e, 0xe4, + 0x8a, 0x9e, 0xcc, 0x90, 0x62, 0x9b, 0x92, 0x84, 0x62, 0x1e, 0xaf, 0x8c, 0xe4, 0xa8, 0xee, 0x44, + 0xcc, 0x6e, 0x12, 0xc5, 0x34, 0x81, 0xb8, 0x3d, 0x75, 0x65, 0x5c, 0xcf, 0xf0, 0x51, 0x97, 0x86, + 0x00, 0xb3, 0x3d, 0xa1, 0x3c, 0x99, 0x02, 0x93, 0x24, 0x14, 0xf3, 0x3a, 0xc1, 0x94, 0xfc, 0x54, + 0x3f, 0x30, 0xa5, 0xdc, 0xa5, 0x01, 0xbf, 0x0c, 0x26, 0x5c, 0x7c, 0x74, 0x95, 0x78, 0x8d, 0x70, + 0xdf, 0x9c, 0x5e, 0x34, 0x96, 0x46, 0xaa, 0xd3, 0xed, 0x56, 0x69, 0x62, 0x33, 0x26, 0xa2, 0x84, + 0x2f, 0x84, 0x6d, 0x4f, 0x09, 0x9f, 0x4e, 0x09, 0xc7, 0x44, 0x94, 0xf0, 0x79, 0x07, 0x11, 0xe0, + 0x90, 0x1f, 0x2e, 0x73, 0xa6, 0xf3, 0x65, 0xb8, 0x23, 0xc9, 0x28, 0xe6, 0xc3, 0x25, 0x50, 0x70, + 0xf1, 0x91, 0x78, 0xc5, 0x9b, 0xb3, 0xc2, 0xac, 0x98, 0x25, 0x6f, 0x2a, 0x1a, 0xd2, 0x5c, 0x21, + 0x69, 0x7b, 0x52, 0x72, 0x2e, 0x25, 0xa9, 0x68, 0x48, 0x73, 0x79, 0x12, 0x47, 0x9e, 0x7d, 0x2b, + 0x22, 0x52, 0x18, 0x8a, 0xc8, 0xe8, 0x24, 0xbe, 0x9e, 0xb0, 0x50, 0x5a, 0x8e, 0xbf, 0xa2, 0xdd, + 0xc8, 0x09, 0xed, 0xc0, 0x21, 0xdb, 0x7b, 0xe6, 0x19, 0x11, 0x7f, 0xd1, 0x27, 0x6f, 0x6a, 0x2a, + 0x4a, 0x49, 0x40, 0x02, 0x46, 0x89, 0x17, 0xb9, 0xe6, 0x59, 0x71, 0xb1, 0x0f, 0x24, 0x05, 0xf5, + 0xc9, 0x59, 0xf7, 0x22, 0x17, 0x09, 0xf3, 0xf0, 0x59, 0x30, 0xed, 0xe2, 0x23, 0x5e, 0x0e, 0x08, + 0x0d, 0xf9, 0xfb, 0xfe, 0x9c, 0x58, 0xfc, 0x1c, 0xef, 0x38, 0x37, 0xd3, 0x0c, 0xd4, 0x29, 0x27, + 0x14, 0x6d, 0x2f, 0xa5, 0x38, 0x9f, 0x52, 0x4c, 0x33, 0x50, 0xa7, 0x1c, 0x8f, 0x34, 0x25, 0xb7, + 0x22, 0x9b, 0x12, 0xcb, 0x7c, 0x48, 0x34, 0xa9, 0x6a, 0xbe, 0x2f, 0x69, 0x48, 0x73, 0x61, 0x33, + 0x1e, 0xf7, 0x98, 0xe2, 0x18, 0x5e, 0x1f, 0x6c, 0x25, 0xdf, 0xa6, 0x2b, 0x94, 0xe2, 0x63, 0x79, + 0xd3, 0xa4, 0x07, 0x3d, 0x90, 0x81, 0x3c, 0x76, 0x9c, 0xed, 0x3d, 0xf3, 0xbc, 0x88, 0xfd, 0xa0, + 0x6f, 0x10, 0x5d, 0x75, 0x56, 0x38, 0x08, 0x92, 0x58, 0x1c, 0xd4, 0xf7, 0x78, 0x6a, 0x2c, 0x0c, + 0x17, 0x74, 0x9b, 0x83, 0x20, 0x89, 0x25, 0x56, 0xea, 0x1d, 0x6f, 0xef, 0x99, 0x0f, 0x0f, 0x79, + 0xa5, 0x1c, 0x04, 0x49, 0x2c, 0x68, 0x83, 0x11, 0xcf, 0x0f, 0xcd, 0x0b, 0x43, 0xb9, 0x9e, 0xc5, + 0x85, 0xb3, 0xe5, 0x87, 0x88, 0x63, 0xc0, 0x5f, 0x1a, 0x00, 0x04, 0x49, 0x8a, 0x3e, 0x32, 0x90, + 0x29, 0x42, 0x06, 0xb2, 0x92, 0xe4, 0xf6, 0xba, 0x17, 0xd2, 0xe3, 0xe4, 0x1d, 0x99, 0x3a, 0x03, + 0x29, 0x2f, 0xe0, 0xef, 0x0c, 0x70, 0x36, 0xdd, 0x26, 0x6b, 0xf7, 0x8a, 0x22, 0x22, 0xd7, 0x06, + 0x9d, 0xe6, 0x55, 0xdf, 0x77, 0xaa, 0x66, 0xbb, 0x55, 0x3a, 0xbb, 0xd2, 0x03, 0x15, 0xf5, 0xf4, + 0x05, 0xfe, 0xc9, 0x00, 0x73, 0xaa, 0x8a, 0xa6, 0x3c, 0x2c, 0x89, 0x00, 0x92, 0x41, 0x07, 0x30, + 0x8b, 0x23, 0xe3, 0xa8, 0x7f, 0x97, 0xee, 0xe2, 0xa3, 0x6e, 0xd7, 0xe0, 0x5f, 0x0d, 0x30, 0x65, + 0x91, 0x80, 0x78, 0x16, 0xf1, 0xea, 0xdc, 0xd7, 0xc5, 0x81, 0x8c, 0x0d, 0xb2, 0xbe, 0xae, 0xa5, + 
0x20, 0xa4, 0x9b, 0x15, 0xe5, 0xe6, 0x54, 0x9a, 0x75, 0xd2, 0x2a, 0xcd, 0x27, 0xaa, 0x69, 0x0e, + 0xea, 0xf0, 0x12, 0xbe, 0x6f, 0x80, 0x99, 0x64, 0x03, 0xe4, 0x95, 0x72, 0x71, 0x88, 0x79, 0x20, + 0xda, 0xd7, 0x95, 0x4e, 0x40, 0x94, 0xf5, 0x00, 0xfe, 0xd9, 0xe0, 0x9d, 0x5a, 0xfc, 0xee, 0x63, + 0x66, 0x59, 0xc4, 0xf2, 0xad, 0x81, 0xc7, 0x52, 0x23, 0xc8, 0x50, 0x5e, 0x4a, 0x5a, 0x41, 0xcd, + 0x39, 0x69, 0x95, 0xce, 0xa5, 0x23, 0xa9, 0x19, 0x28, 0xed, 0x21, 0xfc, 0x89, 0x01, 0xa6, 0x48, + 0xd2, 0x71, 0x33, 0xf3, 0xd1, 0x81, 0x04, 0xb1, 0x67, 0x13, 0x2f, 0x5f, 0xea, 0x29, 0x16, 0x43, + 0x1d, 0xd8, 0xbc, 0x83, 0x24, 0x47, 0xd8, 0x0d, 0x1c, 0x62, 0x7e, 0x61, 0xc0, 0x1d, 0xe4, 0xba, + 0xb4, 0x8b, 0x62, 0x00, 0x78, 0x09, 0x14, 0xbc, 0xc8, 0x71, 0xf0, 0xae, 0x43, 0xcc, 0xc7, 0x44, + 0x2f, 0xa2, 0xa7, 0x98, 0x5b, 0x8a, 0x8e, 0xb4, 0x04, 0xbc, 0x09, 0x16, 0x8f, 0x5e, 0xd6, 0xff, + 0xd1, 0xb3, 0x43, 0x89, 0xc0, 0xbf, 0xee, 0x1d, 0x78, 0xfe, 0xa1, 0xf7, 0x82, 0x4d, 0x1c, 0x8b, + 0x99, 0x8f, 0x0b, 0x2b, 0xf1, 0x04, 0x7b, 0xfe, 0x46, 0x4f, 0x29, 0x74, 0x57, 0x3b, 0xf0, 0x35, + 0xf0, 0x70, 0x4a, 0x66, 0xdd, 0xdd, 0x25, 0x96, 0x45, 0xac, 0xf8, 0xf1, 0x66, 0x7e, 0x51, 0xc0, + 0xe8, 0x43, 0x7e, 0x23, 0x2b, 0x80, 0xee, 0xa4, 0x0d, 0xaf, 0x82, 0xf9, 0x14, 0x7b, 0xc3, 0x0b, + 0xb7, 0x69, 0x2d, 0xa4, 0xb6, 0xd7, 0x30, 0x97, 0x84, 0xdd, 0xb3, 0xf1, 0xa9, 0xbc, 0x91, 0xe2, + 0xa1, 0x3e, 0x3a, 0x0b, 0xfc, 0xf9, 0x98, 0x29, 0x3f, 0x70, 0x16, 0x8c, 0x1c, 0x10, 0xf5, 0x2b, + 0x39, 0xe2, 0x7f, 0x42, 0x0b, 0xe4, 0x9b, 0xd8, 0x89, 0xe2, 0x17, 0xf0, 0x80, 0xaf, 0x2e, 0x24, + 0x8d, 0x3f, 0x9f, 0x7b, 0xce, 0x58, 0xf8, 0xc0, 0x00, 0xf3, 0xbd, 0xab, 0xe2, 0x03, 0x75, 0xeb, + 0x37, 0x06, 0x98, 0xeb, 0x2a, 0x80, 0x3d, 0x3c, 0xba, 0xd5, 0xe9, 0xd1, 0x6b, 0x83, 0xae, 0x64, + 0x72, 0xd7, 0x44, 0xfb, 0x96, 0x76, 0xef, 0xe7, 0x06, 0x98, 0xcd, 0xd6, 0x94, 0x07, 0x19, 0xaf, + 0xf2, 0x07, 0x39, 0x30, 0xdf, 0xbb, 0xeb, 0x84, 0x54, 0x3f, 0xaf, 0x87, 0x33, 0xa6, 0xe8, 0x35, + 0xd2, 0x7c, 0xd7, 0x00, 0x93, 0x37, 0xb5, 0x5c, 0xfc, 0x2b, 0xea, 0xc0, 0x07, 0x24, 0x71, 0x11, + 0x4f, 0x18, 0x0c, 0xa5, 0x71, 0xcb, 0x7f, 0x31, 0xc0, 0xb9, 0x9e, 0xb7, 0x13, 0x7f, 0xc7, 0x63, + 0xc7, 0xf1, 0x0f, 0xe5, 0x9c, 0x2b, 0x35, 0x44, 0x5e, 0x11, 0x54, 0xa4, 0xb8, 0xa9, 0xe8, 0xe5, + 0x3e, 0xaf, 0xe8, 0x95, 0xff, 0x6e, 0x80, 0x0b, 0x77, 0xca, 0xc4, 0x07, 0xb2, 0xa5, 0x4b, 0xa0, + 0xa0, 0x3a, 0xcb, 0x63, 0xb1, 0x9d, 0xea, 0x31, 0xa5, 0x8a, 0x86, 0xf8, 0xc7, 0x21, 0xf9, 0x57, + 0xf9, 0x43, 0x03, 0xcc, 0xd6, 0x08, 0x6d, 0xda, 0x75, 0x82, 0xc8, 0x1e, 0xa1, 0xc4, 0xab, 0x13, + 0xb8, 0x0c, 0x26, 0xc4, 0xcf, 0x97, 0x01, 0xae, 0xc7, 0xb3, 0xfd, 0x39, 0x15, 0xf2, 0x89, 0xad, + 0x98, 0x81, 0x12, 0x19, 0xfd, 0x3b, 0x40, 0xae, 0xef, 0xef, 0x00, 0x17, 0xc0, 0x68, 0x90, 0x4c, + 0x49, 0x0b, 0x9c, 0x2b, 0x06, 0xa3, 0x82, 0x2a, 0xb8, 0x3e, 0x0d, 0xc5, 0xe8, 0x27, 0xaf, 0xb8, + 0x3e, 0x0d, 0x91, 0xa0, 0x96, 0xff, 0x61, 0x80, 0x5e, 0xff, 0xe2, 0x03, 0x9b, 0x60, 0x9c, 0x49, + 0xd7, 0x55, 0x68, 0xb7, 0xef, 0x33, 0xb4, 0xd9, 0x40, 0xc8, 0xbb, 0x35, 0xa6, 0xc6, 0x60, 0x3c, + 0xba, 0x75, 0x5c, 0x8d, 0x3c, 0x4b, 0x4d, 0x3d, 0xa7, 0x64, 0x74, 0x57, 0x57, 0x24, 0x0d, 0x69, + 0x2e, 0x3c, 0x2f, 0xe7, 0x73, 0xa9, 0xa1, 0x57, 0x3c, 0x9b, 0xab, 0x5e, 0xfe, 0xe8, 0x76, 0xf1, + 0xd4, 0xc7, 0xb7, 0x8b, 0xa7, 0x3e, 0xb9, 0x5d, 0x3c, 0xf5, 0xbd, 0x76, 0xd1, 0xf8, 0xa8, 0x5d, + 0x34, 0x3e, 0x6e, 0x17, 0x8d, 0x4f, 0xda, 0x45, 0xe3, 0x5f, 0xed, 0xa2, 0xf1, 0x8b, 0x4f, 0x8b, + 0xa7, 0xbe, 0x35, 0xae, 0x5c, 0xfb, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0x57, 0xd1, 0xa5, + 0x78, 0x2b, 0x00, 0x00, } 
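The unmarshal cases above decode each of the three new bool fields (XPreserveUnknownFields, XEmbeddedResource, XIntOrString) with the same varint loop. The following is a minimal standalone sketch of that pattern; the package, helper name, and plain error values are illustrative and not part of the generated file:

package wiredecode

import (
	"errors"
	"io"
)

// decodeBoolVarint accumulates a protobuf varint 7 bits at a time until a byte
// with the high bit clear terminates it; any non-zero result maps to true.
// It returns the decoded bool and the index just past the varint.
func decodeBoolVarint(dAtA []byte, iNdEx int) (bool, int, error) {
	l := len(dAtA)
	var v int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return false, iNdEx, errors.New("integer overflow") // the generated code returns ErrIntOverflowGenerated here
		}
		if iNdEx >= l {
			return false, iNdEx, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (int(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v != 0, iNdEx, nil
}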
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index ce7dd730a21..a0c23a44f56 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -443,6 +443,37 @@ message JSONSchemaProps { optional JSON example = 36; optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... 
zero or more + optional bool xKubernetesIntOrString = 40; } // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index bee60f2e258..00a558a7fc9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -938,6 +938,9 @@ func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JS out.Example = nil } out.Nullable = in.Nullable + out.XPreserveUnknownFields = in.XPreserveUnknownFields + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString return nil } @@ -1120,6 +1123,9 @@ func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *ap } else { out.Example = nil } + out.XPreserveUnknownFields = in.XPreserveUnknownFields + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString return nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index c7378143b9d..d624136ac96 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -32,6 +32,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/crdserverscheme:go_default_library", @@ -119,6 +120,7 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion:all-srcs", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation:all-srcs", ], tags = ["automanaged"], diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD new file mode 100644 index 00000000000..2a8cee01698 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "complete.go", + "convert.go", + "structural.go", + "validation.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema", + importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver/schema", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) 
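The x-kubernetes-int-or-string comment in the generated.proto hunk above lists two accepted shapes for such a value. A rough sketch of pattern (1), using the v1beta1 Go types this series extends (the package alias and variable name are illustrative; XIntOrString is the Go field backing the new proto field, as the conversion functions above show):

package example

import apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"

// intOrStringSchema leaves `type` empty, marks the value as int-or-string, and
// restricts it via anyOf to either an integer or a string.
var intOrStringSchema = apiextensionsv1beta1.JSONSchemaProps{
	XIntOrString: true,
	AnyOf: []apiextensionsv1beta1.JSONSchemaProps{
		{Type: "integer"},
		{Type: "string"},
	},
}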
+ +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", + ], +) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go new file mode 100644 index 00000000000..01b566c8789 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go @@ -0,0 +1,245 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package schema + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extensions) DeepCopyInto(out *Extensions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extensions. +func (in *Extensions) DeepCopy() *Extensions { + if in == nil { + return nil + } + out := new(Extensions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Generic) DeepCopyInto(out *Generic) { + *out = *in + out.Default = in.Default.DeepCopy() + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(StructuralOrBool) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generic. +func (in *Generic) DeepCopy() *Generic { + if in == nil { + return nil + } + out := new(Generic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NestedValueValidation) DeepCopyInto(out *NestedValueValidation) { + *out = *in + in.ValueValidation.DeepCopyInto(&out.ValueValidation) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(NestedValueValidation) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]NestedValueValidation, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.ForbiddenGenerics.DeepCopyInto(&out.ForbiddenGenerics) + out.ForbiddenExtensions = in.ForbiddenExtensions + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NestedValueValidation. 
+func (in *NestedValueValidation) DeepCopy() *NestedValueValidation { + if in == nil { + return nil + } + out := new(NestedValueValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Structural) DeepCopyInto(out *Structural) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(Structural) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]Structural, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.Generic.DeepCopyInto(&out.Generic) + out.Extensions = in.Extensions + if in.ValueValidation != nil { + in, out := &in.ValueValidation, &out.ValueValidation + *out = new(ValueValidation) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Structural. +func (in *Structural) DeepCopy() *Structural { + if in == nil { + return nil + } + out := new(Structural) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StructuralOrBool) DeepCopyInto(out *StructuralOrBool) { + *out = *in + if in.Structural != nil { + in, out := &in.Structural, &out.Structural + *out = new(Structural) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuralOrBool. +func (in *StructuralOrBool) DeepCopy() *StructuralOrBool { + if in == nil { + return nil + } + out := new(StructuralOrBool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValueValidation) DeepCopyInto(out *ValueValidation) { + *out = *in + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(float64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(float64) + **out = **in + } + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + *out = new(int64) + **out = **in + } + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + *out = new(int64) + **out = **in + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + *out = new(int64) + **out = **in + } + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + *out = new(int64) + **out = **in + } + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + *out = new(float64) + **out = **in + } + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]JSON, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + *out = new(int64) + **out = **in + } + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + *out = new(int64) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]NestedValueValidation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]NestedValueValidation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]NestedValueValidation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(NestedValueValidation) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueValidation. 
+func (in *ValueValidation) DeepCopy() *ValueValidation { + if in == nil { + return nil + } + out := new(ValueValidation) + in.DeepCopyInto(out) + return out +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/BUILD new file mode 100644 index 00000000000..e6fbd704bca --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["nonstructuralschema_controller.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema", + importpath = "k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index b9d951758cc..546aa4920fe 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -26,6 +26,7 @@ go_test( "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/options:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/features:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library", @@ -40,6 +41,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", diff --git a/vendor/modules.txt b/vendor/modules.txt index 788f32673db..4623350d8f9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1066,6 +1066,7 @@ k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation k8s.io/apiextensions-apiserver/pkg/apiserver k8s.io/apiextensions-apiserver/pkg/apiserver/conversion +k8s.io/apiextensions-apiserver/pkg/apiserver/schema k8s.io/apiextensions-apiserver/pkg/apiserver/validation k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme @@ -1087,6 +1088,7 @@ k8s.io/apiextensions-apiserver/pkg/cmd/server/options k8s.io/apiextensions-apiserver/pkg/cmd/server/testing k8s.io/apiextensions-apiserver/pkg/controller/establish k8s.io/apiextensions-apiserver/pkg/controller/finalizer +k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema k8s.io/apiextensions-apiserver/pkg/controller/openapi k8s.io/apiextensions-apiserver/pkg/controller/status k8s.io/apiextensions-apiserver/pkg/crdserverscheme From bdc6a299483e4b4a03659e6ecf537cb4d26ac24a Mon Sep 17 00:00:00 2001 From: Han Kang Date: Thu, 9 May 2019 12:49:56 -0700 Subject: [PATCH 075/194] add wrappers around gauge, histogram & summary --- .../src/k8s.io/component-base/metrics/BUILD | 6 + .../k8s.io/component-base/metrics/gauge.go | 150 ++++++++++++ .../component-base/metrics/gauge_test.go | 210 ++++++++++++++++ .../component-base/metrics/histogram.go | 148 ++++++++++++ .../component-base/metrics/histogram_test.go | 225 ++++++++++++++++++ .../src/k8s.io/component-base/metrics/opts.go | 128 ++++++++++ .../k8s.io/component-base/metrics/summary.go | 152 ++++++++++++ .../component-base/metrics/summary_test.go | 220 +++++++++++++++++ .../k8s.io/component-base/metrics/wrappers.go | 10 + 9 files changed, 1249 insertions(+) create mode 100644 staging/src/k8s.io/component-base/metrics/gauge.go create mode 100644 staging/src/k8s.io/component-base/metrics/gauge_test.go create mode 100644 staging/src/k8s.io/component-base/metrics/histogram.go create mode 100644 staging/src/k8s.io/component-base/metrics/histogram_test.go create mode 100644 staging/src/k8s.io/component-base/metrics/summary.go create mode 100644 staging/src/k8s.io/component-base/metrics/summary_test.go diff --git a/staging/src/k8s.io/component-base/metrics/BUILD b/staging/src/k8s.io/component-base/metrics/BUILD index b13db6f3536..d991cb88c61 100644 --- a/staging/src/k8s.io/component-base/metrics/BUILD +++ b/staging/src/k8s.io/component-base/metrics/BUILD @@ -10,9 +10,12 @@ go_library( name = "go_default_library", srcs = [ "counter.go", + "gauge.go", + "histogram.go", "metric.go", "opts.go", "registry.go", + "summary.go", "version_parser.go", "wrappers.go", ], @@ -31,7 +34,10 @@ go_test( name = "go_default_test", srcs = [ "counter_test.go", + "gauge_test.go", + "histogram_test.go", "registry_test.go", + "summary_test.go", "version_parser_test.go", ], embed = [":go_default_library"], diff --git a/staging/src/k8s.io/component-base/metrics/gauge.go b/staging/src/k8s.io/component-base/metrics/gauge.go new file mode 100644 index 00000000000..a1c6f2f82b5 --- /dev/null +++ b/staging/src/k8s.io/component-base/metrics/gauge.go @@ -0,0 
+1,150 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// Gauge is our internal representation for our wrapping struct around prometheus +// gauges. kubeGauge implements both KubeCollector and KubeGauge. +type Gauge struct { + GaugeMetric + *GaugeOpts + lazyMetric + selfCollector +} + +// NewGauge returns an object which satisfies the KubeCollector and KubeGauge interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGauge(opts *GaugeOpts) *Gauge { + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } + kc := &Gauge{ + GaugeOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusGauge(noop) + kc.lazyInit(kc) + return kc +} + +// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) { + g.GaugeMetric = gauge + g.initSelfCollection(gauge) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (g *Gauge) DeprecatedVersion() *semver.Version { + return g.GaugeOpts.DeprecatedVersion +} + +// initializeMetric invocation creates the actual underlying Gauge. Until this method is called +// the underlying gauge is a no-op. +func (g *Gauge) initializeMetric() { + g.GaugeOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method +// is called the underlying gauge is a no-op. +func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both KubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +// NewGaugeVec returns an object which satisfies the KubeCollector and KubeGaugeVec interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return v.GaugeOpts.DeprecatedVersion +} + +// initializeMetric invocation creates the actual underlying GaugeVec. 
Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + return v.GaugeVec.WithLabelValues(lvs...) +} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. +func (v *GaugeVec) With(labels prometheus.Labels) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + return v.GaugeVec.With(labels) +} diff --git a/staging/src/k8s.io/component-base/metrics/gauge_test.go b/staging/src/k8s.io/component-base/metrics/gauge_test.go new file mode 100644 index 00000000000..4b26a0a2cdf --- /dev/null +++ b/staging/src/k8s.io/component-base/metrics/gauge_test.go @@ -0,0 +1,210 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
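The Gauge and GaugeVec wrappers in gauge.go above are deliberately inert until registered: NewGauge and NewGaugeVec return lazily instantiated collectors, and WithLabelValues/With hand back a no-op metric while the vector is still unregistered, so no labelled children accumulate in memory. A minimal usage sketch in the style of the tests that follow (inside the metrics package; the metric name and version values are illustrative):

v := NewGaugeVec(&GaugeOpts{Name: "queue_depth", Help: "example gauge"}, []string{"queue"})

// Before registration the underlying prometheus.GaugeVec has not been created,
// so this returns the no-op gauge and records nothing.
v.WithLabelValues("default").Set(3)

registry := NewKubeRegistry(apimachineryversion.Info{Major: "1", Minor: "15", GitVersion: "v1.15.0"})
registry.MustRegister(v) // lazy instantiation happens here, creating the real prometheus.GaugeVec

// From now on, label combinations are created on first use as usual.
v.WithLabelValues("default").Set(3)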
+*/ + +package metrics + +import ( + "github.com/blang/semver" + apimachineryversion "k8s.io/apimachinery/pkg/version" + "testing" +) + +func TestGauge(t *testing.T) { + v115 := semver.MustParse("1.15.0") + v114 := semver.MustParse("1.14.0") + var tests = []struct { + desc string + GaugeOpts + registryVersion *semver.Version + expectedMetricCount int + expectedHelp string + }{ + { + desc: "Test non deprecated", + GaugeOpts: GaugeOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "gauge help", + }, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] gauge help", + }, + { + desc: "Test deprecated", + GaugeOpts: GaugeOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "gauge help", + DeprecatedVersion: &v115, + }, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] (Deprecated since 1.15.0) gauge help", + }, + { + desc: "Test hidden", + GaugeOpts: GaugeOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "gauge help", + DeprecatedVersion: &v114, + }, + registryVersion: &v115, + expectedMetricCount: 0, + expectedHelp: "gauge help", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + registry := NewKubeRegistry(apimachineryversion.Info{ + Major: "1", + Minor: "15", + GitVersion: "v1.15.0-alpha-1.12345", + }) + c := NewGauge(&test.GaugeOpts) + registry.MustRegister(c) + + ms, err := registry.Gather() + if len(ms) != test.expectedMetricCount { + t.Errorf("Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount) + } + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, metric := range ms { + if metric.GetHelp() != test.expectedHelp { + t.Errorf("Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp) + } + } + + // let's increment the counter and verify that the metric still works + c.Set(100) + c.Set(101) + expected := 101 + ms, err = registry.Gather() + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, mf := range ms { + for _, m := range mf.GetMetric() { + if int(m.GetGauge().GetValue()) != expected { + t.Errorf("Got %v, wanted %v as the count", m.GetGauge().GetValue(), expected) + } + t.Logf("%v\n", m.GetGauge().GetValue()) + } + } + }) + } +} + +func TestGaugeVec(t *testing.T) { + v115 := semver.MustParse("1.15.0") + v114 := semver.MustParse("1.14.0") + var tests = []struct { + desc string + GaugeOpts + labels []string + registryVersion *semver.Version + expectedMetricCount int + expectedHelp string + }{ + { + desc: "Test non deprecated", + GaugeOpts: GaugeOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "gauge help", + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] gauge help", + }, + { + desc: "Test deprecated", + GaugeOpts: GaugeOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "gauge help", + DeprecatedVersion: &v115, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] (Deprecated since 1.15.0) gauge help", + }, + { + desc: "Test hidden", + GaugeOpts: GaugeOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "gauge help", + DeprecatedVersion: &v114, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 0, + 
expectedHelp: "[ALPHA] gauge help",
+        },
+    }
+
+    for _, test := range tests {
+        t.Run(test.desc, func(t *testing.T) {
+            registry := NewKubeRegistry(apimachineryversion.Info{
+                Major:      "1",
+                Minor:      "15",
+                GitVersion: "v1.15.0-alpha-1.12345",
+            })
+            c := NewGaugeVec(&test.GaugeOpts, test.labels)
+            registry.MustRegister(c)
+            c.WithLabelValues("1", "2").Set(1.0)
+            ms, err := registry.Gather()
+
+            if len(ms) != test.expectedMetricCount {
+                t.Errorf("Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
+            }
+            if err != nil {
+                t.Fatalf("Gather failed %v", err)
+            }
+            for _, metric := range ms {
+                if metric.GetHelp() != test.expectedHelp {
+                    t.Errorf("Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp)
+                }
+            }
+
+            // let's set values for two more label combinations and verify that the metric still works
+            c.WithLabelValues("1", "3").Set(1.0)
+            c.WithLabelValues("2", "3").Set(1.0)
+            ms, err = registry.Gather()
+            if err != nil {
+                t.Fatalf("Gather failed %v", err)
+            }
+            for _, mf := range ms {
+                if len(mf.GetMetric()) != 3 {
+                    t.Errorf("Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
+                }
+            }
+        })
+    }
+}
diff --git a/staging/src/k8s.io/component-base/metrics/histogram.go b/staging/src/k8s.io/component-base/metrics/histogram.go
new file mode 100644
index 00000000000..ff88ae151e7
--- /dev/null
+++ b/staging/src/k8s.io/component-base/metrics/histogram.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+    "github.com/blang/semver"
+    "github.com/prometheus/client_golang/prometheus"
+)
+
+// Histogram is our internal representation for our wrapping struct around prometheus
+// histograms. Histogram implements both KubeCollector and ObserverMetric.
+type Histogram struct {
+    ObserverMetric
+    *HistogramOpts
+    lazyMetric
+    selfCollector
+}
+
+// NewHistogram returns an object which is Histogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewHistogram(opts *HistogramOpts) *Histogram {
+    // todo: handle defaulting better
+    if opts.StabilityLevel == "" {
+        opts.StabilityLevel = ALPHA
+    }
+    h := &Histogram{
+        HistogramOpts: opts,
+        lazyMetric:    lazyMetric{},
+    }
+    h.setPrometheusHistogram(noopMetric{})
+    h.lazyInit(h)
+    return h
+}
+
+// setPrometheusHistogram sets the underlying Histogram object, i.e. the thing that does the measurement.
+func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) {
+    h.ObserverMetric = histogram
+    h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *Histogram) DeprecatedVersion() *semver.Version {
+    return h.HistogramOpts.DeprecatedVersion
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *Histogram) initializeMetric() {
+    h.HistogramOpts.annotateStabilityLevel()
+    // this actually creates the underlying prometheus histogram.
+    h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *Histogram) initializeDeprecatedMetric() {
+    h.HistogramOpts.markDeprecated()
+    h.initializeMetric()
+}
+
+// HistogramVec is the internal representation of our wrapping struct around prometheus
+// histogramVecs.
+type HistogramVec struct {
+    *prometheus.HistogramVec
+    *HistogramOpts
+    lazyMetric
+    originalLabels []string
+}
+
+// NewHistogramVec returns an object which satisfies KubeCollector and wraps the
+// prometheus.HistogramVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily instantiated.
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
+    // todo: handle defaulting better
+    if opts.StabilityLevel == "" {
+        opts.StabilityLevel = ALPHA
+    }
+    v := &HistogramVec{
+        HistogramVec:   noopHistogramVec,
+        HistogramOpts:  opts,
+        originalLabels: labels,
+        lazyMetric:     lazyMetric{},
+    }
+    v.lazyInit(v)
+    return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *HistogramVec) DeprecatedVersion() *semver.Version {
+    return v.HistogramOpts.DeprecatedVersion
+}
+
+func (v *HistogramVec) initializeMetric() {
+    v.HistogramOpts.annotateStabilityLevel()
+    v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels)
+}
+
+func (v *HistogramVec) initializeDeprecatedMetric() {
+    v.HistogramOpts.markDeprecated()
+    v.initializeMetric()
+}
+
+// Default Prometheus behavior actually results in the creation of a new metric
+// if a metric with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec
+// has been registered to a metrics registry.
+func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric {
+    if !v.IsCreated() {
+        return noop
+    }
+    return v.HistogramVec.WithLabelValues(lvs...)
+}
+
+// With returns the ObserverMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has
+// been registered to a metrics registry.
+func (v *HistogramVec) With(labels prometheus.Labels) ObserverMetric {
+    if !v.IsCreated() {
+        return noop
+    }
+    return v.HistogramVec.With(labels)
+}
diff --git a/staging/src/k8s.io/component-base/metrics/histogram_test.go b/staging/src/k8s.io/component-base/metrics/histogram_test.go
new file mode 100644
index 00000000000..ffa9c281606
--- /dev/null
+++ b/staging/src/k8s.io/component-base/metrics/histogram_test.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + apimachineryversion "k8s.io/apimachinery/pkg/version" + "testing" +) + +func TestHistogram(t *testing.T) { + v115 := semver.MustParse("1.15.0") + v114 := semver.MustParse("1.14.0") + var tests = []struct { + desc string + HistogramOpts + registryVersion *semver.Version + expectedMetricCount int + expectedHelp string + }{ + { + desc: "Test non deprecated", + HistogramOpts: HistogramOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "histogram help message", + Buckets: prometheus.DefBuckets, + }, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] histogram help message", + }, + { + desc: "Test deprecated", + HistogramOpts: HistogramOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "histogram help message", + DeprecatedVersion: &v115, + Buckets: prometheus.DefBuckets, + }, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] (Deprecated since 1.15.0) histogram help message", + }, + { + desc: "Test hidden", + HistogramOpts: HistogramOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "histogram help message", + DeprecatedVersion: &v114, + Buckets: prometheus.DefBuckets, + }, + registryVersion: &v115, + expectedMetricCount: 0, + expectedHelp: "histogram help message", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + registry := NewKubeRegistry(apimachineryversion.Info{ + Major: "1", + Minor: "15", + GitVersion: "v1.15.0-alpha-1.12345", + }) + c := NewHistogram(&test.HistogramOpts) + registry.MustRegister(c) + + ms, err := registry.Gather() + if len(ms) != test.expectedMetricCount { + t.Errorf("Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount) + } + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, metric := range ms { + if metric.GetHelp() != test.expectedHelp { + t.Errorf("Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp) + } + } + + // let's increment the counter and verify that the metric still works + c.Observe(1) + c.Observe(2) + c.Observe(3) + c.Observe(1.5) + expected := 4 + ms, err = registry.Gather() + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, mf := range ms { + for _, m := range mf.GetMetric() { + if int(m.GetHistogram().GetSampleCount()) != expected { + t.Errorf("Got %v, want %v as the sample count", m.GetHistogram().GetSampleCount(), expected) + } + } + } + }) + } +} + +func TestHistogramVec(t *testing.T) { + v115 := semver.MustParse("1.15.0") + v114 := semver.MustParse("1.14.0") + var tests = []struct { + desc string + HistogramOpts + labels []string + registryVersion *semver.Version + expectedMetricCount int + expectedHelp string + }{ + { + desc: "Test non deprecated", + HistogramOpts: HistogramOpts{ + Namespace: "namespace", + Name: "metric_test_name", + 
Subsystem: "subsystem", + Help: "histogram help message", + Buckets: prometheus.DefBuckets, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] histogram help message", + }, + { + desc: "Test deprecated", + HistogramOpts: HistogramOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "histogram help message", + DeprecatedVersion: &v115, + Buckets: prometheus.DefBuckets, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] (Deprecated since 1.15.0) histogram help message", + }, + { + desc: "Test hidden", + HistogramOpts: HistogramOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "histogram help message", + DeprecatedVersion: &v114, + Buckets: prometheus.DefBuckets, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 0, + expectedHelp: "histogram help message", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + registry := NewKubeRegistry(apimachineryversion.Info{ + Major: "1", + Minor: "15", + GitVersion: "v1.15.0-alpha-1.12345", + }) + c := NewHistogramVec(&test.HistogramOpts, test.labels) + registry.MustRegister(c) + c.WithLabelValues("1", "2").Observe(1.0) + ms, err := registry.Gather() + + if len(ms) != test.expectedMetricCount { + t.Errorf("Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount) + } + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, metric := range ms { + if metric.GetHelp() != test.expectedHelp { + t.Errorf("Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp) + } + } + + // let's increment the counter and verify that the metric still works + c.WithLabelValues("1", "3").Observe(1.0) + c.WithLabelValues("2", "3").Observe(1.0) + ms, err = registry.Gather() + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, mf := range ms { + if len(mf.GetMetric()) != 3 { + t.Errorf("Got %v metrics, wanted 2 as the count", len(mf.GetMetric())) + } + for _, m := range mf.GetMetric() { + if m.GetHistogram().GetSampleCount() != 1 { + t.Errorf( + "Got %v metrics, expected histogram sample count to equal 1", + m.GetHistogram().GetSampleCount()) + } + } + } + }) + } +} diff --git a/staging/src/k8s.io/component-base/metrics/opts.go b/staging/src/k8s.io/component-base/metrics/opts.go index 1409f241020..7e4c491706c 100644 --- a/staging/src/k8s.io/component-base/metrics/opts.go +++ b/staging/src/k8s.io/component-base/metrics/opts.go @@ -21,6 +21,7 @@ import ( "github.com/blang/semver" "github.com/prometheus/client_golang/prometheus" "sync" + "time" ) // KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure @@ -82,3 +83,130 @@ func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts { ConstLabels: o.ConstLabels, } } + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts KubeOpts + +// Modify help description on the metric description. 
+func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels prometheus.Labels + Buckets []float64 + DeprecatedVersion *semver.Version + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// Modify help description on the metric description. +func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels prometheus.Labels + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion *semver.Version + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// Modify help description on the metric description. +func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. 
This will do more once we have a proper label abstraction +func (o SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: o.Objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} diff --git a/staging/src/k8s.io/component-base/metrics/summary.go b/staging/src/k8s.io/component-base/metrics/summary.go new file mode 100644 index 00000000000..366021e3865 --- /dev/null +++ b/staging/src/k8s.io/component-base/metrics/summary.go @@ -0,0 +1,152 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// Summary is our internal representation for our wrapping struct around prometheus +// summaries. Summary implements both KubeCollector and ObserverMetric +// +// DEPRECATED: as per the metrics overhaul KEP +type Summary struct { + ObserverMetric + *SummaryOpts + lazyMetric + selfCollector +} + +// NewSummary returns an object which is Summary-like. However, nothing +// will be measured until the summary is registered somewhere. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummary(opts *SummaryOpts) *Summary { + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } + s := &Summary{ + SummaryOpts: opts, + lazyMetric: lazyMetric{}, + } + s.setPrometheusSummary(noopMetric{}) + s.lazyInit(s) + return s +} + +// setPrometheusSummary sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (s *Summary) setPrometheusSummary(summary prometheus.Summary) { + s.ObserverMetric = summary + s.initSelfCollection(summary) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (s *Summary) DeprecatedVersion() *semver.Version { + return s.SummaryOpts.DeprecatedVersion +} + +// initializeMetric invokes the actual prometheus.Summary object instantiation +// and stores a reference to it +func (s *Summary) initializeMetric() { + s.SummaryOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation +// but modifies the Help description prior to object instantiation. +func (s *Summary) initializeDeprecatedMetric() { + s.SummaryOpts.markDeprecated() + s.initializeMetric() +} + +// SummaryVec is the internal representation of our wrapping struct around prometheus +// summaryVecs. +// +// DEPRECATED: as per the metrics overhaul KEP +type SummaryVec struct { + *prometheus.SummaryVec + *SummaryOpts + lazyMetric + originalLabels []string +} + +// NewSummaryVec returns an object which satisfies KubeCollector and wraps the +// prometheus.SummaryVec object. 
However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } + v := &SummaryVec{ + SummaryOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + v.lazyInit(v) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *SummaryVec) DeprecatedVersion() *semver.Version { + return v.SummaryOpts.DeprecatedVersion +} + +func (v *SummaryVec) initializeMetric() { + v.SummaryOpts.annotateStabilityLevel() + v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels) +} + +func (v *SummaryVec) initializeDeprecatedMetric() { + v.SummaryOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/summary.go#L485-L495 + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec +// has been registered to a metrics registry. +func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels prometheus.Labels) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.SummaryVec.With(labels) +} diff --git a/staging/src/k8s.io/component-base/metrics/summary_test.go b/staging/src/k8s.io/component-base/metrics/summary_test.go new file mode 100644 index 00000000000..c7113b1e5f2 --- /dev/null +++ b/staging/src/k8s.io/component-base/metrics/summary_test.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "github.com/blang/semver" + apimachineryversion "k8s.io/apimachinery/pkg/version" + "testing" +) + +func TestSummary(t *testing.T) { + v115 := semver.MustParse("1.15.0") + v114 := semver.MustParse("1.14.0") + var tests = []struct { + desc string + SummaryOpts + registryVersion *semver.Version + expectedMetricCount int + expectedHelp string + }{ + { + desc: "Test non deprecated", + SummaryOpts: SummaryOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "summary help message", + StabilityLevel: ALPHA, + }, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] summary help message", + }, + { + desc: "Test deprecated", + SummaryOpts: SummaryOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "summary help message", + DeprecatedVersion: &v115, + StabilityLevel: ALPHA, + }, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] (Deprecated since 1.15.0) summary help message", + }, + { + desc: "Test hidden", + SummaryOpts: SummaryOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "summary help message", + DeprecatedVersion: &v114, + }, + registryVersion: &v115, + expectedMetricCount: 0, + expectedHelp: "summary help message", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + registry := NewKubeRegistry(apimachineryversion.Info{ + Major: "1", + Minor: "15", + GitVersion: "v1.15.0-alpha-1.12345", + }) + c := NewSummary(&test.SummaryOpts) + registry.MustRegister(c) + + ms, err := registry.Gather() + if len(ms) != test.expectedMetricCount { + t.Errorf("Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount) + } + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, metric := range ms { + if metric.GetHelp() != test.expectedHelp { + t.Errorf("Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp) + } + } + + // let's increment the counter and verify that the metric still works + c.Observe(1) + c.Observe(2) + c.Observe(3) + c.Observe(1.5) + expected := 4 + ms, err = registry.Gather() + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, mf := range ms { + for _, m := range mf.GetMetric() { + if int(m.GetSummary().GetSampleCount()) != expected { + t.Errorf("Got %v, want %v as the sample count", m.GetHistogram().GetSampleCount(), expected) + } + } + } + }) + } +} + +func TestSummaryVec(t *testing.T) { + v115 := semver.MustParse("1.15.0") + v114 := semver.MustParse("1.14.0") + var tests = []struct { + desc string + SummaryOpts + labels []string + registryVersion *semver.Version + expectedMetricCount int + expectedHelp string + }{ + { + desc: "Test non deprecated", + SummaryOpts: SummaryOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "summary help message", + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] summary help message", + }, + { + desc: "Test deprecated", + SummaryOpts: SummaryOpts{ + Namespace: "namespace", + Name: "metric_test_name", + Subsystem: "subsystem", + Help: "summary help message", + DeprecatedVersion: &v115, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 1, + expectedHelp: "[ALPHA] (Deprecated since 1.15.0) summary help message", + }, + { + desc: "Test hidden", + SummaryOpts: SummaryOpts{ + Namespace: "namespace", + Name: 
"metric_test_name", + Subsystem: "subsystem", + Help: "summary help message", + DeprecatedVersion: &v114, + }, + labels: []string{"label_a", "label_b"}, + registryVersion: &v115, + expectedMetricCount: 0, + expectedHelp: "summary help message", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + registry := NewKubeRegistry(apimachineryversion.Info{ + Major: "1", + Minor: "15", + GitVersion: "v1.15.0-alpha-1.12345", + }) + c := NewSummaryVec(&test.SummaryOpts, test.labels) + registry.MustRegister(c) + c.WithLabelValues("1", "2").Observe(1.0) + ms, err := registry.Gather() + + if len(ms) != test.expectedMetricCount { + t.Errorf("Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount) + } + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, metric := range ms { + if metric.GetHelp() != test.expectedHelp { + t.Errorf("Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp) + } + } + + // let's increment the counter and verify that the metric still works + c.WithLabelValues("1", "3").Observe(1.0) + c.WithLabelValues("2", "3").Observe(1.0) + ms, err = registry.Gather() + if err != nil { + t.Fatalf("Gather failed %v", err) + } + for _, mf := range ms { + if len(mf.GetMetric()) != 3 { + t.Errorf("Got %v metrics, wanted 2 as the count", len(mf.GetMetric())) + } + for _, m := range mf.GetMetric() { + if m.GetSummary().GetSampleCount() != 1 { + t.Errorf( + "Got %v metrics, wanted 2 as the summary sample count", + m.GetSummary().GetSampleCount()) + } + } + } + }) + } +} diff --git a/staging/src/k8s.io/component-base/metrics/wrappers.go b/staging/src/k8s.io/component-base/metrics/wrappers.go index 5bb72cacfbc..f2d4c9dedd9 100644 --- a/staging/src/k8s.io/component-base/metrics/wrappers.go +++ b/staging/src/k8s.io/component-base/metrics/wrappers.go @@ -56,6 +56,16 @@ type CounterVecMetric interface { With(prometheus.Labels) CounterMetric } +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) +} + +// ObserverMetric captures individual observations. 
+type ObserverMetric interface { + Observe(float64) +} + // PromRegistry is an interface which implements a subset of prometheus.Registerer and // prometheus.Gatherer interfaces type PromRegistry interface { From 62e58a66aa71bfbdfd6721b2225a29d83869cd88 Mon Sep 17 00:00:00 2001 From: Brad Hoekstra Date: Thu, 9 May 2019 11:34:56 -0400 Subject: [PATCH 076/194] Fix some lint errors in pkg/proxy --- cmd/kube-proxy/app/server_others.go | 6 ++--- hack/.golint_failures | 2 -- pkg/proxy/iptables/proxier.go | 34 ++++++++++++++++++++++------- pkg/proxy/util/endpoints.go | 8 +++---- pkg/proxy/util/utils.go | 13 ++++++++++- 5 files changed, 45 insertions(+), 18 deletions(-) diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 1182c001668..1b31497db02 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -237,7 +237,7 @@ func newProxyServer( }, nil } -func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { +func getProxyMode(proxyMode string, iptver iptables.Versioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { switch proxyMode { case proxyModeUserspace: return proxyModeUserspace @@ -250,7 +250,7 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle i return tryIPTablesProxy(iptver, kcompat) } -func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { +func tryIPVSProxy(iptver iptables.Versioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { // guaranteed false on error, error only necessary for debugging // IPVS Proxier relies on ip_vs_* kernel modules and ipset useIPVSProxy, err := ipvs.CanUseIPVSProxier(khandle, ipsetver) @@ -267,7 +267,7 @@ func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, return tryIPTablesProxy(iptver, kcompat) } -func tryIPTablesProxy(iptver iptables.IPTablesVersioner, kcompat iptables.KernelCompatTester) string { +func tryIPTablesProxy(iptver iptables.Versioner, kcompat iptables.KernelCompatTester) string { // guaranteed false on error, error only necessary for debugging useIPTablesProxy, err := iptables.CanUseIPTablesProxier(iptver, kcompat) if err != nil { diff --git a/hack/.golint_failures b/hack/.golint_failures index 9ef59605506..75ce617ee0a 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -194,9 +194,7 @@ pkg/master/tunneler pkg/proxy pkg/proxy/apis/config pkg/proxy/apis/config/v1alpha1 -pkg/proxy/iptables pkg/proxy/userspace -pkg/proxy/util pkg/proxy/winkernel pkg/proxy/winuserspace pkg/quota/v1/evaluator/core diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 5911f0e623e..4dc84e2866e 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -73,18 +73,18 @@ const ( // the kubernetes postrouting chain kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING" - // the mark-for-masquerade chain + // KubeMarkMasqChain is the mark-for-masquerade chain KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ" - // the mark-for-drop chain + // KubeMarkDropChain is the mark-for-drop chain KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP" // the kubernetes forward chain kubeForwardChain utiliptables.Chain = "KUBE-FORWARD" ) -// 
IPTablesVersioner can query the current iptables version. -type IPTablesVersioner interface { +// Versioner can query the current iptables version. +type Versioner interface { // returns "X.Y.Z" GetVersion() (string, error) } @@ -100,7 +100,7 @@ type KernelCompatTester interface { // the iptables version and for the existence of kernel features. It may return // an error if it fails to get the iptables version without error, in which // case it will also return false. -func CanUseIPTablesProxier(iptver IPTablesVersioner, kcompat KernelCompatTester) (bool, error) { +func CanUseIPTablesProxier(iptver Versioner, kcompat KernelCompatTester) (bool, error) { minVersion, err := utilversion.ParseGeneric(iptablesMinVersion) if err != nil { return false, err @@ -124,12 +124,14 @@ func CanUseIPTablesProxier(iptver IPTablesVersioner, kcompat KernelCompatTester) return true, nil } +// LinuxKernelCompatTester is the Linux implementation of KernelCompatTester type LinuxKernelCompatTester struct{} +// IsCompatible checks for the required sysctls. We don't care about the value, just +// that it exists. If this Proxier is chosen, we'll initialize it as we +// need. func (lkct LinuxKernelCompatTester) IsCompatible() error { - // Check for the required sysctls. We don't care about the value, just - // that it exists. If this Proxier is chosen, we'll initialize it as we - // need. + _, err := utilsysctl.New().GetSysctl(sysctlRouteLocalnet) return err } @@ -507,21 +509,29 @@ func (proxier *Proxier) isInitialized() bool { return atomic.LoadInt32(&proxier.initialized) > 0 } +// OnServiceAdd is called whenever creation of new service object +// is observed. func (proxier *Proxier) OnServiceAdd(service *v1.Service) { proxier.OnServiceUpdate(nil, service) } +// OnServiceUpdate is called whenever modification of an existing +// service object is observed. func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() { proxier.syncRunner.Run() } } +// OnServiceDelete is called whenever deletion of an existing service +// object is observed. func (proxier *Proxier) OnServiceDelete(service *v1.Service) { proxier.OnServiceUpdate(service, nil) } +// OnServiceSynced is called once all the initial even handlers were +// called and the state is fully propagated to local cache. func (proxier *Proxier) OnServiceSynced() { proxier.mu.Lock() proxier.servicesSynced = true @@ -532,20 +542,28 @@ func (proxier *Proxier) OnServiceSynced() { proxier.syncProxyRules() } +// OnEndpointsAdd is called whenever creation of new endpoints object +// is observed. func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) { proxier.OnEndpointsUpdate(nil, endpoints) } +// OnEndpointsUpdate is called whenever modification of an existing +// endpoints object is observed. func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) { if proxier.endpointsChanges.Update(oldEndpoints, endpoints) && proxier.isInitialized() { proxier.syncRunner.Run() } } +// OnEndpointsDelete is called whever deletion of an existing endpoints +// object is observed. func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) { proxier.OnEndpointsUpdate(endpoints, nil) } +// OnEndpointsSynced is called once all the initial event handlers were +// called and the state is fully propagated to local cache. 
func (proxier *Proxier) OnEndpointsSynced() { proxier.mu.Lock() proxier.endpointsSynced = true diff --git a/pkg/proxy/util/endpoints.go b/pkg/proxy/util/endpoints.go index 716491cd25c..0a65f43955e 100644 --- a/pkg/proxy/util/endpoints.go +++ b/pkg/proxy/util/endpoints.go @@ -39,12 +39,12 @@ func IPPart(s string) string { return "" } // Check if host string is a valid IP address - if ip := net.ParseIP(host); ip != nil { - return ip.String() - } else { + ip := net.ParseIP(host) + if ip == nil { klog.Errorf("invalid IP part '%s'", host) + return "" } - return "" + return ip.String() } // PortPart returns just the port part of an endpoint string. diff --git a/pkg/proxy/util/utils.go b/pkg/proxy/util/utils.go index 822da8534d3..d390c7f5b0a 100644 --- a/pkg/proxy/util/utils.go +++ b/pkg/proxy/util/utils.go @@ -33,15 +33,23 @@ import ( ) const ( + // IPv4ZeroCIDR is the CIDR block for the whole IPv4 address space IPv4ZeroCIDR = "0.0.0.0/0" + + // IPv6ZeroCIDR is the CIDR block for the whole IPv6 address space IPv6ZeroCIDR = "::/0" ) var ( + // ErrAddressNotAllowed indicates the address is not allowed ErrAddressNotAllowed = errors.New("address not allowed") - ErrNoAddresses = errors.New("No addresses for hostname") + + // ErrNoAddresses indicates there are no addresses for the hostname + ErrNoAddresses = errors.New("No addresses for hostname") ) +// IsZeroCIDR checks whether the input CIDR string is either +// the IPv4 or IPv6 zero CIDR func IsZeroCIDR(cidr string) bool { if cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR { return true @@ -89,6 +97,8 @@ func IsProxyableHostname(ctx context.Context, resolv Resolver, hostname string) return nil } +// IsLocalIP checks if a given IP address is bound to an interface +// on the local system func IsLocalIP(ip string) (bool, error) { addrs, err := net.InterfaceAddrs() if err != nil { @@ -106,6 +116,7 @@ func IsLocalIP(ip string) (bool, error) { return false, nil } +// ShouldSkipService checks if a given service should skip proxying func ShouldSkipService(svcName types.NamespacedName, service *v1.Service) bool { // if ClusterIP is "None" or empty, skip proxying if !helper.IsServiceIPSet(service) { From f0ec8c8644997e3cc12be968c2341fc67dff6a5f Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Thu, 9 May 2019 13:56:02 -0700 Subject: [PATCH 077/194] Promote oomichi to approver in test/ --- test/OWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/OWNERS b/test/OWNERS index 14bbffa0418..a228a8d9339 100644 --- a/test/OWNERS +++ b/test/OWNERS @@ -30,7 +30,6 @@ reviewers: - zmerlynn - vishh - MaciekPytel # for test/e2e/common/autoscaling_utils.go - - oomichi - xichengliudui - andrewsykim - neolit123 @@ -58,6 +57,7 @@ approvers: - mikedanese - msau42 # for test/e2e/commmon/{*volume,empty_dir}.go and test/e2e/framework/{pv|volume}_util.go - ncdc + - oomichi - pwittrock # for test/e2e/kubectl.go - saad-ali - shyamjvs From 205eaed1a70195982e6f2eed33e73c4b707c9630 Mon Sep 17 00:00:00 2001 From: "Khaled Henidak(Kal)" Date: Thu, 9 May 2019 22:55:15 +0000 Subject: [PATCH 078/194] add legacy cloud providers unit tests to [make test] --- hack/make-rules/test.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh index 2ced45589e9..b215aa147ba 100755 --- a/hack/make-rules/test.sh +++ b/hack/make-rules/test.sh @@ -98,6 +98,10 @@ kube::test::find_dirs() { find ./staging/src/k8s.io/cli-runtime -name '*_test.go' \ -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' 
| LC_ALL=C sort -u + + # add legacy cloud providers tests + find ./staging/src/k8s.io/legacy-cloud-providers -name '*_test.go' \ + -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u ) } From 69eab5f04b4542b933b4735aec09642ae8abc6c9 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Thu, 9 May 2019 16:30:58 -0700 Subject: [PATCH 079/194] Prune test/OWNERS Move approvers who haven't reviewed any PRs touching test/ in over six months to emeritus_approvers (and remove from reviewers as well to avoid assigning to inactive people) --- test/OWNERS | 40 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/test/OWNERS b/test/OWNERS index a228a8d9339..53ca857e1c0 100644 --- a/test/OWNERS +++ b/test/OWNERS @@ -6,30 +6,20 @@ reviewers: - MrHohn - deads2k - enisoc - - enj # for test/integration/etcd/etcd_storage_path_test.go - - erictune - - foxish # for test/e2e/network-partition.go - - gmarek - janetkuo - - kow3ns # for test/e2e/statefulset.go - - krousey - liggitt - - madhusudancs - - marun - mikedanese - - msau42 # for test/e2e/commmon/{*volume,empty_dir}.go and test/e2e/framework/{pv|volume}_util.go + - msau42 - ncdc - pwittrock # for test/e2e/kubectl.go - saad-ali - shyamjvs - smarterclayton - - soltysh - sig-testing-reviewers - sttts - timothysc - zmerlynn - vishh - - MaciekPytel # for test/e2e/common/autoscaling_utils.go - xichengliudui - andrewsykim - neolit123 @@ -43,19 +33,10 @@ approvers: - MrHohn - deads2k - enisoc - - enj # for test/integration/etcd/etcd_storage_path_test.go - - eparis - - erictune - - foxish # for test/e2e/network-partition.go - - gmarek - janetkuo - - kow3ns # for test/e2e/statefulset.go - - krousey - liggitt - - madhusudancs - - marun - mikedanese - - msau42 # for test/e2e/commmon/{*volume,empty_dir}.go and test/e2e/framework/{pv|volume}_util.go + - msau42 - ncdc - oomichi - pwittrock # for test/e2e/kubectl.go @@ -63,12 +44,23 @@ approvers: - shyamjvs - sig-testing-approvers - smarterclayton - - soltysh - sttts - timothysc - - zmerlynn - vishh - - MaciekPytel # for test/e2e/common/autoscaling_utils.go +emeritus_approvers: + - enj + - eparis + - erictune + - foxish + - gmarek + - krousey + - kow3ns + - madhusudancs + - marun + - soltysh + - zmerlynn + - MaciekPytel + labels: - area/test - sig/testing From b0aab032091c22135a5d6a0bcc83f927c0a3837b Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 7 May 2019 21:28:45 -0700 Subject: [PATCH 080/194] Fix admission webhook integration tests to filter out controller requests --- .../admissionwebhook/admission_test.go | 36 ++++++++++++++----- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index 7b833f96a65..bd506e4dcf1 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -52,7 +52,8 @@ import ( ) const ( - testNamespace = "webhook-integration" + testNamespace = "webhook-integration" + testClientUsername = "webhook-integration-client" mutation = "mutation" validation = "validation" @@ -336,19 +337,34 @@ func TestWebhookV1beta1(t *testing.T) { }) defer master.Cleanup() - if _, err := master.Client.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { + // Configure a client with a distinct user name so that it is easy to distinguish 
requests + // made by the client from requests made by controllers. We use this to filter out requests + // before recording them to ensure we don't accidentally mistake requests from controllers + // as requests made by the client. + clientConfig := master.Config + clientConfig.Impersonate.UserName = testClientUsername + clientConfig.Impersonate.Groups = []string{"system:masters", "system:authenticated"} + client, err := clientset.NewForConfig(clientConfig) + if err != nil { t.Fatal(err) } - if err := createV1beta1MutationWebhook(master.Client, webhookServer.URL+"/"+mutation); err != nil { + + if _, err := client.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { t.Fatal(err) } - if err := createV1beta1ValidationWebhook(master.Client, webhookServer.URL+"/"+validation); err != nil { + if err := createV1beta1MutationWebhook(client, webhookServer.URL+"/"+mutation); err != nil { + t.Fatal(err) + } + if err := createV1beta1ValidationWebhook(client, webhookServer.URL+"/"+validation); err != nil { t.Fatal(err) } // gather resources to test - dynamicClient := master.Dynamic - _, resources, err := master.Client.Discovery().ServerGroupsAndResources() + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + t.Fatal(err) + } + _, resources, err := client.Discovery().ServerGroupsAndResources() if err != nil { t.Fatalf("Failed to get ServerGroupsAndResources with error: %+v", err) } @@ -412,7 +428,7 @@ func TestWebhookV1beta1(t *testing.T) { t: t, admissionHolder: holder, client: dynamicClient, - clientset: master.Client, + clientset: client, verb: verb, gvr: gvr, resource: resource, @@ -938,7 +954,11 @@ func newWebhookHandler(t *testing.T, holder *holder, phase string) http.Handler } review.Request.OldObject.Object = u } - holder.record(phase, review.Request) + + if review.Request.UserInfo.Username == testClientUsername { + // only record requests originating from this integration test's client + holder.record(phase, review.Request) + } review.Response = &v1beta1.AdmissionResponse{ Allowed: true, From a591a838e5471e186a0153b80a0be32fde4b8b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=80b=C3=A9j=C3=ADd=C3=A9=20=C3=80yod=C3=A9l=C3=A9?= Date: Wed, 1 May 2019 16:23:28 +0000 Subject: [PATCH 081/194] Clean up controller-manager. These are based on recommendation from [staticcheck](http://staticcheck.io/). --- cmd/controller-manager/app/options/generic.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/controller-manager/app/options/generic.go b/cmd/controller-manager/app/options/generic.go index 9491ec0a745..59dc6784467 100644 --- a/cmd/controller-manager/app/options/generic.go +++ b/cmd/controller-manager/app/options/generic.go @@ -103,9 +103,7 @@ func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers [ if controller == "*" { continue } - if strings.HasPrefix(controller, "-") { - controller = controller[1:] - } + controller = strings.TrimPrefix(controller, "-") if !allControllersSet.Has(controller) { errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller)) } From be4af8f83f86a1355359cfe142bc68c00845df06 Mon Sep 17 00:00:00 2001 From: Akihito INOH Date: Thu, 9 May 2019 16:56:45 +0900 Subject: [PATCH 082/194] Use framework.ExpectNoError() for e2e/lifecycle The e2e test framework has ExpectNoError() for readable test code. This replaces Expect(err).NotTo(HaveOccurred()) with it for e2e/lifecycle. 
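
As a minimal sketch of the pattern applied throughout this patch (the assertion message is taken
from the addon_update.go hunk below), a check such as

    sshClient, err = getMasterSSHClient()
    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get the master SSH client.")

becomes

    sshClient, err = getMasterSSHClient()
    framework.ExpectNoError(err, "Failed to get the master SSH client.")

Both forms fail the running spec when err is non-nil; framework.ExpectNoError simply wraps the
gomega.Expect(err).NotTo(gomega.HaveOccurred()) idiom so test bodies read more naturally.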
--- test/e2e/lifecycle/addon_update.go | 11 ++++----- test/e2e/lifecycle/kubelet_security.go | 4 ++-- test/e2e/lifecycle/reboot.go | 3 +-- test/e2e/lifecycle/resize_nodes.go | 31 +++++++++++++------------- test/e2e/lifecycle/restart.go | 13 +++++------ 5 files changed, 30 insertions(+), 32 deletions(-) diff --git a/test/e2e/lifecycle/addon_update.go b/test/e2e/lifecycle/addon_update.go index 7152bc161d6..a813684246d 100644 --- a/test/e2e/lifecycle/addon_update.go +++ b/test/e2e/lifecycle/addon_update.go @@ -227,7 +227,7 @@ var _ = SIGDescribe("Addon update", func() { var err error sshClient, err = getMasterSSHClient() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get the master SSH client.") + framework.ExpectNoError(err, "Failed to get the master SSH client.") }) ginkgo.AfterEach(func() { @@ -275,7 +275,7 @@ var _ = SIGDescribe("Addon update", func() { for _, p := range remoteFiles { err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient) + framework.ExpectNoError(err, "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient) } // directory on kubernetes-master @@ -284,7 +284,7 @@ var _ = SIGDescribe("Addon update", func() { // cleanup from previous tests _, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient) + framework.ExpectNoError(err, "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient) defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir)) @@ -300,7 +300,8 @@ var _ = SIGDescribe("Addon update", func() { // Delete the "ensure exist class" addon at the end. 
defer func() { e2elog.Logf("Cleaning up ensure exist class addon.") - gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred()) + err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil) + framework.ExpectNoError(err) }() waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true) @@ -386,7 +387,7 @@ func getMasterSSHClient() (*ssh.Client, error) { func sshExecAndVerify(client *ssh.Client, cmd string) { _, _, rc, err := sshExec(client, cmd) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client) + framework.ExpectNoError(err, "Failed to execute %q with ssh client %+v", cmd, client) gomega.Expect(rc).To(gomega.Equal(0), "error return code from executing command on the cluster: %s", cmd) } diff --git a/test/e2e/lifecycle/kubelet_security.go b/test/e2e/lifecycle/kubelet_security.go index 841847a14e5..22cf1b12f61 100644 --- a/test/e2e/lifecycle/kubelet_security.go +++ b/test/e2e/lifecycle/kubelet_security.go @@ -46,7 +46,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() { // make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() { result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) var statusCode int result.StatusCode(&statusCode) @@ -54,7 +54,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() { }) ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() { result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) var statusCode int result.StatusCode(&statusCode) diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go index 7c5ab138d8f..76789fa6571 100644 --- a/test/e2e/lifecycle/reboot.go +++ b/test/e2e/lifecycle/reboot.go @@ -35,7 +35,6 @@ import ( testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) const ( @@ -70,7 +69,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { namespaceName := metav1.NamespaceSystem ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, e := range events.Items { e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message) diff --git a/test/e2e/lifecycle/resize_nodes.go b/test/e2e/lifecycle/resize_nodes.go index 75f3f6e5d6b..68712f07d0f 100644 --- a/test/e2e/lifecycle/resize_nodes.go +++ b/test/e2e/lifecycle/resize_nodes.go @@ -27,7 +27,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { @@ -51,7 +50,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { c = f.ClientSet ns = f.Namespace.Name systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
framework.ExpectNoError(err) systemPodsNo = int32(len(systemPods)) if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) @@ -104,7 +103,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { // the cluster is restored to health. ginkgo.By("waiting for system pods to successfully restart") err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) ginkgo.It("should be able to delete nodes", func() { @@ -112,20 +111,20 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-delete-node" numNodes, err := framework.NumberOfRegisteredNodes(c) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) originalNodeCount = int32(numNodes) common.NewRCByName(c, ns, name, originalNodeCount, nil) err = framework.VerifyPods(c, ns, name, true, originalNodeCount) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1) ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes)) err = framework.ResizeGroup(group, targetNumNodes) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = framework.WaitForGroupSize(group, targetNumNodes) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " + "the now non-existent node and the RC to recreate it") @@ -133,7 +132,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { ginkgo.By("verifying whether the pods from the removed node are recreated") err = framework.VerifyPods(c, ns, name, true, originalNodeCount) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) // TODO: Bug here - testName is not correct @@ -143,26 +142,26 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { name := "my-hostname-add-node" common.NewSVCByName(c, ns, name) numNodes, err := framework.NumberOfRegisteredNodes(c) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) originalNodeCount = int32(numNodes) common.NewRCByName(c, ns, name, originalNodeCount, nil) err = framework.VerifyPods(c, ns, name, true, originalNodeCount) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1) ginkgo.By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes)) err = framework.ResizeGroup(group, targetNumNodes) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = framework.WaitForGroupSize(group, targetNumNodes) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying 
all pods are running", originalNodeCount+1)) err = resizeRC(c, ns, name, originalNodeCount+1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) }) }) diff --git a/test/e2e/lifecycle/restart.go b/test/e2e/lifecycle/restart.go index 86be21dc4e9..bdaea382a06 100644 --- a/test/e2e/lifecycle/restart.go +++ b/test/e2e/lifecycle/restart.go @@ -29,7 +29,6 @@ import ( testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) func nodeNames(nodes []v1.Node) []string { @@ -54,14 +53,14 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { framework.SkipUnlessProviderIs("gce", "gke") var err error ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) systemNamespace = metav1.NamespaceSystem ginkgo.By("ensuring all nodes are ready") originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes)) ginkgo.By("ensuring all pods are running and ready") @@ -87,11 +86,11 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() { ginkgo.By("restarting all of the nodes") err := common.RestartNodes(f.ClientSet, originalNodes) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("ensuring all nodes are ready after the restart") nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter)) // Make sure that we have the same number of nodes. 
We're not checking
@@ -108,7 +107,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring the same number of pods are running and ready after restart")
 		podCheckStart := time.Now()
 		podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
 		if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
 			pods := ps.List()

From c045046e5b1391079ab23d719e4fd4f4124257ff Mon Sep 17 00:00:00 2001
From: draveness
Date: Fri, 10 May 2019 09:33:12 +0800
Subject: [PATCH 083/194] feat: use framework.ExpectNoError instead in e2e test

---
 test/e2e/apimachinery/crd_watch.go         | 13 ++---
 test/e2e/apimachinery/etcd_failure.go      | 12 ++--
 test/e2e/apimachinery/garbage_collector.go | 21 ++++---
 test/e2e/apimachinery/namespace.go         | 24 ++++----
 test/e2e/apimachinery/table_conversion.go  | 12 ++--
 test/e2e/apimachinery/watch.go             | 65 +++++++++++-----------
 test/e2e/apimachinery/webhook.go           | 16 +++---
 test/e2e/apps/daemon_set.go                | 48 ++++++++--------
 test/e2e/apps/deployment.go                | 12 ++--
 test/e2e/kubectl/portforward.go            |  2 +-
 test/e2e/servicecatalog/podpreset.go       | 16 +++---
 test/e2e/windows/volumes.go                |  4 +-
 12 files changed, 121 insertions(+), 124 deletions(-)

diff --git a/test/e2e/apimachinery/crd_watch.go b/test/e2e/apimachinery/crd_watch.go
index 3256bddf4ab..abad1bb0f50 100644
--- a/test/e2e/apimachinery/crd_watch.go
+++ b/test/e2e/apimachinery/crd_watch.go
@@ -31,7 +31,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
@@ -80,35 +79,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
 		noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)
 
 		watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)
+		framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameA)
 
 		watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)
+		framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameB)
 
 		testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
 		testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
 
 		ginkgo.By("Creating first CR ")
 		testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
+		framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrA)
 		expectEvent(watchA, watch.Added, testCrA)
 		expectNoEvent(watchB, watch.Added, testCrA)
 
 		ginkgo.By("Creating second CR")
 		testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
+		framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrB)
 		expectEvent(watchB, watch.Added, testCrB)
 		expectNoEvent(watchA, watch.Added, testCrB)
 
 		ginkgo.By("Deleting first CR")
 		err = deleteCustomResource(noxuResourceClient, watchCRNameA)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to
delete custom resource: %s", watchCRNameA) + framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameA) expectEvent(watchA, watch.Deleted, nil) expectNoEvent(watchB, watch.Deleted, nil) ginkgo.By("Deleting second CR") err = deleteCustomResource(noxuResourceClient, watchCRNameB) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB) + framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameB) expectEvent(watchB, watch.Deleted, nil) expectNoEvent(watchA, watch.Deleted, nil) }) diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index 47f1bff7112..1e980145bdc 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -31,7 +31,6 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) var _ = SIGDescribe("Etcd failure [Disruptive]", func() { @@ -46,13 +45,14 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { // providers that provide those capabilities. framework.SkipUnlessProviderIs("gce") - gomega.Expect(framework.RunRC(testutils.RCConfig{ + err := framework.RunRC(testutils.RCConfig{ Client: f.ClientSet, Name: "baz", Namespace: f.Namespace.Name, Image: imageutils.GetPauseImageName(), Replicas: 1, - })).NotTo(gomega.HaveOccurred()) + }) + framework.ExpectNoError(err) }) ginkgo.It("should recover from network partition with master", func() { @@ -98,7 +98,7 @@ func doEtcdFailure(failCommand, fixCommand string) { func masterExec(cmd string) { host := framework.GetMasterHost() + ":22" result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd) + framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd) if result.Code != 0 { e2essh.LogResult(result) framework.Failf("master exec command returned non-zero") @@ -123,7 +123,7 @@ func checkExistingRCRecovers(f *framework.Framework) { } for _, pod := range pods.Items { err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name) } e2elog.Logf("apiserver has recovered") return true, nil @@ -133,7 +133,7 @@ func checkExistingRCRecovers(f *framework.Framework) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { options := metav1.ListOptions{LabelSelector: rcSelector.String()} pods, err := podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String()) + framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String()) for _, pod := range pods.Items { if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) { return true, nil diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 4893dbf9e36..5c1e6f02c99 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -42,7 +42,6 @@ import ( 
"k8s.io/kubernetes/test/e2e/framework/metrics" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -735,12 +734,12 @@ var _ = SIGDescribe("Garbage collector", func() { } ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name)) pods, err := podClient.List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name) + framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name) patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID) for i := 0; i < halfReplicas; i++ { pod := pods.Items[i] _, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch) + framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch) } ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name)) @@ -815,36 +814,36 @@ var _ = SIGDescribe("Garbage collector", func() { pod1Name := "pod1" pod1 := newGCPod(pod1Name) pod1, err := podClient.Create(pod1) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name) pod2Name := "pod2" pod2 := newGCPod(pod2Name) pod2, err = podClient.Create(pod2) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name) pod3Name := "pod3" pod3 := newGCPod(pod3Name) pod3, err = podClient.Create(pod3) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name) // create circular dependency addRefPatch := func(name string, uid types.UID) []byte { return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid)) } patch1 := addRefPatch(pod3.Name, pod3.UID) pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) + framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) patch2 := addRefPatch(pod1.Name, pod1.UID) pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) + framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) patch3 := addRefPatch(pod2.Name, 
pod2.UID) pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) + framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) // delete one pod, should result in the deletion of all pods deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID)) err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name) var pods *v1.PodList var err2 error // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. @@ -1074,7 +1073,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("Create the cronjob") cronJob := newCronJob("simple", "*/1 * * * ?") cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) ginkgo.By("Wait for the CronJob to create new Job") err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) { diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index 98ebba51738..7a9bf1d31cc 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -47,7 +47,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max defer ginkgo.GinkgoRecover() ns := fmt.Sprintf("nslifetest-%v", n) _, err = f.CreateNamespace(ns, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", ns) + framework.ExpectNoError(err, "failed to create namespace: %s", ns) }(n) } wg.Wait() @@ -57,7 +57,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max time.Sleep(time.Duration(10 * time.Second)) deleteFilter := []string{"nslifetest"} deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter) + framework.ExpectNoError(err, "failed to delete namespace(s) containing: %s", deleteFilter) gomega.Expect(len(deleted)).To(gomega.Equal(totalNS)) ginkgo.By("Waiting for namespaces to vanish") @@ -86,11 +86,11 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { ginkgo.By("Creating a test namespace") namespaceName := "nsdeletetest" namespace, err := f.CreateNamespace(namespaceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName) + framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Waiting for a default service account to be provisioned in namespace") err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", 
namespace.Name) + framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) ginkgo.By("Creating a pod in the namespace") podName := "test-pod" @@ -108,14 +108,14 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, } pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name) ginkgo.By("Waiting for the pod to have running status") framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) ginkgo.By("Deleting the namespace") err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name) + framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds @@ -130,7 +130,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { ginkgo.By("Recreating the namespace") namespace, err = f.CreateNamespace(namespaceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName) + framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Verifying there are no pods in the namespace") _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{}) @@ -143,11 +143,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { ginkgo.By("Creating a test namespace") namespaceName := "nsdeletetest" namespace, err := f.CreateNamespace(namespaceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName) + framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Waiting for a default service account to be provisioned in namespace") err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) + framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) ginkgo.By("Creating a service in the namespace") serviceName := "test-service" @@ -168,11 +168,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, } service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name) + framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name) ginkgo.By("Deleting the namespace") err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name) + framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) @@ -187,7 +187,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { ginkgo.By("Recreating the namespace") namespace, err = 
f.CreateNamespace(namespaceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName) + framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Verifying there is no service in the namespace") _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{}) diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 75a7e1e7b36..d98e34d0d34 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -56,11 +56,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { e2elog.Logf("Creating pod %s", podName) _, err := c.CoreV1().Pods(ns).Create(newTablePod(podName)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns) table := &metav1beta1.Table{} err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns) + framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns) e2elog.Logf("Table: %#v", table) gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2)) @@ -108,7 +108,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec). SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io"). Do().Into(pagedTable) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns) + framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns) gomega.Expect(len(pagedTable.Rows)).To(gomega.Equal(2)) gomega.Expect(pagedTable.ResourceVersion).ToNot(gomega.Equal("")) gomega.Expect(pagedTable.SelfLink).ToNot(gomega.Equal("")) @@ -120,7 +120,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec). SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io"). 
Do().Into(pagedTable) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns) + framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns) gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0)) gomega.Expect(pagedTable.Rows[0].Cells[0]).To(gomega.Equal("template-0002")) }) @@ -130,7 +130,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { table := &metav1beta1.Table{} err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get nodes in Table form across all namespaces") + framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces") e2elog.Logf("Table: %#v", table) gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2)) @@ -168,7 +168,7 @@ func printTable(table *metav1beta1.Table) string { tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0) printer := printers.NewTablePrinter(printers.PrintOptions{}) err := printer.PrintObj(table, tw) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to print table: %+v", table) + framework.ExpectNoError(err, "failed to print table: %+v", table) tw.Flush() return buf.String() } diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index 43b569495e3..428dd6d1e1f 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -30,7 +30,6 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) const ( @@ -58,15 +57,15 @@ var _ = SIGDescribe("Watchers", func() { ginkgo.By("creating a watch on configmaps with label A") watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA) + framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA) ginkgo.By("creating a watch on configmaps with label B") watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB) + framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB) ginkgo.By("creating a watch on configmaps with label A or B") watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB) + framework.ExpectNoError(err, "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB) testConfigMapA := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -87,7 +86,7 @@ var _ = SIGDescribe("Watchers", func() { ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification") testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns) + framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", 
multipleWatchersLabelValueA, ns) expectEvent(watchA, watch.Added, testConfigMapA) expectEvent(watchAB, watch.Added, testConfigMapA) expectNoEvent(watchB, watch.Added, testConfigMapA) @@ -96,7 +95,7 @@ var _ = SIGDescribe("Watchers", func() { testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) expectEvent(watchA, watch.Modified, testConfigMapA) expectEvent(watchAB, watch.Modified, testConfigMapA) expectNoEvent(watchB, watch.Modified, testConfigMapA) @@ -105,28 +104,28 @@ var _ = SIGDescribe("Watchers", func() { testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) expectEvent(watchA, watch.Modified, testConfigMapA) expectEvent(watchAB, watch.Modified, testConfigMapA) expectNoEvent(watchB, watch.Modified, testConfigMapA) ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification") err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns) + framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns) expectEvent(watchA, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) expectNoEvent(watchB, watch.Deleted, nil) ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification") testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns) + framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns) expectEvent(watchB, watch.Added, testConfigMapB) expectEvent(watchAB, watch.Added, testConfigMapB) expectNoEvent(watchA, watch.Added, testConfigMapB) ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification") err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns) + framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns) expectEvent(watchB, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) expectNoEvent(watchA, watch.Deleted, nil) @@ -152,27 +151,27 @@ var _ = SIGDescribe("Watchers", func() { ginkgo.By("creating a new configmap") testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns) + framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("modifying the configmap once") testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { 
setConfigMapData(cm, "mutation", "1") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("modifying the configmap a second time") testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns) ginkgo.By("deleting the configmap") err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns) + framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("creating a watch on configmaps from the resource version returned by the first update") testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion) + framework.ExpectNoError(err, "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion) ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update") expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate) @@ -201,17 +200,17 @@ var _ = SIGDescribe("Watchers", func() { ginkgo.By("creating a watch on configmaps") testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue) + framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue) ginkgo.By("creating a new configmap") testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns) + framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") _, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns) ginkgo.By("closing the watch once it receives two notifications") expectEvent(testWatchBroken, watch.Added, testConfigMap) @@ -225,7 +224,7 @@ var _ = SIGDescribe("Watchers", func() { testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second 
time", configMapName, ns) ginkgo.By("creating a new watch on configmaps from the last resource version observed by the first watch") lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap) @@ -233,11 +232,11 @@ var _ = SIGDescribe("Watchers", func() { framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent) } testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion) + framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion) ginkgo.By("deleting the configmap") err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns) + framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed") expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate) @@ -266,23 +265,23 @@ var _ = SIGDescribe("Watchers", func() { ginkgo.By("creating a watch on configmaps with a certain label") testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue) + framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue) ginkgo.By("creating a new configmap") testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns) + framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns) ginkgo.By("changing the label value of the configmap") _, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value" }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value", configMapName, ns) ginkgo.By("Expecting to observe a delete notification for the watched object") expectEvent(testWatch, watch.Added, testConfigMap) @@ -293,7 +292,7 @@ var _ = SIGDescribe("Watchers", func() { testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a 
second time", configMapName, ns) ginkgo.By("Expecting not to observe a notification because the object no longer meets the selector's requirements") expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate) @@ -302,17 +301,17 @@ var _ = SIGDescribe("Watchers", func() { testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns) ginkgo.By("modifying the configmap a third time") testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "3") }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns) + framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns) ginkgo.By("deleting the configmap") err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns) + framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored") expectEvent(testWatch, watch.Added, testConfigMapLabelRestored) @@ -347,7 +346,7 @@ var _ = SIGDescribe("Watchers", func() { resourceVersion := "0" for i := 0; i < iterations; i++ { wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to watch configmaps in the namespace %s", ns) + framework.ExpectNoError(err, "Failed to watch configmaps in the namespace %s", ns) wcs = append(wcs, wc) resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion for _, wc := range wcs[1:] { @@ -473,18 +472,18 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa case createEvent: cm.Name = name(i) _, err := c.CoreV1().ConfigMaps(ns).Create(cm) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create configmap %s in namespace %s", cm.Name, ns) + framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns) existing = append(existing, i) i++ case updateEvent: idx := rand.Intn(len(existing)) cm.Name = name(existing[idx]) _, err := c.CoreV1().ConfigMaps(ns).Update(cm) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to update configmap %s in namespace %s", cm.Name, ns) + framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns) case deleteEvent: idx := rand.Intn(len(existing)) err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns) + framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns) existing = append(existing[:idx], existing[idx+1:]...) 
default: framework.Failf("Unsupported event operation: %d", op) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 299d0a3df33..a7cff4f6c34 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -718,7 +718,7 @@ func testWebhook(f *framework.Framework) { }, } _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name) ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook") toNonCompliantFn := func(cm *v1.ConfigMap) { @@ -755,7 +755,7 @@ func testWebhook(f *framework.Framework) { ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace") configmap = nonCompliantConfigMap(f) _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName) + framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName) } func testAttachingPodWebhook(f *framework.Framework) { @@ -763,9 +763,9 @@ func testAttachingPodWebhook(f *framework.Framework) { client := f.ClientSet pod := toBeAttachedPod(f) _, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name) err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name) + framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name) ginkgo.By("'kubectl attach' the pod, should be denied by the webhook") timer := time.NewTimer(30 * time.Second) @@ -1345,7 +1345,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension }, } mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name) expectedCRData := map[string]interface{}{ "mutation-start": "yes", "mutation-stage-1": "yes", @@ -1374,17 +1374,17 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd. 
}, } _, err := customResourceClient.Create(cr, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name) + framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name) ginkgo.By("Patching Custom Resource Definition to set v2 as storage") apiVersionWithV2StoragePatch := fmt.Sprint(`{"spec": {"versions": [{"name": "v1", "storage": false, "served": true},{"name": "v2", "storage": true, "served": true}]}}`) _, err = testcrd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name) + framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name) ginkgo.By("Patching the custom resource while v2 is storage version") crDummyPatch := fmt.Sprint(`[{ "op": "add", "path": "/dummy", "value": "test" }]`) _, err = testcrd.DynamicClients["v2"].Patch(crName, types.JSONPatchType, []byte(crDummyPatch), metav1.PatchOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name) + framework.ExpectNoError(err, "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name) } func registerValidatingWebhookForCRD(f *framework.Framework, context *certContext) func() { diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 70da1216c90..6ac4e8f031b 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -69,13 +69,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.AfterEach(func() { // Clean up daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to dump DaemonSets") + framework.ExpectNoError(err, "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to be reaped") + framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") } } if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { @@ -128,7 +128,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") err = checkDaemonStatus(f, dsName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -138,7 +138,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { err = c.CoreV1().Pods(ns).Delete(pod.Name, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive") + framework.ExpectNoError(err, "error waiting for daemon pod to revive") }) /* @@ -157,24 +157,24 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Initially, daemon pods should not be running on any nodes.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes") + framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") ginkgo.By("Change node label to blue, check that daemon pod is launched.") nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0)) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node") + framework.ExpectNoError(err, "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name})) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes") + framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled") nodeSelector[daemonsetColorLabel] = "green" greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node") + framework.ExpectNoError(err, "error removing labels on node") gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). 
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes") @@ -182,11 +182,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error patching daemon set") + framework.ExpectNoError(err, "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name})) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes") + framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -220,23 +220,23 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Initially, daemon pods should not be running on any nodes.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes") + framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") ginkgo.By("Change node label to blue, check that daemon pod is launched.") nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0)) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node") + framework.ExpectNoError(err, "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name})) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes") + framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove the node label and wait for daemons to be unscheduled") _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node") + framework.ExpectNoError(err, "error removing labels on node") gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). 
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes") }) @@ -254,7 +254,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") err = checkDaemonStatus(f, dsName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -264,13 +264,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed _, err = c.CoreV1().Pods(ns).UpdateStatus(&pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error failing a daemon pod") + framework.ExpectNoError(err, "error failing a daemon pod") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive") + framework.ExpectNoError(err, "error waiting for daemon pod to revive") ginkgo.By("Wait for the failed daemon pod to be completely deleted.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted") + framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted") }) // This test should not be added to conformance. We will consider deprecating OnDelete when the @@ -286,7 +286,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) @@ -308,7 +308,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) @@ -335,7 +335,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) @@ -364,7 +364,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels ds, err = 
c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) @@ -393,7 +393,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { e2elog.Logf("Check that daemon pods launch on every node of the cluster") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + framework.ExpectNoError(err, "error waiting for daemon pod to start") e2elog.Logf("Update the DaemonSet to trigger a rollout") // We use a nonexistent image here, so that we make sure it won't finish @@ -741,7 +741,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st return false, nil } err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for controllerrevisions to be created") + framework.ExpectNoError(err, "error waiting for controllerrevisions to be created") } func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList { diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 5209a6930d1..6d5af81ece0 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -274,7 +274,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %s", err) + framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-rolling-update-deployment" @@ -350,14 +350,14 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) + framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-cleanup-deployment" e2elog.Logf("Creating deployment %s", deploymentName) pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err) + framework.ExpectNoError(err, "Failed to query for pods: %v", err) options := metav1.ListOptions{ ResourceVersion: pods.ListMeta.ResourceVersion, @@ -420,7 +420,7 @@ func testRolloverDeployment(f *framework.Framework) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) + framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Wait for replica set to become ready before adopting it. 
e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName) @@ -803,7 +803,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { ginkgo.By("Wait for the ReplicaSet to be orphaned") err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned") + framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned") deploymentName = "test-adopt-deployment" e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) @@ -852,7 +852,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Verify that the required pods have come up. e2elog.Logf("Waiting for all required pods to come up") err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) + framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) e2elog.Logf("Waiting for deployment %q to complete", deployment.Name) gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index 7c0ca5fa2b6..55cdc1202fa 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -346,7 +346,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) func doTestOverWebSockets(bindAddress string, f *framework.Framework) { config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config") + framework.ExpectNoError(err, "unable to get base config") ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index a3e6f0a1c60..4f992aed9bb 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -113,14 +113,14 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + framework.ExpectNoError(err, "failed to query for pod") gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } w, err := podClient.Watch(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") + framework.ExpectNoError(err, "failed to set up watch") ginkgo.By("submitting the pod to kubernetes") podClient.Create(pod) @@ -129,7 +129,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + framework.ExpectNoError(err, "failed to query for pod") gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) ginkgo.By("verifying pod creation was observed") @@ -149,7 +149,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("ensuring pod is modified") // save the running pod pod, 
err = podClient.Get(pod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") + framework.ExpectNoError(err, "failed to GET scheduled pod") // check the annotation is there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok { @@ -233,14 +233,14 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + framework.ExpectNoError(err, "failed to query for pod") gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } w, err := podClient.Watch(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") + framework.ExpectNoError(err, "failed to set up watch") ginkgo.By("submitting the pod to kubernetes") podClient.Create(originalPod) @@ -249,7 +249,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + framework.ExpectNoError(err, "failed to query for pod") gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) ginkgo.By("verifying pod creation was observed") @@ -269,7 +269,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("ensuring pod is modified") // save the running pod pod, err := podClient.Get(originalPod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") + framework.ExpectNoError(err, "failed to GET scheduled pod") // check the annotation is not there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok { diff --git a/test/e2e/windows/volumes.go b/test/e2e/windows/volumes.go index 94d3bb765d5..21120bd1928 100644 --- a/test/e2e/windows/volumes.go +++ b/test/e2e/windows/volumes.go @@ -125,7 +125,7 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol stdoutRW, stderrRW, errRW := f.ExecCommandInContainerWithFullOutput(podName, rwcontainerName, cmd...) msg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", cmd, stdoutRW, stderrRW) - gomega.Expect(errRW).NotTo(gomega.HaveOccurred(), msg) + framework.ExpectNoError(errRW, msg) _, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...) gomega.Expect(stderr).To(gomega.Equal("Access is denied.")) @@ -134,7 +134,7 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...) 
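Throughout the hunks above the replacement is purely mechanical: framework.ExpectNoError is, to a first approximation, a thin wrapper over the very gomega assertion it replaces, so test behaviour does not change, only the noise at each call site. A minimal sketch of such a wrapper, assuming it lives in the e2e framework package and forwards the optional explanation arguments (the real helper's offset handling may differ; ExpectWithOffset is used here so failures point at the test line rather than the wrapper):

    package framework

    import "github.com/onsi/gomega"

    // ExpectNoError fails the running spec when err is non-nil, annotating the
    // failure with the optional explanation. It is equivalent to the longhand
    // gomega.Expect(err).NotTo(gomega.HaveOccurred(), explain...) being removed
    // in the hunks above, but attributes the failure to the caller's line.
    func ExpectNoError(err error, explain ...interface{}) {
        gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
    }

Call sites then collapse to a single readable line, e.g. framework.ExpectNoError(err, "error waiting for daemon pod to start").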
readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr) gomega.Expect(readout).To(gomega.Equal("windows-volume-test")) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), readmsg) + framework.ExpectNoError(err, readmsg) } func testPodWithROVolume(podName string, source v1.VolumeSource, path string) *v1.Pod { From f3ea5e5c9417c5d52b2b10b92700217e549d445e Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 10:15:45 +0800 Subject: [PATCH 084/194] replace test error checking with more readable way --- test/e2e/apimachinery/resource_quota.go | 10 +++++----- test/e2e/apps/cronjob.go | 8 ++++---- test/e2e/apps/deployment.go | 2 +- test/e2e/kubectl/kubectl.go | 6 +++--- test/e2e/lifecycle/addon_update.go | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index c05d32ffa3a..2b129db0351 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -216,7 +216,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceMemory] = resource.MustParse("100Mi") pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)") requests = v1.ResourceList{} @@ -228,7 +228,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Ensuring a pod cannot update its resource requirements") // a pod cannot dynamically update its resource requirements. @@ -238,7 +238,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi") podToUpdate.Spec.Containers[0].Resources.Requests = requests _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage") err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) @@ -561,7 +561,7 @@ var _ = SIGDescribe("ResourceQuota", func() { }, }, resourceClient, testcrd.Crd) // since we only give one quota, this creation should fail. 
- gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Deleting a custom resource") err = deleteCustomResource(resourceClient, testcr.GetName()) @@ -1052,7 +1052,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2") pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Deleting first pod") err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 93e8cb620d0..ae9dbcc5d6a 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -91,7 +91,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Ensuring no jobs are scheduled") err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Ensuring no job exists by listing jobs explicitly") jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) @@ -128,7 +128,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Ensuring no more jobs are scheduled") err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) @@ -183,7 +183,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Ensuring no unexpected event has happened") err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) @@ -213,7 +213,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Ensuring job was deleted") _, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) ginkgo.By("Ensuring the job is not in the cronjob active list") diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 5209a6930d1..d084cf948fc 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -198,7 +198,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { e2elog.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index e1440e568d5..485484ec757 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -711,7 +711,7 @@ metadata: ginkgo.By("trying to use kubectl with invalid token") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1") e2elog.Logf("got err %v", err) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) 
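The negative counterpart used throughout PATCH 084 follows the same idea: framework.ExpectError states that a call must fail, without spelling out the matcher chain. A minimal sketch under the same assumptions as the ExpectNoError sketch above; the paired gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) checks stay in gomega form because they assert on the kind of error, not merely its presence:

    package framework

    import "github.com/onsi/gomega"

    // ExpectError fails the running spec when err is nil, i.e. when an operation
    // that is expected to be rejected unexpectedly succeeds. It replaces the
    // longhand gomega.Expect(err).To(gomega.HaveOccurred()) lines above.
    func ExpectError(err error, explain ...interface{}) {
        gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
    }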
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) gomega.Expect(err).To(gomega.ContainSubstring("Authorization: Bearer invalid")) @@ -720,7 +720,7 @@ metadata: ginkgo.By("trying to use kubectl with invalid server") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1") e2elog.Logf("got err %v", err) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server")) gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api")) @@ -1717,7 +1717,7 @@ metadata: ginkgo.By("verifying the job " + jobName + " was deleted") _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) gomega.Expect(apierrs.IsNotFound(err)).To(gomega.BeTrue()) }) }) diff --git a/test/e2e/lifecycle/addon_update.go b/test/e2e/lifecycle/addon_update.go index 7152bc161d6..a4bf1a8ca70 100644 --- a/test/e2e/lifecycle/addon_update.go +++ b/test/e2e/lifecycle/addon_update.go @@ -334,7 +334,7 @@ var _ = SIGDescribe("Addon update", func() { ginkgo.By("verify invalid addons weren't created") _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function. }) From a33b86e27f429ffb12d6bc33a1349854a32cafb9 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 9 May 2019 08:54:19 +0000 Subject: [PATCH 085/194] fix incorrect prometheus metrics --- .../azure/azure_client.go | 101 ++++++------------ .../azure/azure_metrics.go | 4 +- 2 files changed, 37 insertions(+), 68 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go index defe13c913a..cb262fb302b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go @@ -293,13 +293,11 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { @@ -376,19 +374,16 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, loadBalancerName, parameters, etag) if err != nil { - mc.Observe(err) - return nil, err + return nil, mc.Observe(err) } future, err := az.client.CreateOrUpdateSender(req) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = 
future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // createOrUpdatePreparer prepares the CreateOrUpdate request. @@ -432,14 +427,12 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, loadBalancerName) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { @@ -528,14 +521,12 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) { @@ -552,14 +543,12 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, publicIPAddressName) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { @@ -648,13 +637,11 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) { @@ -672,13 +659,11 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, virtualNetworkName, subnetName) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, 
az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { @@ -710,8 +695,8 @@ func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, v mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualNetworkName) + mc.Observe(err) if err != nil { - mc.Observe(err) return nil, err } @@ -767,19 +752,16 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) if err != nil { - mc.Observe(err) - return nil, err + return nil, mc.Observe(err) } future, err := az.client.CreateOrUpdateSender(req) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // createOrUpdatePreparer prepares the CreateOrUpdate request. @@ -824,13 +806,11 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, networkSecurityGroupName) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { @@ -1053,14 +1033,12 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // azRoutesClient implements RoutesClient. @@ -1109,13 +1087,11 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName future, err := az.client.CreateOrUpdateSender(req) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // createOrUpdatePreparer prepares the CreateOrUpdate request. 
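Every hunk in this patch has the same shape: because Observe now returns the error it was handed (see the azure_metrics.go hunk at the end of the patch), each wrapped ARM call records its metric exactly once on every path, and the early returns collapse into a single statement instead of a separate Observe call that was previously skipped or double-counted. A minimal self-contained sketch of that chaining, with fmt prints standing in for the Prometheus latency and error series (the call names are illustrative):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // metricContext mirrors the reworked Observe: record latency, bump the error
    // counter when err != nil, and hand the error straight back to the caller.
    type metricContext struct{ start time.Time }

    func (mc *metricContext) Observe(err error) error {
        fmt.Printf("latency %.3fs\n", time.Since(mc.start).Seconds()) // stand-in for apiMetrics.latency
        if err != nil {
            fmt.Println("errors +1") // stand-in for apiMetrics.errors
        }
        return err
    }

    // armCall stands in for any of the wrapped SDK operations: observe and
    // propagate in one step on both the failure and the success path.
    func armCall(fail bool) error {
        mc := &metricContext{start: time.Now()}
        if fail {
            return mc.Observe(errors.New("arm request failed"))
        }
        return mc.Observe(nil)
    }

    func main() {
        fmt.Println(armCall(false), armCall(true))
    }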
@@ -1162,13 +1138,11 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, routeTableName, routeName) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // azRouteTablesClient implements RouteTablesClient. @@ -1211,19 +1185,16 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, parameters, etag) if err != nil { - mc.Observe(err) - return nil, err + return nil, mc.Observe(err) } future, err := az.client.CreateOrUpdateSender(req) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // createOrUpdatePreparer prepares the CreateOrUpdate request. @@ -1423,14 +1394,12 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) { @@ -1447,14 +1416,12 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, diskName) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go index c0806250b90..9c574d75841 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go @@ -50,12 +50,14 @@ func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *me } } -func (mc *metricContext) Observe(err error) { +func (mc *metricContext) Observe(err error) error { apiMetrics.latency.WithLabelValues(mc.attributes...).Observe( time.Since(mc.start).Seconds()) if err != nil { apiMetrics.errors.WithLabelValues(mc.attributes...).Inc() } + + return err } func registerAPIMetrics(attributes ...string) *apiCallMetrics { From 5b3967101206678a7b06fa5f8bf5299822cddc4b Mon Sep 17 00:00:00 2001 
From: Pengfei Ni Date: Fri, 10 May 2019 11:13:25 +0800 Subject: [PATCH 086/194] Fix some service tags not supported issues for Azure LoadBalancer service --- .../azure/azure_loadbalancer.go | 34 +++++------ .../azure/azure_loadbalancer_test.go | 57 +++++++++++++++++++ 2 files changed, 70 insertions(+), 21 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 6f15fb5fd05..ab333899633 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -74,6 +74,7 @@ const ( // ServiceAnnotationAllowedServiceTag is the annotation used on the service // to specify a list of allowed service tags separated by comma + // Refer https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags for all supported service tags. ServiceAnnotationAllowedServiceTag = "service.beta.kubernetes.io/azure-allowed-service-tags" // ServiceAnnotationLoadBalancerIdleTimeout is the annotation used on the service @@ -90,13 +91,6 @@ const ( clusterNameKey = "kubernetes-cluster-name" ) -var ( - // supportedServiceTags holds a list of supported service tags on Azure. - // Refer https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags for more information. - supportedServiceTags = sets.NewString("VirtualNetwork", "VIRTUAL_NETWORK", "AzureLoadBalancer", "AZURE_LOADBALANCER", - "Internet", "INTERNET", "AzureTrafficManager", "Storage", "Sql") -) - // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { @@ -1028,10 +1022,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, if err != nil { return nil, err } - serviceTags, err := getServiceTags(service) - if err != nil { - return nil, err - } + serviceTags := getServiceTags(service) var sourceAddressPrefixes []string if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 { if !requiresInternalLoadBalancer(service) { @@ -1609,24 +1600,25 @@ func useSharedSecurityRule(service *v1.Service) bool { return false } -func getServiceTags(service *v1.Service) ([]string, error) { +func getServiceTags(service *v1.Service) []string { + if service == nil { + return nil + } + if serviceTags, found := service.Annotations[ServiceAnnotationAllowedServiceTag]; found { + result := []string{} tags := strings.Split(strings.TrimSpace(serviceTags), ",") for _, tag := range tags { - // Storage and Sql service tags support setting regions with suffix ".Region" - if strings.HasPrefix(tag, "Storage.") || strings.HasPrefix(tag, "Sql.") { - continue - } - - if !supportedServiceTags.Has(tag) { - return nil, fmt.Errorf("only %q are allowed in service tags", supportedServiceTags.List()) + serviceTag := strings.TrimSpace(tag) + if serviceTag != "" { + result = append(result, serviceTag) } } - return tags, nil + return result } - return nil, nil + return nil } func serviceOwnsPublicIP(pip *network.PublicIPAddress, clusterName, serviceName string) bool { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go index 0efb7ee2974..86fb679f4cc 100644 --- 
a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go @@ -447,3 +447,60 @@ func TestServiceOwnsPublicIP(t *testing.T) { assert.Equal(t, owns, c.expected, "TestCase[%d]: %s", i, c.desc) } } + +func TestGetServiceTags(t *testing.T) { + tests := []struct { + desc string + service *v1.Service + expected []string + }{ + { + desc: "nil should be returned when service is nil", + service: nil, + expected: nil, + }, + { + desc: "nil should be returned when service has no annotations", + service: &v1.Service{}, + expected: nil, + }, + { + desc: "single tag should be returned when service has set one annotations", + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationAllowedServiceTag: "tag1", + }, + }, + }, + expected: []string{"tag1"}, + }, + { + desc: "multiple tags should be returned when service has set multi-annotations", + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationAllowedServiceTag: "tag1, tag2", + }, + }, + }, + expected: []string{"tag1", "tag2"}, + }, + { + desc: "correct tags should be returned when comma or spaces are included in the annotations", + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationAllowedServiceTag: ", tag1, ", + }, + }, + }, + expected: []string{"tag1"}, + }, + } + + for i, c := range tests { + tags := getServiceTags(c.service) + assert.Equal(t, tags, c.expected, "TestCase[%d]: %s", i, c.desc) + } +} From 7cbe2d6c5f4c3b00d482dc7de3bfd1831ff0e861 Mon Sep 17 00:00:00 2001 From: Stephen Chan Date: Tue, 16 Apr 2019 13:07:21 -0700 Subject: [PATCH 087/194] move signal handling for hyperkube apiserver and kubelet commands out of hyperkube main command --- cmd/genkubedocs/BUILD | 1 - cmd/genkubedocs/gen_kube_docs.go | 5 ++--- cmd/genman/BUILD | 1 - cmd/genman/gen_kube_man.go | 5 ++--- cmd/hyperkube/BUILD | 1 - cmd/hyperkube/main.go | 9 ++++----- cmd/kube-apiserver/BUILD | 1 - cmd/kube-apiserver/apiserver.go | 3 +-- cmd/kube-apiserver/app/server.go | 4 ++-- cmd/kubelet/BUILD | 1 - cmd/kubelet/app/BUILD | 1 + cmd/kubelet/app/server.go | 6 +++++- cmd/kubelet/kubelet.go | 3 +-- 13 files changed, 18 insertions(+), 23 deletions(-) diff --git a/cmd/genkubedocs/BUILD b/cmd/genkubedocs/BUILD index deddd64c7fb..e71660bd0da 100644 --- a/cmd/genkubedocs/BUILD +++ b/cmd/genkubedocs/BUILD @@ -31,7 +31,6 @@ go_library( "//cmd/kube-scheduler/app:go_default_library", "//cmd/kubeadm/app/cmd:go_default_library", "//cmd/kubelet/app:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/cobra/doc:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/cmd/genkubedocs/gen_kube_docs.go b/cmd/genkubedocs/gen_kube_docs.go index 12f2de4f2f0..b10a432877f 100644 --- a/cmd/genkubedocs/gen_kube_docs.go +++ b/cmd/genkubedocs/gen_kube_docs.go @@ -22,7 +22,6 @@ import ( "github.com/spf13/cobra/doc" "github.com/spf13/pflag" - "k8s.io/apiserver/pkg/server" ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" "k8s.io/kubernetes/cmd/genutils" apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" @@ -54,7 +53,7 @@ func main() { switch module { case "kube-apiserver": // generate docs for kube-apiserver - apiserver := apiservapp.NewAPIServerCommand(server.SetupSignalHandler()) + apiserver 
:= apiservapp.NewAPIServerCommand() doc.GenMarkdownTree(apiserver, outDir) case "kube-controller-manager": // generate docs for kube-controller-manager @@ -74,7 +73,7 @@ func main() { doc.GenMarkdownTree(scheduler, outDir) case "kubelet": // generate docs for kubelet - kubelet := kubeletapp.NewKubeletCommand(server.SetupSignalHandler()) + kubelet := kubeletapp.NewKubeletCommand() doc.GenMarkdownTree(kubelet, outDir) case "kubeadm": // resets global flags created by kubelet or other commands e.g. diff --git a/cmd/genman/BUILD b/cmd/genman/BUILD index 09a7a7573d7..65355f37678 100644 --- a/cmd/genman/BUILD +++ b/cmd/genman/BUILD @@ -25,7 +25,6 @@ go_library( "//cmd/kubeadm/app/cmd:go_default_library", "//cmd/kubelet/app:go_default_library", "//pkg/kubectl/cmd:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/github.com/cpuguy83/go-md2man/md2man:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/cmd/genman/gen_kube_man.go b/cmd/genman/gen_kube_man.go index 62ceaab5223..8ccc994f31c 100644 --- a/cmd/genman/gen_kube_man.go +++ b/cmd/genman/gen_kube_man.go @@ -26,7 +26,6 @@ import ( mangen "github.com/cpuguy83/go-md2man/md2man" "github.com/spf13/cobra" "github.com/spf13/pflag" - "k8s.io/apiserver/pkg/server" ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" "k8s.io/kubernetes/cmd/genutils" apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" @@ -63,7 +62,7 @@ func main() { switch module { case "kube-apiserver": // generate manpage for kube-apiserver - apiserver := apiservapp.NewAPIServerCommand(server.SetupSignalHandler()) + apiserver := apiservapp.NewAPIServerCommand() genMarkdown(apiserver, "", outDir) for _, c := range apiserver.Commands() { genMarkdown(c, "kube-apiserver", outDir) @@ -98,7 +97,7 @@ func main() { } case "kubelet": // generate manpage for kubelet - kubelet := kubeletapp.NewKubeletCommand(server.SetupSignalHandler()) + kubelet := kubeletapp.NewKubeletCommand() genMarkdown(kubelet, "", outDir) for _, c := range kubelet.Commands() { genMarkdown(c, "kubelet", outDir) diff --git a/cmd/hyperkube/BUILD b/cmd/hyperkube/BUILD index 369cc94fc3f..41c086780a4 100644 --- a/cmd/hyperkube/BUILD +++ b/cmd/hyperkube/BUILD @@ -27,7 +27,6 @@ go_library( "//pkg/client/metrics/prometheus:go_default_library", "//pkg/kubectl/cmd:go_default_library", "//pkg/version/prometheus:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/component-base/cli/flag:go_default_library", "//staging/src/k8s.io/component-base/logs:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/cmd/hyperkube/main.go b/cmd/hyperkube/main.go index 76dd396d3c8..7f7034423a9 100644 --- a/cmd/hyperkube/main.go +++ b/cmd/hyperkube/main.go @@ -32,7 +32,6 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "k8s.io/apiserver/pkg/server" cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/logs" cloudcontrollermanager "k8s.io/kubernetes/cmd/cloud-controller-manager/app" @@ -49,7 +48,7 @@ import ( func main() { rand.Seed(time.Now().UnixNano()) - hyperkubeCommand, allCommandFns := NewHyperKubeCommand(server.SetupSignalHandler()) + hyperkubeCommand, allCommandFns := NewHyperKubeCommand() // TODO: once we switch everything over to Cobra commands, we can go back to calling // cliflag.InitFlags() (by removing its pflag.Parse() call). 
For now, we have to set the @@ -84,15 +83,15 @@ func commandFor(basename string, defaultCommand *cobra.Command, commands []func( } // NewHyperKubeCommand is the entry point for hyperkube -func NewHyperKubeCommand(stopCh <-chan struct{}) (*cobra.Command, []func() *cobra.Command) { +func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) { // these have to be functions since the command is polymorphic. Cobra wants you to be top level // command to get executed - apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand(stopCh) } + apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand() } controller := func() *cobra.Command { return kubecontrollermanager.NewControllerManagerCommand() } proxy := func() *cobra.Command { return kubeproxy.NewProxyCommand() } scheduler := func() *cobra.Command { return kubescheduler.NewSchedulerCommand() } kubectlCmd := func() *cobra.Command { return kubectl.NewDefaultKubectlCommand() } - kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand(stopCh) } + kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand() } cloudController := func() *cobra.Command { return cloudcontrollermanager.NewCloudControllerManagerCommand() } commandFns := []func() *cobra.Command{ diff --git a/cmd/kube-apiserver/BUILD b/cmd/kube-apiserver/BUILD index 32a8f629949..1e9207a7801 100644 --- a/cmd/kube-apiserver/BUILD +++ b/cmd/kube-apiserver/BUILD @@ -22,7 +22,6 @@ go_library( "//cmd/kube-apiserver/app:go_default_library", "//pkg/util/prometheusclientgo:go_default_library", "//pkg/version/prometheus:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/component-base/logs:go_default_library", ], ) diff --git a/cmd/kube-apiserver/apiserver.go b/cmd/kube-apiserver/apiserver.go index 613dc2bbb98..98002a16b0a 100644 --- a/cmd/kube-apiserver/apiserver.go +++ b/cmd/kube-apiserver/apiserver.go @@ -24,7 +24,6 @@ import ( "os" "time" - "k8s.io/apiserver/pkg/server" "k8s.io/component-base/logs" "k8s.io/kubernetes/cmd/kube-apiserver/app" _ "k8s.io/kubernetes/pkg/util/prometheusclientgo" // load all the prometheus client-go plugins @@ -34,7 +33,7 @@ import ( func main() { rand.Seed(time.Now().UnixNano()) - command := app.NewAPIServerCommand(server.SetupSignalHandler()) + command := app.NewAPIServerCommand() // TODO: once we switch everything over to Cobra commands, we can go back to calling // utilflag.InitFlags() (by removing its pflag.Parse() call). 
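For context on why the stopCh parameter disappears from NewAPIServerCommand and NewKubeletCommand: each command now acquires the stop channel itself via genericapiserver.SetupSignalHandler(), so callers such as hyperkube, genkubedocs and genman no longer have to thread it through. A minimal sketch of that shape (the command name is illustrative; SetupSignalHandler may only be set up once per process, which is why the kubelet creates stopCh up front and shares it with the dockershim):

    package app

    import (
        "github.com/spf13/cobra"
        genericapiserver "k8s.io/apiserver/pkg/server"
    )

    // NewExampleCommand sets up the signal handler inside the command instead of
    // taking a stop channel from the caller, mirroring the change above.
    func NewExampleCommand() *cobra.Command {
        return &cobra.Command{
            Use: "example",
            RunE: func(cmd *cobra.Command, args []string) error {
                stopCh := genericapiserver.SetupSignalHandler() // closed on SIGTERM/SIGINT
                <-stopCh                                        // run until asked to stop
                return nil
            },
        }
    }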
For now, we have to set the diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 43d3d8f1a54..0b6854b5096 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -88,7 +88,7 @@ const etcdRetryLimit = 60 const etcdRetryInterval = 1 * time.Second // NewAPIServerCommand creates a *cobra.Command object with default parameters -func NewAPIServerCommand(stopCh <-chan struct{}) *cobra.Command { +func NewAPIServerCommand() *cobra.Command { s := options.NewServerRunOptions() cmd := &cobra.Command{ Use: "kube-apiserver", @@ -111,7 +111,7 @@ cluster's shared state through which all other components interact.`, return utilerrors.NewAggregate(errs) } - return Run(completedOptions, stopCh) + return Run(completedOptions, genericapiserver.SetupSignalHandler()) }, } diff --git a/cmd/kubelet/BUILD b/cmd/kubelet/BUILD index b0afbcc7880..519101d2edd 100644 --- a/cmd/kubelet/BUILD +++ b/cmd/kubelet/BUILD @@ -21,7 +21,6 @@ go_library( "//cmd/kubelet/app:go_default_library", "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/component-base/logs:go_default_library", ], ) diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index 9a3dbf6bd17..a8b13f757ff 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -122,6 +122,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 18aed3fe3bb..d669d071d41 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -46,6 +46,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" @@ -108,7 +109,7 @@ const ( ) // NewKubeletCommand creates a *cobra.Command object with default parameters -func NewKubeletCommand(stopCh <-chan struct{}) *cobra.Command { +func NewKubeletCommand() *cobra.Command { cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError) cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) kubeletFlags := options.NewKubeletFlags() @@ -254,6 +255,9 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // add the kubelet config controller to kubeletDeps kubeletDeps.KubeletConfigController = kubeletConfigController + // set up stopCh here in order to be reused by kubelet and docker shim + stopCh := genericapiserver.SetupSignalHandler() + // start the experimental docker shim, if enabled if kubeletServer.KubeletFlags.ExperimentalDockershim { if err := RunDockershim(&kubeletServer.KubeletFlags, kubeletConfig, stopCh); err != nil { diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go index 4c27eaf2cda..2bd4d59d0b2 100644 --- a/cmd/kubelet/kubelet.go +++ 
b/cmd/kubelet/kubelet.go @@ -26,7 +26,6 @@ import ( "os" "time" - "k8s.io/apiserver/pkg/server" "k8s.io/component-base/logs" "k8s.io/kubernetes/cmd/kubelet/app" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration @@ -36,7 +35,7 @@ import ( func main() { rand.Seed(time.Now().UnixNano()) - command := app.NewKubeletCommand(server.SetupSignalHandler()) + command := app.NewKubeletCommand() logs.InitLogs() defer logs.FlushLogs() From 00c972ce87ad22e5fa5deaf50446a867dc689af0 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 10 May 2019 03:31:44 +0000 Subject: [PATCH 088/194] add source in azure metrics add source in azure metrics revert back zz_generated.conversion.go --- .../azure/azure_backoff.go | 16 +--- .../azure/azure_client.go | 90 +++++++++---------- .../azure/azure_controller_standard.go | 4 +- .../azure/azure_controller_vmss.go | 4 +- .../azure/azure_fakes.go | 4 +- .../azure/azure_metrics.go | 5 +- .../azure/azure_metrics_test.go | 4 +- .../azure/azure_test.go | 2 +- .../azure/azure_vmss.go | 8 +- 9 files changed, 63 insertions(+), 74 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go index f7ff8922919..87377cd336e 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go @@ -550,25 +550,13 @@ func (az *Cloud) deleteRouteWithRetry(routeName string) error { }) } -// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error { - return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM) - klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName) - return az.processHTTPRetryResponse(nil, "", resp, err) - }) -} - // UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry -func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error { +func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() - resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) + resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID) return az.processHTTPRetryResponse(nil, "", resp, err) }) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go index defe13c913a..08ca2a3a053 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go @@ -48,7 +48,7 @@ func createRateLimitErr(isWrite bool, opName string) error { // VirtualMachinesClient defines needed functions for azure 
compute.VirtualMachinesClient type VirtualMachinesClient interface { - CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) } @@ -103,7 +103,7 @@ type VirtualMachineScaleSetVMsClient interface { Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) - Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) + Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) } // RoutesClient defines needed functions for azure network.RoutesClient @@ -183,7 +183,7 @@ func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient } } -func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) { +func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error) { // /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { err = createRateLimitErr(true, "VMCreateOrUpdate") @@ -195,7 +195,7 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) }() - mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, VMName, parameters) if err != nil { return future.Response(), err @@ -217,7 +217,7 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) }() - mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, VMName, expand) mc.Observe(err) return @@ -234,7 +234,7 @@ func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName s klog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vm", "list", 
resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -290,7 +290,7 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) }() - mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters) if err != nil { mc.Observe(err) @@ -313,7 +313,7 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, klog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) }() - mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, networkInterfaceName, expand) mc.Observe(err) return @@ -330,7 +330,7 @@ func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx cont klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) }() - mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) mc.Observe(err) return @@ -373,7 +373,7 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) }() - mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, loadBalancerName, parameters, etag) if err != nil { mc.Observe(err) @@ -430,7 +430,7 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) }() - mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, loadBalancerName) mc.Observe(err) if err != nil { @@ -453,7 +453,7 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) }() - mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, loadBalancerName, expand) mc.Observe(err) return @@ -470,7 +470,7 @@ func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName str 
klog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -526,7 +526,7 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters) mc.Observe(err) if err != nil { @@ -550,7 +550,7 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, publicIPAddressName) mc.Observe(err) if err != nil { @@ -573,7 +573,7 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) }() - mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, publicIPAddressName, expand) mc.Observe(err) return @@ -589,7 +589,7 @@ func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName klog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -645,7 +645,7 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) if err != nil { mc.Observe(err) @@ -669,7 +669,7 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, virtualNetworkName, subnetName) if err != nil { 
mc.Observe(err) @@ -692,7 +692,7 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, virtualNetworkName, subnetName, expand) mc.Observe(err) return @@ -708,7 +708,7 @@ func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, v klog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) }() - mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualNetworkName) if err != nil { mc.Observe(err) @@ -764,7 +764,7 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) if err != nil { mc.Observe(err) @@ -821,7 +821,7 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, networkSecurityGroupName) if err != nil { mc.Observe(err) @@ -844,7 +844,7 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, networkSecurityGroupName, expand) mc.Observe(err) return @@ -860,7 +860,7 @@ func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName st klog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -915,7 +915,7 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() - mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, 
VMScaleSetName) mc.Observe(err) return @@ -932,7 +932,7 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) }() - mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName) mc.Observe(err) if err != nil { @@ -987,7 +987,7 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, VMScaleSetName, instanceID) mc.Observe(err) return @@ -1004,7 +1004,7 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.GetInstanceView(ctx, resourceGroupName, VMScaleSetName, instanceID) mc.Observe(err) return @@ -1021,7 +1021,7 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() - mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID, "") iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) mc.Observe(err) if err != nil { @@ -1040,7 +1040,7 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG return result, nil } -func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) { +func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) { if !az.rateLimiterWriter.TryAccept() { err = createRateLimitErr(true, "VMSSUpdate") return @@ -1051,7 +1051,7 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("vmssvm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) future, err := az.client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) mc.Observe(err) if err != nil { @@ -1100,7 +1100,7 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", 
resourceGroupName, routeTableName, routeName) }() - mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag) if err != nil { mc.Observe(err) @@ -1159,7 +1159,7 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, routeTableName, routeName) if err != nil { mc.Observe(err) @@ -1208,7 +1208,7 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) }() - mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") req, err := az.createOrUpdatePreparer(ctx, resourceGroupName, routeTableName, parameters, etag) if err != nil { mc.Observe(err) @@ -1264,7 +1264,7 @@ func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) }() - mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, routeTableName, expand) mc.Observe(err) return @@ -1306,7 +1306,7 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Create(ctx, resourceGroupName, accountName, parameters) if err != nil { return future.Response(), err @@ -1328,7 +1328,7 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Delete(ctx, resourceGroupName, accountName) mc.Observe(err) return @@ -1345,7 +1345,7 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.ListKeys(ctx, resourceGroupName, accountName) mc.Observe(err) return @@ -1362,7 +1362,7 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx 
context.Context, resou klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) }() - mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.ListByResourceGroup(ctx, resourceGroupName) mc.Observe(err) return @@ -1379,7 +1379,7 @@ func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGro klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) }() - mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.GetProperties(ctx, resourceGroupName, accountName) mc.Observe(err) return @@ -1421,7 +1421,7 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) }() - mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter) mc.Observe(err) if err != nil { @@ -1445,7 +1445,7 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() - mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID, "") future, err := az.client.Delete(ctx, resourceGroupName, diskName) mc.Observe(err) if err != nil { @@ -1468,7 +1468,7 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk klog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) }() - mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID) + mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID, "") result, err = az.client.Get(ctx, resourceGroupName, diskName) mc.Observe(err) return @@ -1522,7 +1522,7 @@ func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location) }() - mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID) + mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID, "") result, err = az.client.List(ctx, location) mc.Observe(err) return diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go index 8e10fc75080..5ffafc81ce1 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go @@ -84,7 +84,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri // Invalidate the cache right after updating defer as.cloud.vmCache.Delete(vmName) - _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) + _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") if 
err != nil { klog.Errorf("azureDisk - attach disk(%s, %s) failed, err: %v", diskName, diskURI, err) detail := err.Error() @@ -151,7 +151,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N // Invalidate the cache right after updating defer as.cloud.vmCache.Delete(vmName) - return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) + return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "detach_disk") } // GetDataDisks gets a list of data disks attached to the node. diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go index db914c2c9a0..2d551af3f69 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go @@ -89,7 +89,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod defer ss.vmssVMCache.Delete(key) klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) - _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk") if err != nil { detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { @@ -159,7 +159,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName defer ss.vmssVMCache.Delete(key) klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) - return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk") } // GetDataDisks gets a list of data disks attached to the node. 
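The Azure client and disk-controller call sites above all follow the same pattern: each operation now threads a short operation source string (for example "attach_disk", "detach_disk" or "network_update", and "" where no particular source applies) down to the metric context so it can be emitted as an extra metric label. A minimal, self-contained sketch of that idea, using simplified types and a print-based observe helper rather than the real azure_metrics.go plumbing:

package main

import (
	"fmt"
	"strings"
	"time"
)

// metricContext mirrors the shape used by the cloud provider: a start time plus
// the label values that will be attached to the observation.
type metricContext struct {
	start      time.Time
	attributes []string
}

// newMetricContext takes the extra "source" label; callers pass "" when the
// operation has no interesting source.
func newMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *metricContext {
	return &metricContext{
		start:      time.Now(),
		attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source},
	}
}

// observe stands in for mc.Observe(err); here it only prints the label values
// and the elapsed time instead of updating Prometheus collectors.
func (mc *metricContext) observe(err error) {
	fmt.Printf("request=%s resource_group=%s subscription_id=%s source=%s latency=%v err=%v\n",
		mc.attributes[0], mc.attributes[1], mc.attributes[2], mc.attributes[3], time.Since(mc.start), err)
}

func main() {
	// A disk-attach call site would tag its scale-set VM update with "attach_disk".
	mc := newMetricContext("vmssvm", "create_or_update", "example-rg", "example-subscription", "attach_disk")
	// ... the actual Azure API call would happen here ...
	mc.observe(nil)
}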
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index e38f1d7683e..d3ad5e6de83 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -290,7 +290,7 @@ func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient { return fVMC } -func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -550,7 +550,7 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Con return result, nil } -func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go index c0806250b90..02730e0441f 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go @@ -33,6 +33,7 @@ var ( "request", // API function that is being invoked "resource_group", // Resource group of the resource being monitored "subscription_id", // Subscription ID of the resource being monitored + "source", // Operation source (optional) } apiMetrics = registerAPIMetrics(metricLabels...)
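The hunk above only declares the new "source" label; the existing registerAPIMetrics(metricLabels...) helper is what turns the label list into collectors. With the Prometheus client library, that registration could look roughly like the sketch below (the metric name and help text are placeholders, not necessarily what azure_metrics.go actually registers):

package main

import "github.com/prometheus/client_golang/prometheus"

var metricLabels = []string{"request", "resource_group", "subscription_id", "source"}

// apiLatency is a stand-in for the histogram built by registerAPIMetrics.
var apiLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "cloudprovider_azure_api_request_duration_seconds", // placeholder name
		Help: "Latency of Azure API calls, partitioned by request, resource group, subscription and source.",
	},
	metricLabels,
)

func init() {
	prometheus.MustRegister(apiLatency)
}

func main() {
	// An observation records the elapsed seconds with the four label values,
	// for example a VM update triggered by a disk attach:
	apiLatency.WithLabelValues("vmssvm_create_or_update", "example-rg", "example-subscription", "attach_disk").Observe(0.42)
}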
@@ -43,10 +44,10 @@ type metricContext struct { attributes []string } -func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext { +func newMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *metricContext { return &metricContext{ start: time.Now(), - attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID}, + attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source}, } } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go index 978c6b50540..7033e6a7be4 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go @@ -23,12 +23,12 @@ import ( ) func TestAzureMetricLabelCardinality(t *testing.T) { - mc := newMetricContext("test", "create", "resource_group", "subscription_id") + mc := newMetricContext("test", "create", "resource_group", "subscription_id", "source") assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match") } func TestAzureMetricLabelPrefix(t *testing.T) { - mc := newMetricContext("prefix", "request", "resource_group", "subscription_id") + mc := newMetricContext("prefix", "request", "resource_group", "subscription_id", "source") found := false for _, attribute := range mc.attributes { if attribute == "prefix_request" { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go index fdefe59e827..ef7f50b3922 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go @@ -1108,7 +1108,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus vmCtx, vmCancel := getContextWithCancel() defer vmCancel() - _, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM) + _, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM, "") if err != nil { } // add to kubernetes diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index eccb831d634..f345a1fa458 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -712,10 +712,10 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam ctx, cancel := getContextWithCancel() defer cancel() klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID) - resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update") if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) + retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update") if retryErr != nil { err = retryErr klog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new 
backendPoolID %s, err: %v", vmName, backendPoolID, err) @@ -841,10 +841,10 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa ctx, cancel := getContextWithCancel() defer cancel() klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID) - resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update") if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) + retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update") if retryErr != nil { err = retryErr klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) From 997648a923cd66aaaaa1a51b3c3fd470b344b3fe Mon Sep 17 00:00:00 2001 From: danielqsj Date: Sun, 5 May 2019 11:16:14 +0800 Subject: [PATCH 089/194] Add Un-reserve extension point for the scheduling framework --- pkg/scheduler/framework/v1alpha1/framework.go | 12 ++ pkg/scheduler/framework/v1alpha1/interface.go | 14 ++ pkg/scheduler/scheduler.go | 8 + test/integration/scheduler/framework_test.go | 142 ++++++++++++++++-- 4 files changed, 165 insertions(+), 11 deletions(-) diff --git a/pkg/scheduler/framework/v1alpha1/framework.go b/pkg/scheduler/framework/v1alpha1/framework.go index fb860d4209a..d4be81c9c27 100644 --- a/pkg/scheduler/framework/v1alpha1/framework.go +++ b/pkg/scheduler/framework/v1alpha1/framework.go @@ -33,6 +33,7 @@ type framework struct { plugins map[string]Plugin // a map of initialized plugins. Plugin name:plugin instance. reservePlugins []ReservePlugin prebindPlugins []PrebindPlugin + unreservePlugins []UnreservePlugin } var _ = Framework(&framework{}) @@ -64,6 +65,9 @@ func NewFramework(r Registry, _ *runtime.Unknown) (Framework, error) { if pp, ok := p.(PrebindPlugin); ok { f.prebindPlugins = append(f.prebindPlugins, pp) } + if up, ok := p.(UnreservePlugin); ok { + f.unreservePlugins = append(f.unreservePlugins, up) + } } return f, nil } @@ -105,6 +109,14 @@ func (f *framework) RunReservePlugins( return nil } +// RunUnreservePlugins runs the set of configured unreserve plugins. +func (f *framework) RunUnreservePlugins( + pc *PluginContext, pod *v1.Pod, nodeName string) { + for _, pl := range f.unreservePlugins { + pl.Unreserve(pc, pod, nodeName) + } +} + // NodeInfoSnapshot returns the latest NodeInfo snapshot. The snapshot // is taken at the beginning of a scheduling cycle and remains unchanged until a // pod finishes "Reserve". There is no guarantee that the information remains diff --git a/pkg/scheduler/framework/v1alpha1/interface.go b/pkg/scheduler/framework/v1alpha1/interface.go index 49b4a235169..a3764adf616 100644 --- a/pkg/scheduler/framework/v1alpha1/interface.go +++ b/pkg/scheduler/framework/v1alpha1/interface.go @@ -113,6 +113,17 @@ type PrebindPlugin interface { Prebind(pc *PluginContext, p *v1.Pod, nodeName string) *Status } +// UnreservePlugin is an interface for Unreserve plugins. This is an informational +// extension point. 
If a pod was reserved and then rejected in a later phase, then +// un-reserve plugins will be notified. Un-reserve plugins should clean up state +// associated with the reserved Pod. +type UnreservePlugin interface { + Plugin + // Unreserve is called by the scheduling framework when a reserved pod was + // rejected in a later phase. + Unreserve(pc *PluginContext, p *v1.Pod, nodeName string) +} + // Framework manages the set of plugins in use by the scheduling framework. // Configured plugins are called at specified points in a scheduling context. type Framework interface { @@ -128,6 +139,9 @@ type Framework interface { // plugins returns an error, it does not continue running the remaining ones and // returns the error. In such case, pod will not be scheduled. RunReservePlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status + + // RunUnreservePlugins runs the set of configured unreserve plugins. + RunUnreservePlugins(pc *PluginContext, pod *v1.Pod, nodeName string) } // FrameworkHandle provides data and some tools that plugins can use. It is diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 6fb7151a32d..770866cb2ef 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -515,6 +515,8 @@ func (sched *Scheduler) scheduleOne() { if err != nil { klog.Errorf("error assuming pod: %v", err) metrics.PodScheduleErrors.Inc() + // trigger un-reserve plugins to clean up state associated with the reserved Pod + fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) return } // bind the pod to its host asynchronously (we can do this b/c of the assumption step above). @@ -525,6 +527,8 @@ func (sched *Scheduler) scheduleOne() { if err != nil { klog.Errorf("error binding volumes: %v", err) metrics.PodScheduleErrors.Inc() + // trigger un-reserve plugins to clean up state associated with the reserved Pod + fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) return } } @@ -543,6 +547,8 @@ func (sched *Scheduler) scheduleOne() { klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) } sched.recordSchedulingFailure(assumedPod, prebindStatus.AsError(), reason, prebindStatus.Message()) + // trigger un-reserve plugins to clean up state associated with the reserved Pod + fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) return } @@ -558,6 +564,8 @@ func (sched *Scheduler) scheduleOne() { if err != nil { klog.Errorf("error binding pod: %v", err) metrics.PodScheduleErrors.Inc() + // trigger un-reserve plugins to clean up state associated with the reserved Pod + fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) } else { klog.V(2).Infof("pod %v/%v is bound successfully on node %v, %d nodes evaluated, %d nodes were found feasible", assumedPod.Namespace, assumedPod.Name, scheduleResult.SuggestedHost, scheduleResult.EvaluatedNodes, scheduleResult.FeasibleNodes) metrics.PodScheduleSuccesses.Inc() diff --git a/test/integration/scheduler/framework_test.go b/test/integration/scheduler/framework_test.go index c900bd6a7e9..4b55e36a3b1 100644 --- a/test/integration/scheduler/framework_test.go +++ b/test/integration/scheduler/framework_test.go @@ -18,11 +18,11 @@ package scheduler import ( "fmt" - "k8s.io/apimachinery/pkg/runtime" "testing" "time" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) @@ -30,11 +30,12 @@ import ( // TesterPlugin is 
common ancestor for a test plugin that allows injection of // failures and some other test functionalities. type TesterPlugin struct { - numReserveCalled int - numPrebindCalled int - failReserve bool - failPrebind bool - rejectPrebind bool + numReserveCalled int + numPrebindCalled int + numUnreserveCalled int + failReserve bool + failPrebind bool + rejectPrebind bool } type ReservePlugin struct { @@ -45,19 +46,27 @@ type PrebindPlugin struct { TesterPlugin } +type UnreservePlugin struct { + TesterPlugin +} + const ( - reservePluginName = "reserve-plugin" - prebindPluginName = "prebind-plugin" + reservePluginName = "reserve-plugin" + prebindPluginName = "prebind-plugin" + unreservePluginName = "unreserve-plugin" ) var _ = framework.ReservePlugin(&ReservePlugin{}) var _ = framework.PrebindPlugin(&PrebindPlugin{}) +var _ = framework.UnreservePlugin(&UnreservePlugin{}) // Name returns name of the plugin. func (rp *ReservePlugin) Name() string { return reservePluginName } +var resPlugin = &ReservePlugin{} + // Reserve is a test function that returns an error or nil, depending on the // value of "failReserve". func (rp *ReservePlugin) Reserve(pc *framework.PluginContext, pod *v1.Pod, nodeName string) *framework.Status { @@ -68,14 +77,13 @@ func (rp *ReservePlugin) Reserve(pc *framework.PluginContext, pod *v1.Pod, nodeN return nil } -var resPlugin = &ReservePlugin{} -var pbdPlugin = &PrebindPlugin{} - // NewReservePlugin is the factory for reserve plugin. func NewReservePlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { return resPlugin, nil } +var pbdPlugin = &PrebindPlugin{} + // Name returns name of the plugin. func (pp *PrebindPlugin) Name() string { return prebindPluginName @@ -93,11 +101,39 @@ func (pp *PrebindPlugin) Prebind(pc *framework.PluginContext, pod *v1.Pod, nodeN return nil } +// reset used to reset numPrebindCalled. +func (pp *PrebindPlugin) reset() { + pp.numPrebindCalled = 0 +} + // NewPrebindPlugin is the factory for prebind plugin. func NewPrebindPlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { return pbdPlugin, nil } +var unresPlugin = &UnreservePlugin{} + +// Name returns name of the plugin. +func (up *UnreservePlugin) Name() string { + return unreservePluginName +} + +// Unreserve is a test function that increments numUnreserveCalled each time +// it is invoked. +func (up *UnreservePlugin) Unreserve(pc *framework.PluginContext, pod *v1.Pod, nodeName string) { + up.numUnreserveCalled++ +} + +// reset used to reset numUnreserveCalled. +func (up *UnreservePlugin) reset() { + up.numUnreserveCalled = 0 +} + +// NewUnreservePlugin is the factory for unreserve plugin. +func NewUnreservePlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return unresPlugin, nil +} + // TestReservePlugin tests invocation of reserve plugins. func TestReservePlugin(t *testing.T) { // Create a plugin registry for testing. Register only a reserve plugin. @@ -216,3 +252,87 @@ func TestPrebindPlugin(t *testing.T) { cleanupPods(cs, t, []*v1.Pod{pod}) } } + +// TestUnreservePlugin tests invocation of the un-reserve plugin +func TestUnreservePlugin(t *testing.T) { + // TODO: register more plugins which would trigger the un-reserve plugin + registry := framework.Registry{ + unreservePluginName: NewUnreservePlugin, + prebindPluginName: NewPrebindPlugin, + } + + // Create the master and the scheduler with the test plugin set.
+ context := initTestSchedulerWithOptions(t, + initTestMaster(t, "unreserve-plugin", nil), + false, nil, registry, false, time.Second) + defer cleanupTest(t, context) + + cs := context.clientSet + // Add a few nodes. + _, err := createNodes(cs, "test-node", nil, 2) + if err != nil { + t.Fatalf("Cannot create nodes: %v", err) + } + + tests := []struct { + prebindFail bool + prebindReject bool + }{ + { + prebindFail: false, + prebindReject: false, + }, + { + prebindFail: true, + prebindReject: false, + }, + { + prebindFail: false, + prebindReject: true, + }, + { + prebindFail: true, + prebindReject: true, + }, + } + + for i, test := range tests { + pbdPlugin.failPrebind = test.prebindFail + pbdPlugin.rejectPrebind = test.prebindReject + + // Create a best effort pod. + pod, err := createPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating a test pod: %v", err) + } + + if test.prebindFail { + if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(cs, pod.Namespace, pod.Name)); err != nil { + t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) + } + if unresPlugin.numUnreserveCalled == 0 || unresPlugin.numUnreserveCalled != pbdPlugin.numPrebindCalled { + t.Errorf("test #%v: Expected the unreserve plugin to be called %d times, was called %d times.", i, pbdPlugin.numPrebindCalled, unresPlugin.numUnreserveCalled) + } + } else { + if test.prebindReject { + if err = waitForPodUnschedulable(cs, pod); err != nil { + t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err) + } + if unresPlugin.numUnreserveCalled == 0 || unresPlugin.numUnreserveCalled != pbdPlugin.numPrebindCalled { + t.Errorf("test #%v: Expected the unreserve plugin to be called %d times, was called %d times.", i, pbdPlugin.numPrebindCalled, unresPlugin.numUnreserveCalled) + } + } else { + if err = waitForPodToSchedule(cs, pod); err != nil { + t.Errorf("test #%v: Expected the pod to be scheduled.
error: %v", i, err) + } + if unresPlugin.numUnreserveCalled > 0 { + t.Errorf("test #%v: Didn't expected the unreserve plugin to be called, was called %d times.", i, unresPlugin.numUnreserveCalled) + } + } + } + unresPlugin.reset() + pbdPlugin.reset() + cleanupPods(cs, t, []*v1.Pod{pod}) + } +} From 8a6fede9e6f53503b862f965d0337c39d94ca563 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 13:56:16 +0800 Subject: [PATCH 090/194] remove dot imports in e2e/storage/vsphere --- .../vsphere/persistent_volumes-vsphere.go | 38 +++--- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 64 +++++----- .../e2e/storage/vsphere/pvc_label_selector.go | 30 ++--- test/e2e/storage/vsphere/vsphere_common.go | 4 +- test/e2e/storage/vsphere/vsphere_scale.go | 30 ++--- .../storage/vsphere/vsphere_statefulsets.go | 36 +++--- test/e2e/storage/vsphere/vsphere_stress.go | 48 +++---- test/e2e/storage/vsphere/vsphere_utils.go | 24 ++-- .../vsphere/vsphere_volume_cluster_ds.go | 28 ++-- .../vsphere/vsphere_volume_datastore.go | 20 +-- .../vsphere/vsphere_volume_diskformat.go | 42 +++--- .../vsphere/vsphere_volume_disksize.go | 24 ++-- .../storage/vsphere/vsphere_volume_fstype.go | 50 ++++---- .../vsphere/vsphere_volume_master_restart.go | 30 ++--- .../vsphere/vsphere_volume_node_delete.go | 38 +++--- .../vsphere/vsphere_volume_node_poweroff.go | 36 +++--- .../vsphere/vsphere_volume_ops_storm.go | 32 ++--- .../storage/vsphere/vsphere_volume_perf.go | 26 ++-- .../vsphere/vsphere_volume_placement.go | 76 +++++------ .../vsphere/vsphere_volume_vpxd_restart.go | 32 ++--- .../vsphere/vsphere_volume_vsan_policy.go | 106 ++++++++-------- .../storage/vsphere/vsphere_zone_support.go | 120 +++++++++--------- 22 files changed, 467 insertions(+), 467 deletions(-) diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index d33b1460a18..051de8e0167 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -19,8 +19,8 @@ package vsphere import ( "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 4. Create a POD using the PVC. 5. Verify Disk and Attached to the node. 
*/ - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) c = f.ClientSet @@ -95,23 +95,23 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { StorageClassName: &emptyStorageClass, } } - By("Creating the PV and PVC") + ginkgo.By("Creating the PV and PVC") pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) - By("Creating the Client Pod") + ginkgo.By("Creating the Client Pod") clientPod, err = framework.CreateClientPod(c, ns, pvc) framework.ExpectNoError(err) node = clientPod.Spec.NodeName - By("Verify disk should be attached to the node") + ginkgo.By("Verify disk should be attached to the node") isAttached, err := diskIsAttached(volumePath, node) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), "disk is not attached with the node") + gomega.Expect(isAttached).To(gomega.BeTrue(), "disk is not attached with the node") }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources") if c != nil { framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name) @@ -147,12 +147,12 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 2. Delete POD, POD deletion should succeed. */ - It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() { - By("Deleting the Claim") + ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() { + ginkgo.By("Deleting the Claim") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil - By("Deleting the Pod") + ginkgo.By("Deleting the Pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) }) @@ -163,12 +163,12 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 1. Delete PV. 2. Delete POD, POD deletion should succeed. */ - It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() { - By("Deleting the Persistent Volume") + ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() { + ginkgo.By("Deleting the Persistent Volume") framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) pv = nil - By("Deleting the pod") + ginkgo.By("Deleting the pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) }) /* @@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 2. Restart kubelet 3. Verify that written file is accessible after kubelet restart */ - It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() { + ginkgo.It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() { utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod) }) @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 4. Start kubelet. 5. Verify that volume mount not to be found. 
*/ - It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() { + ginkgo.It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() { utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod) }) @@ -205,15 +205,15 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 2. Wait for namespace to get deleted. (Namespace deletion should trigger deletion of belonging pods) 3. Verify volume should be detached from the node. */ - It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { - By("Deleting the Namespace") + ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { + ginkgo.By("Deleting the Namespace") err := c.CoreV1().Namespaces().Delete(ns, nil) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) framework.ExpectNoError(err) - By("Verifying Persistent Disk detaches") + ginkgo.By("Verifying Persistent Disk detaches") waitForVSphereDiskToDetach(volumePath, node) }) }) diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index ddedcdc83cb..580c19d7872 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -20,8 +20,8 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,14 +42,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) }) utils.SIGDescribe("persistentvolumereclaim:vsphere", func() { - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) nodeInfo = GetReadySchedulableRandomNodeInfo() @@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { volumePath = "" }) - AfterEach(func() { + ginkgo.AfterEach(func() { testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc) }) @@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { 5. Delete PVC 6. Verify PV is deleted automatically. */ - It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { + ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { var err error volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) framework.ExpectNoError(err) @@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { deletePVCAfterBind(c, ns, pvc, pv) pvc = nil - By("verify pv is deleted") + ginkgo.By("verify pv is deleted") err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { 8. 
Delete the pod. 9. Verify PV should be detached from the node and automatically deleted. */ - It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() { + ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() { var err error volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) @@ -110,35 +110,35 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { // Wait for PV and PVC to Bind framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) - By("Creating the Pod") + ginkgo.By("Creating the Pod") pod, err := framework.CreateClientPod(c, ns, pvc) framework.ExpectNoError(err) - By("Deleting the Claim") + ginkgo.By("Deleting the Claim") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil // Verify PV is Present, after PVC is deleted and PV status should be Failed. pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(gomega.HaveOccurred()) - By("Verify the volume is attached to the node") + ginkgo.By("Verify the volume is attached to the node") isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) - Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) - Expect(isVolumeAttached).To(BeTrue()) + gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred()) + gomega.Expect(isVolumeAttached).To(gomega.BeTrue()) - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}) e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim") - By("Deleting the Pod") + ginkgo.By("Deleting the Pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) - By("Verify PV is detached from the node after Pod is deleted") - Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred()) + ginkgo.By("Verify PV is detached from the node after Pod is deleted") + gomega.Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(gomega.HaveOccurred()) - By("Verify PV should be deleted automatically") + ginkgo.By("Verify PV should be deleted automatically") framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) pv = nil volumePath = "" @@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { 11. Created POD using PVC created in Step 10 and verify volume content is matching. 
*/ - It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() { + ginkgo.It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() { var err error var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) @@ -171,27 +171,27 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { writeContentToVSpherePV(c, pvc, volumeFileContent) - By("Delete PVC") + ginkgo.By("Delete PVC") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil - By("Verify PV is retained") + ginkgo.By("Verify PV is retained") e2elog.Logf("Waiting for PV %v to become Released", pv.Name) err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) - By("Creating the PV for same volume path") + ginkgo.By("Creating the PV for same volume path") pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) pv, err = c.CoreV1().PersistentVolumes().Create(pv) framework.ExpectNoError(err) - By("creating the pvc") + ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) framework.ExpectNoError(err) - By("wait for the pv and pvc to bind") + ginkgo.By("wait for the pv and pvc to bind") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) verifyContentOfVSpherePV(c, pvc, volumeFileContent) @@ -201,19 +201,19 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { // Test Setup for persistentvolumereclaim tests for vSphere Provider func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) { - By("running testSetupVSpherePersistentVolumeReclaim") - By("creating vmdk") + ginkgo.By("running testSetupVSpherePersistentVolumeReclaim") + ginkgo.By("creating vmdk") volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) if err != nil { return } - By("creating the pv") + ginkgo.By("creating the pv") pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil) pv, err = c.CoreV1().PersistentVolumes().Create(pv) if err != nil { return } - By("creating the pvc") + ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) return @@ -221,7 +221,7 @@ func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *No // Test Cleanup for persistentvolumereclaim tests for vSphere Provider func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { - By("running testCleanupVSpherePersistentVolumeReclaim") + ginkgo.By("running testCleanupVSpherePersistentVolumeReclaim") if len(volumePath) > 0 { err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) framework.ExpectNoError(err) @@ -238,10 +238,10 @@ func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo * func 
deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { var err error - By("wait for the pv and pvc to bind") + ginkgo.By("wait for the pv and pvc to bind") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) - By("delete pvc") + ginkgo.By("delete pvc") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) if !apierrs.IsNotFound(err) { diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index 9226cac96d1..4b4272cf16c 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -19,7 +19,7 @@ package vsphere import ( "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -57,7 +57,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { err error nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") c = f.ClientSet ns = f.Namespace.Name @@ -72,67 +72,67 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { }) utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() { - AfterEach(func() { - By("Running clean up actions") + ginkgo.AfterEach(func() { + ginkgo.By("Running clean up actions") if framework.ProviderIs("vsphere") { testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol) } }) - It("should bind volume with claim for given label", func() { + ginkgo.It("should bind volume with claim for given label", func() { volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels) framework.ExpectNoError(err) - By("wait for the pvc_ssd to bind with pv_ssd") + ginkgo.By("wait for the pvc_ssd to bind with pv_ssd") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)) - By("Verify status of pvc_vvol is pending") + ginkgo.By("Verify status of pvc_vvol is pending") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) - By("delete pvc_ssd") + ginkgo.By("delete pvc_ssd") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name) - By("verify pv_ssd is deleted") + ginkgo.By("verify pv_ssd is deleted") err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) volumePath = "" - By("delete pvc_vvol") + ginkgo.By("delete pvc_vvol") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name) }) }) }) func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) { - By("creating vmdk") + ginkgo.By("creating vmdk") volumePath = "" volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) if err != nil { return } - By("creating the pv with label volume-type:ssd") + ginkgo.By("creating the pv with label 
volume-type:ssd") pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels) pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd) if err != nil { return } - By("creating pvc with label selector to match with volume-type:vvol") + ginkgo.By("creating pvc with label selector to match with volume-type:vvol") pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels) pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol) if err != nil { return } - By("creating pvc with label selector to match with volume-type:ssd") + ginkgo.By("creating pvc with label selector to match with volume-type:ssd") pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels) pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd) return } func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) { - By("running testCleanupVSpherePVClabelselector") + ginkgo.By("running testCleanupVSpherePVClabelselector") if len(volumePath) > 0 { nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) } diff --git a/test/e2e/storage/vsphere/vsphere_common.go b/test/e2e/storage/vsphere/vsphere_common.go index b85598f9ab5..a8950928d74 100644 --- a/test/e2e/storage/vsphere/vsphere_common.go +++ b/test/e2e/storage/vsphere/vsphere_common.go @@ -20,7 +20,7 @@ import ( "os" "strconv" - . "github.com/onsi/gomega" + "github.com/onsi/gomega" "k8s.io/kubernetes/test/e2e/framework" ) @@ -67,7 +67,7 @@ const ( func GetAndExpectStringEnvVar(varName string) string { varValue := os.Getenv(varName) - Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set") + gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set") return varValue } diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 4959e66a8dc..3dd7e4538ba 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -20,8 +20,8 @@ import ( "fmt" "strconv" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -79,8 +79,8 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod) numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances) - Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5") - Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count") + gomega.Expect(numberOfInstances > 5).NotTo(gomega.BeTrue(), "Maximum allowed instances are 5") + gomega.Expect(numberOfInstances > volumeCount).NotTo(gomega.BeTrue(), "Number of instances should be less than the total volume count") policyName = GetAndExpectStringEnvVar(SPBMPolicyName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) @@ -108,14 +108,14 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { } }) - It("vsphere scale tests", func() { + ginkgo.It("vsphere scale tests", func() { var pvcClaimList []string nodeVolumeMap := make(map[string][]string) // Volumes will be provisioned with each different types of Storage Class scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class - By(fmt.Sprintf("Creating Storage Class : %q", scname)) + ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname)) var sc *storageV1.StorageClass scParams := make(map[string]string) var err error @@ -130,7 +130,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { scParams[Datastore] = datastoreName } sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil)) - Expect(sc).NotTo(BeNil(), "Storage class is empty") + gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") framework.ExpectNoError(err, "Failed to create storage class") defer client.StorageV1().StorageClasses().Delete(scname, nil) scArrays[index] = sc @@ -154,11 +154,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) for _, pod := range podList.Items { pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) 
- By("Deleting pod") + ginkgo.By("Deleting pod") err = framework.DeletePodWithWait(f, client, &pod) framework.ExpectNoError(err) } - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") err = waitForVSphereDisksToDetach(nodeVolumeMap) framework.ExpectNoError(err) @@ -182,7 +182,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string { // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() nodeVolumeMap := make(map[string][]string) nodeSelectorIndex := 0 for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod { @@ -191,17 +191,17 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s } pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod) for i := 0; i < volumesPerPod; i++ { - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)])) framework.ExpectNoError(err) pvclaims[i] = pvclaim } - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)] // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") @@ -210,7 +210,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s for _, pv := range persistentvolumes { nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) } - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) nodeSelectorIndex++ } diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 0804b2245da..ae47c1be067 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -19,8 +19,8 @@ package vsphere import ( "fmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -57,19 +57,19 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { namespace string client clientset.Interface ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") namespace = f.Namespace.Name client = f.ClientSet Bootstrap(f) }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("Deleting all statefulset in namespace: %v", namespace) framework.DeleteAllStatefulSets(client, namespace) }) - It("vsphere statefulset testing", func() { - By("Creating StorageClass for Statefulset") + ginkgo.It("vsphere statefulset testing", func() { + ginkgo.By("Creating StorageClass for Statefulset") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil) @@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(sc.Name, nil) - By("Creating statefulset") + ginkgo.By("Creating statefulset") statefulsetTester := framework.NewStatefulSetTester(client) statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace) replicas := *(statefulset.Spec.Replicas) @@ -85,8 +85,8 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) framework.ExpectNoError(statefulsetTester.CheckMount(statefulset, mountPath)) ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset) - Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas") + gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") // Get the list of Volumes attached to Pods before scale down volumesBeforeScaleDown := make(map[string]string) @@ -101,17 +101,17 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { } } - By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) + ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) _, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1) framework.ExpectNoError(scaledownErr) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1) // After scale down, verify vsphere volumes are detached from deleted pods - By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") + ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") for _, sspod := range ssPodsBeforeScaleDown.Items { _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) if err != nil { - Expect(apierrs.IsNotFound(err), BeTrue()) + gomega.Expect(apierrs.IsNotFound(err), gomega.BeTrue()) for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, 
volumespec.PersistentVolumeClaim.ClaimName) @@ -122,18 +122,18 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { } } - By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) + ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) _, scaleupErr := statefulsetTester.Scale(statefulset, replicas) framework.ExpectNoError(scaleupErr) statefulsetTester.WaitForStatusReplicas(statefulset, replicas) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset) - Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas") + gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") // After scale up, verify all vsphere volumes are attached to node VMs. - By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") + ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") for _, sspod := range ssPodsAfterScaleUp.Items { err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) framework.ExpectNoError(err) @@ -144,9 +144,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) e2elog.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) // Verify scale up has re-attached the same volumes and not introduced new volume - Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse()) + gomega.Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(gomega.BeFalse()) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName) - Expect(isVolumeAttached).To(BeTrue()) + gomega.Expect(isVolumeAttached).To(gomega.BeTrue()) framework.ExpectNoError(verifyDiskAttachedError) } } diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index 87407acb69b..d8e8ee4b231 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -20,8 +20,8 @@ import ( "fmt" "sync" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,34 +53,34 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") client = f.ClientSet namespace = f.Namespace.Name nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") // if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times. 
// Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class, // Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc. instances = GetAndExpectIntEnvVar(VCPStressInstances) - Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items))) - Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes") + gomega.Expect(instances <= volumesPerNode*len(nodeList.Items)).To(gomega.BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items))) + gomega.Expect(instances > len(scNames)).To(gomega.BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes") iterations = GetAndExpectIntEnvVar(VCPStressIterations) framework.ExpectNoError(err, "Error Parsing VCP_STRESS_ITERATIONS") - Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0") + gomega.Expect(iterations > 0).To(gomega.BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0") policyName = GetAndExpectStringEnvVar(SPBMPolicyName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) - It("vsphere stress tests", func() { + ginkgo.It("vsphere stress tests", func() { scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class - By(fmt.Sprintf("Creating Storage Class : %v", scname)) + ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname)) var sc *storageV1.StorageClass var err error switch scname { @@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } - Expect(sc).NotTo(BeNil()) + gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(scname, nil) scArrays[index] = sc @@ -123,50 +123,50 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun // goroutine to perform volume lifecycle operations in parallel func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) { defer wg.Done() - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() for iterationCount := 0; iterationCount < iterations; iterationCount++ { logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1) - By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) + ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) + ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) persistentvolumes, err := 
framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) + ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) - Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred()) + ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) + gomega.Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(gomega.HaveOccurred()) // Get the copy of the Pod to know the assigned node name. pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) + ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) - Expect(isVolumeAttached).To(BeTrue()) - Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) + gomega.Expect(isVolumeAttached).To(gomega.BeTrue()) + gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) + ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) + ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) + ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) - Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred()) + ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) + gomega.Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(gomega.HaveOccurred()) } } diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 6a3360c8c47..c183bf3a481 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -24,8 +24,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" @@ -404,7 +404,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste // Verify disks are attached to the node isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath)) + gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath)) // Verify Volumes are accessible filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt") _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) @@ -441,7 +441,7 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n } } } - Expect(commonDatastores).To(ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.") + gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.") } } @@ -631,7 +631,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) { var nodeVM mo.VirtualMachine err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM) framework.ExpectNoError(err) - Expect(nodeVM.Config).NotTo(BeNil()) + gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil()) vmxPath = nodeVM.Config.Files.VmPathName e2elog.Logf("vmx file path is %s", vmxPath) @@ -643,7 +643,7 @@ func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool { numNodes := 0 for i := 0; i < 36; i++ { nodeList := framework.GetReadySchedulableNodesOrDie(client) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") numNodes = len(nodeList.Items) if numNodes == expectedNodes { @@ -777,7 +777,7 @@ func getUUIDFromProviderID(providerID string) string { // GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state func GetReadySchedulableNodeInfos() []*NodeInfo { nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") var nodesInfo []*NodeInfo for _, node := range nodeList.Items { nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name) @@ -793,7 +793,7 @@ func GetReadySchedulableNodeInfos() []*NodeInfo { // and it's associated NodeInfo object is returned. 
func GetReadySchedulableRandomNodeInfo() *NodeInfo { nodesInfo := GetReadySchedulableNodeInfos() - Expect(nodesInfo).NotTo(BeEmpty()) + gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty()) return nodesInfo[rand.Int()%len(nodesInfo)] } @@ -815,7 +815,7 @@ func invokeVCenterServiceControl(command, service, host string) error { func expectVolumeToBeAttached(nodeName, volumePath string) { isAttached, err := diskIsAttached(volumePath, nodeName) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) + gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) } // expectVolumesToBeAttached checks if the given Volumes are attached to the @@ -824,7 +824,7 @@ func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) { for i, pod := range pods { nodeName := pod.Spec.NodeName volumePath := volumePaths[i] - By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) } } @@ -835,7 +835,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str for i, pod := range pods { podName := pod.Name filePath := filePaths[i] - By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName)) + ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName)) verifyFilesExistOnVSphereVolume(namespace, podName, filePath) } } @@ -861,7 +861,7 @@ func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []str for i, pod := range pods { podName := pod.Name filePath := filePaths[i] - By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName)) + ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName)) expectFileContentToMatch(namespace, podName, filePath, contents[i]) } } diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index a655c9e8598..f6cdfa55885 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -17,8 +17,8 @@ limitations under the License. package vsphere import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -66,10 +66,10 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 6. 
Delete the volume */ - It("verify static provisioning on clustered datastore", func() { + ginkgo.It("verify static provisioning on clustered datastore", func() { var volumePath string - By("creating a test vsphere volume") + ginkgo.By("creating a test vsphere volume") volumeOptions := new(VolumeOptions) volumeOptions.CapacityKB = 2097152 volumeOptions.Name = "e2e-vmdk-" + namespace @@ -79,31 +79,31 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v framework.ExpectNoError(err) defer func() { - By("Deleting the vsphere volume") + ginkgo.By("Deleting the vsphere volume") nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) }() podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) - By("Creating pod") + ginkgo.By("Creating pod") pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) - By("Waiting for pod to be ready") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be ready") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) // get fresh pod info pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName - By("Verifying volume is attached") + ginkgo.By("Verifying volume is attached") expectVolumeToBeAttached(nodeName, volumePath) - By("Deleting pod") + ginkgo.By("Deleting pod") err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") err = waitForVSphereDiskToDetach(volumePath, nodeName) framework.ExpectNoError(err) }) @@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 1. Create storage class parameter and specify datastore to be a clustered datastore name 2. invokeValidPolicyTest - util to do e2e dynamic provision test */ - It("verify dynamic provision with default parameter on clustered datastore", func() { + ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func() { scParameters[Datastore] = clusterDatastore invokeValidPolicyTest(f, client, namespace, scParameters) }) @@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 1. Create storage class parameter and specify storage policy to be a tag based spbm policy 2. invokeValidPolicyTest - util to do e2e dynamic provision test */ - It("verify dynamic provision with spbm policy on clustered datastore", func() { + ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func() { policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster) scParameters[SpbmStoragePolicy] = policyDatastoreCluster invokeValidPolicyTest(f, client, namespace, scParameters) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index 2e2774a1e9d..5e1d74f227b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -21,8 +21,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", namespace string scParameters map[string]string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -64,12 +64,12 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", } }) - It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() { - By("Invoking Test for invalid datastore") + ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() { + ginkgo.By("Invoking Test for invalid datastore") scParameters[Datastore] = InvalidDatastore scParameters[DiskFormat] = ThinDisk err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found` if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -78,19 +78,19 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", }) func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { - By("Creating Storage Class With Invalid Datastore") + ginkgo.By("Creating Storage Class With Invalid Datastore") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Expect claim to fail provisioning volume") + ginkgo.By("Expect claim to fail provisioning volume") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 3616f8fb7ec..e5ba9cc23d7 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -20,8 +20,8 @@ import ( "context" "path/filepath" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" @@ -65,7 +65,7 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { nodeKeyValueLabel map[string]string nodeLabelValue string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -86,16 +86,16 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { } }) - It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { - By("Invoking Test for diskformat: eagerzeroedthick") + ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.By("Invoking Test for diskformat: eagerzeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") }) - It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() { - By("Invoking Test for diskformat: zeroedthick") + ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.By("Invoking Test for diskformat: zeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") }) - It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() { - By("Invoking Test for diskformat: thin") + ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.By("Invoking Test for diskformat: thin") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin") }) }) @@ -106,14 +106,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st scParameters := make(map[string]string) scParameters["diskformat"] = diskFormat - By("Creating Storage Class With DiskFormat") + ginkgo.By("Creating Storage Class With DiskFormat") storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec) framework.ExpectNoError(err) @@ -122,7 +122,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil) }() - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) @@ -138,32 +138,32 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st PV is required to be attached to the Node. 
so that using govmomi API we can grab Disk's Backing Info to check EagerlyScrub and ThinProvisioned property */ - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") pod, err := client.CoreV1().Pods(namespace).Create(podSpec) framework.ExpectNoError(err) - By("Waiting for pod to be running") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be running") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) - Expect(isAttached).To(BeTrue()) + gomega.Expect(isAttached).To(gomega.BeTrue()) framework.ExpectNoError(err) - By("Verify Disk Format") - Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed") + ginkgo.By("Verify Disk Format") + gomega.Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(gomega.BeTrue(), "DiskFormat Verification Failed") var volumePaths []string volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath) - By("Delete pod and wait for volume to be detached from node") + ginkgo.By("Delete pod and wait for volume to be detached from node") deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths) } func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { - By("Verifing disk format") + ginkgo.By("Verifying disk format") eagerlyScrub := false thinProvisioned := false diskFound := false @@ -194,7 +194,7 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath } } - Expect(diskFound).To(BeTrue(), "Failed to find disk") + gomega.Expect(diskFound).To(gomega.BeTrue(), "Failed to find disk") isDiskFormatCorrect := false if diskFormat == "eagerzeroedthick" { if eagerlyScrub == true && thinProvisioned == false { diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index b46d322e11b..3289b0eebcb 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -19,8 +19,8 @@ package vsphere import ( "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { scParameters map[string]string datastore string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -59,38 +59,38 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) - It("verify dynamically provisioned pv has size rounded up correctly", func() { - By("Invoking Test disk size") + ginkgo.It("verify dynamically provisioned pv has size rounded up correctly", func() { + ginkgo.By("Invoking Test disk size") scParameters[Datastore] = datastore scParameters[DiskFormat] = ThinDisk diskSize := "1" expectedDiskSize := "1Mi" - By("Creating Storage Class") + ginkgo.By("Creating Storage Class") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil)) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectNoError(err) - By("Getting new copy of PVC") + ginkgo.By("Getting new copy of PVC") pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - By("Getting PV created") + ginkgo.By("Getting PV created") pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) - By("Verifying if provisioned PV has the correct size") + ginkgo.By("Verifying if provisioned PV has the correct size") expectedCapacity := resource.MustParse(expectedDiskSize) pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] - Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value())) + gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value())) }) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 9746490558e..74982c083ab 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -20,8 +20,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -69,26 +69,26 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() { client clientset.Interface namespace string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) + gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) }) - It("verify fstype - ext3 formatted volume", func() { - By("Invoking Test for fstype: ext3") + ginkgo.It("verify fstype - ext3 formatted volume", func() { + ginkgo.By("Invoking Test for fstype: ext3") invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType) }) - It("verify fstype - default value should be ext4", func() { - By("Invoking Test for fstype: Default Value - ext4") + ginkgo.It("verify fstype - default value should be ext4", func() { + ginkgo.By("Invoking Test for fstype: Default Value - ext4") invokeTestForFstype(f, client, namespace, "", Ext4FSType) }) - It("verify invalid fstype", func() { - By("Invoking Test for fstype: invalid Value") + ginkgo.It("verify invalid fstype", func() { + ginkgo.By("Invoking Test for fstype: invalid Value") invokeTestForInvalidFstype(f, client, namespace, InvalidFSType) }) }) @@ -99,7 +99,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam scParameters["fstype"] = fstype // Create Persistent Volume - By("Creating Storage Class With Fstype") + ginkgo.By("Creating Storage Class With Fstype") pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) // Create Pod and verify the persistent volume is accessible @@ -110,7 +110,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam // Detach and delete volume detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) } func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) { @@ -118,24 +118,24 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa scParameters["fstype"] = fstype // Create Persistent Volume - By("Creating Storage Class With Invalid Fstype") + ginkgo.By("Creating Storage Class With Invalid Fstype") pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) // Detach and delete volume detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) - Expect(eventList.Items).NotTo(BeEmpty()) + gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) errorMsg := `MountVolume.MountDevice failed for volume "` + 
persistentvolumes[0].Name + `" : executable file not found` isFound := false for _, item := range eventList.Items { @@ -143,7 +143,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa isFound = true } } - Expect(isFound).To(BeTrue(), "Unable to verify MountVolume.MountDevice failure") + gomega.Expect(isFound).To(gomega.BeTrue(), "Unable to verify MountVolume.MountDevice failure") } func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { @@ -151,13 +151,13 @@ func createVolume(client clientset.Interface, namespace string, scParameters map framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) return pvclaim, persistentvolumes @@ -166,13 +166,13 @@ func createVolume(client clientset.Interface, namespace string, scParameters map func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) framework.ExpectNoError(err) // Asserts: Right disk is attached to the pod - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) return pod } @@ -180,11 +180,11 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st // detachVolume delete the volume passed in the argument and wait until volume is detached from the node, func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) nodeName := pod.Spec.NodeName - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodWithWait(f, client, pod) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") waitForVSphereDiskToDetach(volPath, nodeName) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index e437fdf6a99..a4f71d39334 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -19,8 +19,8 @@ package vsphere import ( "fmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup nodeNameList []string nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -79,22 +79,22 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup } }) - It("verify volume remains attached after master kubelet restart", func() { + ginkgo.It("verify volume remains attached after master kubelet restart", func() { // Create pod on each node for i := 0; i < numNodes; i++ { - By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) + ginkgo.By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) + ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) defer framework.DeletePodWithWait(f, client, pod) - By("Waiting for pod to be ready") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be ready") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -102,16 +102,16 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup pods = append(pods, pod) nodeName := pod.Spec.NodeName - By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) } - By("Restarting kubelet on master node") + ginkgo.By("Restarting kubelet on master node") masterAddress := framework.GetMasterHost() + ":22" err := framework.RestartKubelet(masterAddress) framework.ExpectNoError(err, "Unable to restart kubelet on master node") - By("Verifying the kubelet on master node is up") + ginkgo.By("Verifying the kubelet on master node is up") err = framework.WaitForKubeletUp(masterAddress) framework.ExpectNoError(err) @@ -119,18 +119,18 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup volumePath := volumePaths[i] nodeName := pod.Spec.NodeName - By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) - By(fmt.Sprintf("Deleting pod on node %s", nodeName)) + ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) err = waitForVSphereDiskToDetach(volumePath, nodeName) framework.ExpectNoError(err) 
- By(fmt.Sprintf("Deleting volume %s", volumePath)) + ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go index 2b53fd36607..8e5d6133575 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go @@ -20,8 +20,8 @@ import ( "context" "os" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/object" clientset "k8s.io/client-go/kubernetes" @@ -38,7 +38,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] err error ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -46,14 +46,14 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(err) workingDir = os.Getenv("VSPHERE_WORKING_DIR") - Expect(workingDir).NotTo(BeEmpty()) + gomega.Expect(workingDir).NotTo(gomega.BeEmpty()) }) - It("node unregister", func() { - By("Get total Ready nodes") + ginkgo.It("node unregister", func() { + ginkgo.By("Get total Ready nodes") nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") + gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test") totalNodesCount := len(nodeList.Items) nodeVM := nodeList.Items[0] @@ -75,44 +75,44 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] framework.ExpectNoError(err) // Unregister Node VM - By("Unregister a node VM") + ginkgo.By("Unregister a node VM") unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject) // Ready nodes should be 1 less - By("Verifying the ready node counts") - Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(BeTrue(), "Unable to verify expected ready node count") + ginkgo.By("Verifying the ready node counts") + gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(gomega.BeTrue(), "Unable to verify expected ready node count") nodeList = framework.GetReadySchedulableNodesOrDie(client) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") var nodeNameList []string for _, node := range nodeList.Items { nodeNameList = append(nodeNameList, node.ObjectMeta.Name) } - Expect(nodeNameList).NotTo(ContainElement(nodeVM.ObjectMeta.Name)) + gomega.Expect(nodeNameList).NotTo(gomega.ContainElement(nodeVM.ObjectMeta.Name)) // Register Node VM - By("Register back the node VM") + ginkgo.By("Register back the node VM") registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost) // Ready nodes should be equal to earlier count - By("Verifying the ready node counts") - Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(BeTrue(), "Unable to verify expected ready node count") + ginkgo.By("Verifying the ready node counts") + gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(gomega.BeTrue(), "Unable to verify expected ready node 
count") nodeList = framework.GetReadySchedulableNodesOrDie(client) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") nodeNameList = nodeNameList[:0] for _, node := range nodeList.Items { nodeNameList = append(nodeNameList, node.ObjectMeta.Name) } - Expect(nodeNameList).To(ContainElement(nodeVM.ObjectMeta.Name)) + gomega.Expect(nodeNameList).To(gomega.ContainElement(nodeVM.ObjectMeta.Name)) // Sanity test that pod provisioning works - By("Sanity check for volume lifecycle") + ginkgo.By("Sanity check for volume lifecycle") scParameters := make(map[string]string) storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY") - Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment") + gomega.Expect(storagePolicy).NotTo(gomega.BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment") scParameters[SpbmStoragePolicy] = storagePolicy invokeValidPolicyTest(f, client, namespace, scParameters) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index a4a095c5d28..ab042e322db 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -21,8 +21,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/object" vimtypes "github.com/vmware/govmomi/vim25/types" @@ -49,15 +49,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", namespace string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") - Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test") }) /* @@ -75,43 +75,43 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", 11. Delete the PVC 12. 
Delete the StorageClass */ - It("verify volume status after node power off", func() { - By("Creating a Storage Class") + ginkgo.It("verify volume status after node power off", func() { + ginkgo.By("Creating a Storage Class") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err)) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Waiting for PVC to be in bound phase") + ginkgo.By("Waiting for PVC to be in bound phase") pvclaims := []*v1.PersistentVolumeClaim{pvclaim} pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) volumePath := pvs[0].Spec.VsphereVolume.VolumePath - By("Creating a Deployment") + ginkgo.By("Creating a Deployment") deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) - By("Get pod from the deployement") + ginkgo.By("Get pod from the deployment") podList, err := e2edeploy.GetPodsForDeployment(client, deployment) framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployement with err: %v", err)) - Expect(podList.Items).NotTo(BeEmpty()) + gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] node1 := pod.Spec.NodeName - By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) + ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) isAttached, err := diskIsAttached(volumePath, node1) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), "Disk is not attached to the node") + gomega.Expect(isAttached).To(gomega.BeTrue(), "Disk is not attached to the node") - By(fmt.Sprintf("Power off the node: %v", node1)) + ginkgo.By(fmt.Sprintf("Power off the node: %v", node1)) nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) @@ -128,15 +128,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", node2, err := waitForPodToFailover(client, deployment, node1) framework.ExpectNoError(err, "Pod did not fail over to a different node") - By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) + ginkgo.By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) err = waitForVSphereDiskToAttach(volumePath, node2) framework.ExpectNoError(err, "Disk is not attached to the node") - By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) + ginkgo.By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) err = waitForVSphereDiskToDetach(volumePath, node1) framework.ExpectNoError(err, "Disk is not 
detached from the node") - By(fmt.Sprintf("Power on the previous node: %v", node1)) + ginkgo.By(fmt.Sprintf("Power on the previous node: %v", node1)) vm.PowerOn(ctx) err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn) framework.ExpectNoError(err, "Unable to power on the node") diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 4e995ddac47..af1d482b10b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -21,8 +21,8 @@ import ( "os" "strconv" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" @@ -58,12 +58,12 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { err error volume_ops_scale int ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) + gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) if os.Getenv("VOLUME_OPS_SCALE") != "" { volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE")) framework.ExpectNoError(err) @@ -72,25 +72,25 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { } pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale) }) - AfterEach(func() { - By("Deleting PVCs") + ginkgo.AfterEach(func() { + ginkgo.By("Deleting PVCs") for _, claim := range pvclaims { framework.DeletePersistentVolumeClaim(client, claim.Name, namespace) } - By("Deleting StorageClass") + ginkgo.By("Deleting StorageClass") err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) framework.ExpectNoError(err) }) - It("should create pod with many volumes and verify no attach call fails", func() { - By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale)) - By("Creating Storage Class") + ginkgo.It("should create pod with many volumes and verify no attach call fails", func() { + ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale)) + ginkgo.By("Creating Storage Class") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil)) framework.ExpectNoError(err) - By("Creating PVCs using the Storage Class") + ginkgo.By("Creating PVCs using the Storage Class") count := 0 for count < volume_ops_scale { pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -98,21 +98,21 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { count++ } - By("Waiting for all claims to be in bound phase") + ginkgo.By("Waiting for all claims to be in bound phase") persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PVs to the node") + ginkgo.By("Creating pod to attach PVs to the node") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By("Verify all volumes are accessible and available in the pod") + ginkgo.By("Verify all volumes are accessible and available in the pod") 
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod)) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") for _, pv := range persistentvolumes { waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index 0966bd11e2b..7873b13b7ab 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" @@ -61,7 +61,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { iterations int ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -76,18 +76,18 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) nodes := framework.GetReadySchedulableNodesOrDie(client) - Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items)) + gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items)) msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items)) - Expect(volumeCount).To(BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg) + gomega.Expect(volumeCount).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg) msg = fmt.Sprintf("Cannot attach %d volumes per pod. 
Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode) - Expect(volumesPerPod).To(BeNumerically("<=", SCSIUnitsAvailablePerNode), msg) + gomega.Expect(volumesPerPod).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode), msg) nodeSelectorList = createNodeLabels(client, namespace, nodes) }) - It("vcp performance tests", func() { + ginkgo.It("vcp performance tests", func() { scList := getTestStorageClasses(client, policyName, datastoreName) defer func(scList []*storageV1.StorageClass) { for _, sc := range scList { @@ -124,7 +124,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class - By(fmt.Sprintf("Creating Storage Class : %v", scname)) + ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname)) var sc *storageV1.StorageClass var err error switch scname { @@ -147,7 +147,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } - Expect(sc).NotTo(BeNil()) + gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) scArrays[index] = sc } @@ -165,7 +165,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I latency = make(map[string]float64) numPods := volumeCount / volumesPerPod - By(fmt.Sprintf("Creating %d PVCs", volumeCount)) + ginkgo.By(fmt.Sprintf("Creating %d PVCs", volumeCount)) start := time.Now() for i := 0; i < numPods; i++ { var pvclaims []*v1.PersistentVolumeClaim @@ -185,7 +185,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I elapsed := time.Since(start) latency[CreateOp] = elapsed.Seconds() - By("Creating pod to attach PVs to the node") + ginkgo.By("Creating pod to attach PVs to the node") start = time.Now() for i, pvclaims := range totalpvclaims { nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] @@ -202,7 +202,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I verifyVSphereVolumesAccessible(client, pod, totalpvs[i]) } - By("Deleting pods") + ginkgo.By("Deleting pods") start = time.Now() for _, pod := range totalpods { err := framework.DeletePodWithWait(f, client, pod) @@ -220,7 +220,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I err := waitForVSphereDisksToDetach(nodeVolumeMap) framework.ExpectNoError(err) - By("Deleting the PVCs") + ginkgo.By("Deleting the PVCs") start = time.Now() for _, pvclaims := range totalpvclaims { for _, pvc := range pvclaims { diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 2c1e152d653..4b61542259f 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -21,8 +21,8 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" @@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { nodeInfo *NodeInfo vsp *VSphere ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) c = f.ClientSet @@ -59,13 +59,13 @@ var _ = utils.SIGDescribe("Volume Placement", func() { nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name) vsp = nodeInfo.VSphere } - By("creating vmdk") + ginkgo.By("creating vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) }) - AfterEach(func() { + ginkgo.AfterEach(func() { for _, volumePath := range volumePaths { vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef) } @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { */ - It("should create and delete pod with the same volume source on the same worker node", func() { + ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() { var volumeFiles []string pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) @@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) + ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 13. Delete pod. */ - It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { + ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { var volumeFiles []string pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -152,7 +152,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) + ginkgo.By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths) newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) @@ -177,13 +177,13 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 10. Wait for vmdk1 and vmdk2 to be detached from node. 
*/ - It("should create and delete pod with multiple volumes from same datastore", func() { - By("creating another vmdk") + ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() { + ginkgo.By("creating another vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { } createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -219,8 +219,8 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 9. Delete POD. 10. Wait for vmdk1 and vmdk2 to be detached from node. 
*/ - It("should create and delete pod with multiple volumes from different datastore", func() { - By("creating another vmdk on non default shared datastore") + ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() { + ginkgo.By("creating another vmdk on non default shared datastore") var volumeOptions *VolumeOptions volumeOptions = new(VolumeOptions) volumeOptions.CapacityKB = 2097152 @@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -243,7 +243,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -271,7 +271,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 10. Repeatedly (5 times) perform step 4 to 9 and verify associated volume's content is matching. 11. Wait for vmdk1 and vmdk2 to be detached from node. 
*/ - It("test back to back pod creation and deletion with different volume sources on the same worker node", func() { + ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() { var ( podA *v1.Pod podB *v1.Pod @@ -282,10 +282,10 @@ var _ = utils.SIGDescribe("Volume Placement", func() { ) defer func() { - By("clean up undeleted pods") + ginkgo.By("clean up undeleted pods") framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name) - By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) + ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) for _, volumePath := range volumePaths { framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name)) } @@ -293,17 +293,17 @@ var _ = utils.SIGDescribe("Volume Placement", func() { testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0]) // Create another VMDK Volume - By("creating another vmdk") + ginkgo.By("creating another vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) testvolumePathsPodB = append(testvolumePathsPodA, volumePath) for index := 0; index < 5; index++ { - By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) + ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA) - By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) + ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB) podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1) @@ -312,21 +312,21 @@ var _ = utils.SIGDescribe("Volume Placement", func() { podBFiles = append(podBFiles, podBFileName) // Create empty files on the mounted volumes on the pod to verify volume is writable - By("Creating empty file on volume mounted on pod-A") + ginkgo.By("Creating empty file on volume mounted on pod-A") framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName) - By("Creating empty file volume mounted on pod-B") + ginkgo.By("Creating empty file volume mounted on pod-B") framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName) // Verify newly and previously created files present on the volume mounted on the pod - By("Verify newly Created file and previously created files present on volume mounted on pod-A") + ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-A") verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...) - By("Verify newly Created file and previously created files present on volume mounted on pod-B") + ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-B") verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...) 
- By("Deleting pod-A") + ginkgo.By("Deleting pod-A") framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name) - By("Deleting pod-B") + ginkgo.By("Deleting pod-B") framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name) } }) @@ -354,38 +354,38 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod { var pod *v1.Pod var err error - By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) pod, err = client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) - By("Waiting for pod to be ready") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be ready") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) - By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) + ginkgo.By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) for _, volumePath := range volumePaths { isAttached, err := diskIsAttached(volumePath, nodeName) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node") + gomega.Expect(isAttached).To(gomega.BeTrue(), "disk:"+volumePath+" is not attached with the node") } return pod } func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) { // Create empty files on the mounted volumes on the pod to verify volume is writable - By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname)) + ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname)) createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate) // Verify newly and previously created files present on the volume mounted on the pod - By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname)) + ginkgo.By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname)) verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...) } func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { - By("Deleting pod") + ginkgo.By("Deleting pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) - By("Waiting for volume to be detached from the node") + ginkgo.By("Waiting for volume to be detached from the node") for _, volumePath := range volumePaths { framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName)) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 6cc4412dda9..08900a90a4c 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -21,8 +21,8 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs vcNodesMap map[string][]node ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Requires SSH access to vCenter. framework.SkipUnlessProviderIs("vsphere") @@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs nodes := framework.GetReadySchedulableNodesOrDie(client) numNodes := len(nodes.Items) - Expect(numNodes).NotTo(BeZero(), "No nodes are available for testing volume access through vpxd restart") + gomega.Expect(numNodes).NotTo(gomega.BeZero(), "No nodes are available for testing volume access through vpxd restart") vcNodesMap = make(map[string][]node) for i := 0; i < numNodes; i++ { @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs } }) - It("verify volume remains attached through vpxd restart", func() { + ginkgo.It("verify volume remains attached through vpxd restart", func() { for vcHost, nodes := range vcNodesMap { var ( volumePaths []string @@ -109,28 +109,28 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs e2elog.Logf("Testing for nodes on vCenter host: %s", vcHost) for i, node := range nodes { - By(fmt.Sprintf("Creating test vsphere volume %d", i)) + ginkgo.By(fmt.Sprintf("Creating test vsphere volume %d", i)) volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) + ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) - By(fmt.Sprintf("Waiting for pod %d to be ready", i)) - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pods = append(pods, pod) nodeName := pod.Spec.NodeName - By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) - By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) + ginkgo.By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10)) randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10)) err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent) @@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs fileContents = append(fileContents, randomContent) } - By("Stopping vpxd on the vCenter host") + ginkgo.By("Stopping vpxd on the vCenter host") vcAddress := vcHost + ":22" err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress) 
framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host") @@ -147,7 +147,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs expectFilesToBeAccessible(namespace, pods, filePaths) expectFileContentsToMatch(namespace, pods, filePaths, fileContents) - By("Starting vpxd on the vCenter host") + ginkgo.By("Starting vpxd on the vCenter host") err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress) framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host") @@ -160,15 +160,15 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs nodeName := pod.Spec.NodeName volumePath := volumePaths[i] - By(fmt.Sprintf("Deleting pod on node %s", nodeName)) + ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) err = waitForVSphereDiskToDetach(volumePath, nodeName) framework.ExpectNoError(err) - By(fmt.Sprintf("Deleting volume %s", volumePath)) + ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 4af59a4025e..ab587cb51db 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -23,8 +23,8 @@ import ( "strings" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp tagPolicy string masterNode string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -111,13 +111,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp framework.Failf("Unable to find ready and schedulable Node") } masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client) - Expect(masternodes).NotTo(BeEmpty()) + gomega.Expect(masternodes).NotTo(gomega.BeEmpty()) masterNode = masternodes.List()[0] }) // Valid policy. 
- It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -125,8 +125,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) scParameters[Policy_DiskStripes] = "1" scParameters[Policy_ObjectSpaceReservation] = "30" e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -134,8 +134,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = VsanDatastore @@ -144,8 +144,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. 
- It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -153,13 +153,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Invalid VSAN storage capabilities parameters. - It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) + ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -168,13 +168,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Invalid policy on a VSAN test bed. // diskStripes value has to be between 1 and 12. - It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Invalid value for " + Policy_DiskStripes + "." 
if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -183,12 +183,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Invalid policy on a VSAN test bed. // hostFailuresToTolerate value has to be between 0 and 3 including. - It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) + ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "." if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -197,14 +197,14 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Specify a valid VSAN policy on a non-VSAN test bed. // The test should fail. - It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore)) + ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore)) scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = VmfsDatastore e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " + "The policy parameters will work only with VSAN Datastore." 
if !strings.Contains(err.Error(), errorMsg) { @@ -212,15 +212,15 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp } }) - It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) + ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) scParameters[SpbmStoragePolicy] = policyName scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters) invokeValidPolicyTest(f, client, namespace, scParameters) }) - It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() { + ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() { scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = VsanDatastore @@ -229,42 +229,42 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters) }) - It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore)) + ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore)) scParameters[SpbmStoragePolicy] = tagPolicy scParameters[Datastore] = VsanDatastore scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) } }) - It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) + ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) scParameters[SpbmStoragePolicy] = BronzeStoragePolicy scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) } }) - It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", 
policyName)) + ginkgo.It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName)) scParameters[SpbmStoragePolicy] = policyName - Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty()) + gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty()) scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -273,71 +273,71 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { - By("Creating Storage Class With storage policy params") + ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodWithWait(f, client, pod) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { - By("Creating Storage Class With storage policy params") + ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer 
client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) } func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { - By("Creating Storage Class With storage policy params") + ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Expect claim to fail provisioning volume") + ginkgo.By("Expect claim to fail provisioning volume") _, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -351,5 +351,5 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.." nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode) - Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg) + gomega.Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(gomega.BeTrue(), errorMsg) } diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index f1679557a4a..03f0e0052ff 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -21,8 +21,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Zone Support", func() { zoneC string zoneD string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -115,52 +115,52 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { - By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) + ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) zones = append(zones, zoneA) verifyPVZoneLabels(client, namespace, nil, zones) }) - It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { - By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) + ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVZoneLabels(client, namespace, nil, zones) }) - It("Verify PVC creation with invalid zone specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD)) + ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD)) zones = append(zones, zoneD) err := verifyPVCCreationFails(client, namespace, nil, zones) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) } }) - It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { - By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) }) - It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { - By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) }) - It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, 
vsanDatastore1)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) + ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, scParameters, zones) @@ -170,22 +170,22 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) + ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) + ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy zones = append(zones, zoneA) err := verifyPVCCreationFails(client, namespace, scParameters, zones) @@ -195,16 +195,16 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) + ginkgo.It("Verify a pod is created and attached to a 
dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) + ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) @@ -215,8 +215,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) + ginkgo.It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore2 zones = append(zones, zoneC) @@ -227,8 +227,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { - By(fmt.Sprintf("Creating storage class with no zones")) + ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with no zones")) err := verifyPVCCreationFails(client, namespace, nil, nil) errorMsg := "No shared datastores found in the Kubernetes cluster" if !strings.Contains(err.Error(), errorMsg) { @@ -236,8 +236,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { - By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) + ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 err := verifyPVCCreationFails(client, namespace, scParameters, nil) errorMsg := "No shared datastores found in the Kubernetes cluster" @@ -246,8 +246,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { - 
By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) + ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy err := verifyPVCCreationFails(client, namespace, scParameters, nil) errorMsg := "No shared datastores found in the Kubernetes cluster" @@ -256,8 +256,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { - By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) + ginkgo.It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 err := verifyPVCCreationFails(client, namespace, scParameters, nil) @@ -267,8 +267,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { - By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) + ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, nil, zones) errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]" @@ -277,8 +277,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func() { - By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. 
(No shared datastores exist among both zones)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) zones = append(zones, zoneA) zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, nil, zones) @@ -288,8 +288,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) + ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal zones = append(zones, zoneA) err := verifyPVCCreationFails(client, namespace, scParameters, zones) @@ -299,8 +299,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal scParameters[Datastore] = vsanDatastore1 @@ -314,31 +314,31 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By("Verify persistent volume was created on the right zone") + ginkgo.By("Verify persistent volume was created on the right zone") verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones) - By("Verify the volume 
is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodWithWait(f, client, pod) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } @@ -347,7 +347,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) @@ -355,9 +355,9 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) e2elog.Logf("Failure message : %+q", eventList.Items[0].Message) @@ -369,23 +369,23 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the storage class") + ginkgo.By("Creating PVC using the storage class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Verify zone information is present in the volume labels") + ginkgo.By("Verify zone information is present in the volume labels") for _, pv := range persistentvolumes { // Multiple zones are separated with "__" pvZoneLabels := strings.Split(pv.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"], "__") for _, zone := range zones { - Expect(pvZoneLabels).Should(ContainElement(zone), "Incorrect or missing zone labels in pv.") + gomega.Expect(pvZoneLabels).Should(gomega.ContainElement(zone), "Incorrect or missing zone labels in pv.") } } } From 1058877fbf77c368747115d237347c6b6fc6a2ad Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 13:56:26 +0800 Subject: [PATCH 091/194] remove dot imports in e2e/storage --- test/e2e/storage/csi_mock_volume.go | 90 ++++---- test/e2e/storage/csi_volumes.go | 26 +-- test/e2e/storage/detach_mounted.go | 28 +-- 
test/e2e/storage/drivers/csi.go | 14 +- test/e2e/storage/drivers/in_tree.go | 68 +++--- test/e2e/storage/empty_dir_wrapper.go | 26 +-- test/e2e/storage/ephemeral_volume.go | 8 +- test/e2e/storage/flexvolume.go | 26 +-- .../flexvolume_mounted_volume_resize.go | 40 ++-- test/e2e/storage/flexvolume_online_resize.go | 34 +-- .../generic_persistent_volume-disruptive.go | 22 +- test/e2e/storage/in_tree_volumes.go | 4 +- test/e2e/storage/mounted_volume_resize.go | 36 +-- .../nfs_persistent_volume-disruptive.go | 42 ++-- test/e2e/storage/pd.go | 114 +++++----- test/e2e/storage/persistent_volumes-gce.go | 40 ++-- test/e2e/storage/persistent_volumes-local.go | 212 +++++++++--------- test/e2e/storage/persistent_volumes.go | 76 +++---- test/e2e/storage/pv_protection.go | 36 +-- test/e2e/storage/pvc_protection.go | 56 ++--- test/e2e/storage/regional_pd.go | 62 ++--- test/e2e/storage/subpath.go | 8 +- test/e2e/storage/testsuites/base.go | 24 +- test/e2e/storage/testsuites/multivolume.go | 46 ++-- test/e2e/storage/testsuites/provisioning.go | 100 ++++----- test/e2e/storage/testsuites/snapshottable.go | 32 +-- test/e2e/storage/testsuites/subpath.go | 108 ++++----- test/e2e/storage/testsuites/volume_io.go | 16 +- test/e2e/storage/testsuites/volumemode.go | 45 ++-- test/e2e/storage/testsuites/volumes.go | 10 +- test/e2e/storage/utils/local.go | 22 +- test/e2e/storage/utils/utils.go | 48 ++-- test/e2e/storage/volume_expand.go | 62 ++--- test/e2e/storage/volume_limits.go | 6 +- test/e2e/storage/volume_metrics.go | 84 +++---- test/e2e/storage/volume_provisioning.go | 146 ++++++------ test/e2e/storage/volumes.go | 8 +- 37 files changed, 912 insertions(+), 913 deletions(-) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 7c9b7dfba58..0ef00d2287d 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -42,8 +42,8 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) type cleanupFuncs func() @@ -132,7 +132,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { - By("Creating pod") + ginkgo.By("Creating pod") var sc *storagev1.StorageClass if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok { sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") @@ -197,12 +197,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { var errs []error for _, pod := range m.pods { - By(fmt.Sprintf("Deleting pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) errs = append(errs, framework.DeletePodWithWait(f, cs, pod)) } for _, claim := range m.pvcs { - By(fmt.Sprintf("Deleting claim %s", claim.Name)) + ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name)) claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) if err == nil { cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) @@ -212,11 +212,11 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, sc := range m.sc { - By(fmt.Sprintf("Deleting storageclass %s", sc.Name)) + ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name)) cs.StorageV1().StorageClasses().Delete(sc.Name, nil) } - By("Cleaning up resources") + ginkgo.By("Cleaning up resources") for _, cleanupFunc := range m.testCleanups { cleanupFunc() } @@ -230,7 +230,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12. - Context("CSI attach test using mock driver", func() { + ginkgo.Context("CSI attach test using mock driver", func() { tests := []struct { name string disableAttach bool @@ -252,7 +252,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - It(t.name, func() { + ginkgo.It(t.name, func() { var err error init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach}) defer cleanup() @@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) - By("Checking if VolumeAttachment was created for the pod") + ginkgo.By("Checking if VolumeAttachment was created for the pod") handle := getVolumeHandle(m.cs, claim) attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName))) attachmentName := fmt.Sprintf("csi-%x", attachmentHash) @@ -279,14 +279,14 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } } if test.disableAttach { - Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found") + gomega.Expect(err).To(gomega.HaveOccurred(), "Unexpected VolumeAttachment found") } }) } }) - Context("CSI workload information using mock driver", func() { + ginkgo.Context("CSI workload information using mock driver", func() { var ( err error podInfoTrue = true @@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - It(t.name, func() { + ginkgo.It(t.name, func() { init(testParameters{ registerDriver: test.deployClusterRegistrar, scName: "csi-mock-sc-" + f.UniqueName, @@ -338,7 +338,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) 
framework.ExpectNoError(err, "Failed to start pod: %v", err) - By("Checking CSI driver logs") + ginkgo.By("Checking CSI driver logs") // The driver is deployed as a statefulset with stable pod names driverPodName := "csi-mockplugin-0" @@ -348,8 +348,8 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } }) - Context("CSI volume limit information using mock driver", func() { - It("should report attach limit when limit is bigger than 0 [Slow]", func() { + ginkgo.Context("CSI volume limit information using mock driver", func() { + ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func() { // define volume limit to be 2 for this test var err error @@ -362,28 +362,28 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { nodeAttachLimit, err := checkNodeForLimits(nodeName, attachKey, m.cs) framework.ExpectNoError(err, "while fetching node %v", err) - Expect(nodeAttachLimit).To(Equal(2)) + gomega.Expect(nodeAttachLimit).To(gomega.Equal(2)) _, _, pod1 := createPod() - Expect(pod1).NotTo(BeNil(), "while creating first pod") + gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod") err = framework.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) _, _, pod2 := createPod() - Expect(pod2).NotTo(BeNil(), "while creating second pod") + gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod") err = framework.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace) framework.ExpectNoError(err, "Failed to start pod2: %v", err) _, _, pod3 := createPod() - Expect(pod3).NotTo(BeNil(), "while creating third pod") + gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod") err = waitForMaxVolumeCondition(pod3, m.cs) framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3) }) }) - Context("CSI Volume expansion [Feature:ExpandCSIVolumes]", func() { + ginkgo.Context("CSI Volume expansion [Feature:ExpandCSIVolumes]", func() { tests := []struct { name string nodeExpansionRequired bool @@ -412,7 +412,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - It(t.name, func() { + ginkgo.It(t.name, func() { var err error tp := testParameters{ enableResizing: true, @@ -430,18 +430,18 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { ns := f.Namespace.Name sc, pvc, pod := createPod() - Expect(pod).NotTo(BeNil(), "while creating pod for resizing") + gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") - Expect(*sc.AllowVolumeExpansion).To(BeTrue(), "failed creating sc with allowed expansion") + gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion") err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, m.cs) framework.ExpectNoError(err, "While updating pvc for more size") - Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { @@ -449,43 +449,43 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } if test.expectFailure { err = waitForResizingCondition(pvc, m.cs, csiResizingConditionWait) - Expect(err).To(HaveOccurred(), "unexpected resizing condition on PVC") + 
gomega.Expect(err).To(gomega.HaveOccurred(), "unexpected resizing condition on PVC") return } - By("Waiting for persistent volume resize to finish") + ginkgo.By("Waiting for persistent volume resize to finish") err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for CSI PV resize to finish") checkPVCSize := func() { - By("Waiting for PVC resize to finish") + ginkgo.By("Waiting for PVC resize to finish") pvc, err = waitForFSResize(pvc, m.cs) framework.ExpectNoError(err, "while waiting for PVC resize to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") } // if node expansion is not required PVC should be resized as well if !test.nodeExpansionRequired { checkPVCSize() } else { - By("Checking for conditions on pvc") + ginkgo.By("Checking for conditions on pvc") pvc, err = m.cs.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While fetching pvc after controller resize") inProgressConditions := pvc.Status.Conditions if len(inProgressConditions) > 0 { - Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition") + gomega.Expect(inProgressConditions[0].Type).To(gomega.Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition") } - By("Deleting the previously created pod") + ginkgo.By("Deleting the previously created pod") err = framework.DeletePodWithWait(f, m.cs, pod) framework.ExpectNoError(err, "while deleting pod for resizing") - By("Creating a new pod with same volume") + ginkgo.By("Creating a new pod with same volume") pod2, err := createPodWithPVC(pvc) - Expect(pod2).NotTo(BeNil(), "while creating pod for csi resizing") + gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod for csi resizing") framework.ExpectNoError(err, "while recreating pod for resizing") checkPVCSize() @@ -493,7 +493,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { }) } }) - Context("CSI online volume expansion [Feature:ExpandCSIVolumes][Feature:ExpandInUseVolumes]", func() { + ginkgo.Context("CSI online volume expansion [Feature:ExpandCSIVolumes][Feature:ExpandInUseVolumes]", func() { tests := []struct { name string disableAttach bool @@ -508,7 +508,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - It(test.name, func() { + ginkgo.It(test.name, func() { var err error params := testParameters{enableResizing: true, enableNodeExpansion: true} if test.disableAttach { @@ -521,34 +521,34 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { defer cleanup() sc, pvc, pod := createPod() - Expect(pod).NotTo(BeNil(), "while creating pod for resizing") + gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") - Expect(*sc.AllowVolumeExpansion).To(BeTrue(), "failed creating sc with allowed expansion") + gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion") err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, m.cs) framework.ExpectNoError(err, "While updating pvc for more size") - 
Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvc.Name) } - By("Waiting for persistent volume resize to finish") + ginkgo.By("Waiting for persistent volume resize to finish") err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for PV resize to finish") - By("Waiting for PVC resize to finish") + ginkgo.By("Waiting for PVC resize to finish") pvc, err = waitForFSResize(pvc, m.cs) framework.ExpectNoError(err, "while waiting for PVC to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") }) } @@ -801,7 +801,7 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st return "" } if pv.Spec.CSI == nil { - Expect(pv.Spec.CSI).NotTo(BeNil()) + gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil()) return "" } return pv.Spec.CSI.VolumeHandle diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index e3cebb088a0..08fc7b87fef 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -26,8 +26,8 @@ import ( "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/rand" ) @@ -55,52 +55,52 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { for _, initDriver := range csiTestDrivers { curDriver := initDriver() - Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { + ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { testsuites.DefineTestSuite(curDriver, csiTestSuites) }) } // TODO: PD CSI driver needs to be serial because it uses a fixed name. Address as part of #71289 - Context("CSI Topology test using GCE PD driver [Serial]", func() { + ginkgo.Context("CSI Topology test using GCE PD driver [Serial]", func() { f := framework.NewDefaultFramework("csitopology") driver := drivers.InitGcePDCSIDriver().(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite. 
var ( config *testsuites.PerTestConfig testCleanup func() ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { driver.SkipUnsupportedTest(testpatterns.TestPattern{}) config, testCleanup = driver.PrepareTest(f) }) - AfterEach(func() { + ginkgo.AfterEach(func() { if testCleanup != nil { testCleanup() } }) - It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() { + ginkgo.It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() { suffix := "topology-positive" testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */) }) - It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() { + ginkgo.It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() { suffix := "delayed" testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */) }) - It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() { + ginkgo.It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() { suffix := "delayed-topology-positive" testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */) }) - It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() { + ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() { framework.SkipUnlessMultizone(config.Framework.ClientSet) suffix := "topology-negative" testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */) }) - It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() { + ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() { framework.SkipUnlessMultizone(config.Framework.ClientSet) suffix := "delayed-topology-negative" testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */) @@ -124,7 +124,7 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela if delayBinding { _, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */) - Expect(node).ToNot(BeNil(), "Unexpected nil node found") + gomega.Expect(node).ToNot(gomega.BeNil(), "Unexpected nil node found") } else { test.TestDynamicProvisioning() } @@ -136,7 +136,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela // Use different zones for pod and PV zones, err := framework.GetClusterZones(cs) framework.ExpectNoError(err) - Expect(zones.Len()).To(BeNumerically(">=", 2)) + gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2)) zonesList := zones.UnsortedList() podZoneIndex := rand.Intn(zones.Len()) podZone := zonesList[podZoneIndex] diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index e25e1c6dc15..8a64d3e93a2 100644 --- 
a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var ( @@ -49,7 +49,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() { var node v1.Node var suffix string - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "local") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") @@ -62,13 +62,13 @@ var _ = utils.SIGDescribe("Detaching volumes", func() { suffix = ns.Name }) - It("should not work when mount is in progress [Slow]", func() { + ginkgo.It("should not work when mount is in progress [Slow]", func() { driver := "attachable-with-long-mount" driverInstallAs := driver + "-" + suffix - By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver)) - By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver)) volumeSource := v1.VolumeSource{ FlexVolume: &v1.FlexVolumeSource{ @@ -77,31 +77,31 @@ var _ = utils.SIGDescribe("Detaching volumes", func() { } clientPod := getFlexVolumePod(volumeSource, node.Name) - By("Creating pod that uses slow format volume") + ginkgo.By("Creating pod that uses slow format volume") pod, err := cs.CoreV1().Pods(ns.Name).Create(clientPod) framework.ExpectNoError(err) uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs) - By("waiting for volumes to be attached to node") + ginkgo.By("waiting for volumes to be attached to node") err = waitForVolumesAttached(cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume to attach to %s node", node.Name) - By("waiting for volume-in-use on the node after pod creation") + ginkgo.By("waiting for volume-in-use on the node after pod creation") err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume in use") - By("waiting for kubelet to start mounting the volume") + ginkgo.By("waiting for kubelet to start mounting the volume") time.Sleep(20 * time.Second) - By("Deleting the flexvolume pod") + ginkgo.By("Deleting the flexvolume pod") err = framework.DeletePodWithWait(f, cs, pod) framework.ExpectNoError(err, "in deleting the pod") // Wait a bit for node to sync the volume status time.Sleep(30 * time.Second) - By("waiting for volume-in-use on the node after pod deletion") + ginkgo.By("waiting for volume-in-use on the node after pod deletion") err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume in use") @@ -109,13 +109,13 @@ var _ = utils.SIGDescribe("Detaching volumes", func() { // we previously already waited for 30s. 
time.Sleep(durationForStuckMount) - By("waiting for volume to disappear from node in-use") + ginkgo.By("waiting for volume to disappear from node in-use") err = waitForVolumesNotInUse(cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume to be removed from in-use") - By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) + ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) uninstallFlex(cs, &node, "k8s", driverInstallAs) - By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) + ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) uninstallFlex(cs, nil, "k8s", driverInstallAs) }) }) diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index e9c03319d20..b4e0c88a574 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -40,7 +40,7 @@ import ( "math/rand" "strconv" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" @@ -127,7 +127,7 @@ func (h *hostpathCSIDriver) GetClaimSize() string { } func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name)) + ginkgo.By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name)) cancelLogging := testsuites.StartPodLogs(f) cs := f.ClientSet @@ -161,7 +161,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per } return config, func() { - By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name)) + ginkgo.By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name)) cleanup() cancelLogging() } @@ -258,7 +258,7 @@ func (m *mockCSIDriver) GetClaimSize() string { } func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - By("deploying csi mock driver") + ginkgo.By("deploying csi mock driver") cancelLogging := testsuites.StartPodLogs(f) cs := f.ClientSet @@ -306,7 +306,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest } return config, func() { - By("uninstalling csi mock driver") + ginkgo.By("uninstalling csi mock driver") cleanup() cancelLogging() } @@ -391,7 +391,7 @@ func (g *gcePDCSIDriver) GetClaimSize() string { } func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - By("deploying csi gce-pd driver") + ginkgo.By("deploying csi gce-pd driver") cancelLogging := testsuites.StartPodLogs(f) // It would be safer to rename the gcePD driver, but that // hasn't been done before either and attempts to do so now led to @@ -426,7 +426,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes Prefix: "gcepd", Framework: f, }, func() { - By("uninstalling gce-pd driver") + ginkgo.By("uninstalling gce-pd driver") cleanup() cancelLogging() } diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 5a16e80534b..5bcd3c3426a 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -43,8 +43,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" storagev1 "k8s.io/api/storage/v1" @@ -114,7 +114,7 @@ func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { nv, ok := volume.(*nfsVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to NFS test volume") return &v1.VolumeSource{ NFS: &v1.NFSVolumeSource{ Server: nv.serverIP, @@ -126,7 +126,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { nv, ok := volume.(*nfsVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to NFS test volume") return &v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{ Server: nv.serverIP, @@ -165,7 +165,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization: %v", err) - By("creating an external dynamic provisioner pod") + ginkgo.By("creating an external dynamic provisioner pod") n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName) return &testsuites.PerTestConfig{ @@ -255,7 +255,7 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { gv, ok := volume.(*glusterVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Gluster test volume") name := gv.prefix + "-server" return &v1.VolumeSource{ @@ -270,7 +270,7 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume t func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { gv, ok := volume.(*glusterVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Gluster test volume") name := gv.prefix + "-server" return &v1.PersistentVolumeSource{ @@ -378,7 +378,7 @@ func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { iv, ok := volume.(*iSCSIVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to iSCSI test volume") volSource := v1.VolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ @@ -396,7 +396,7 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume tests func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { iv, ok := volume.(*iSCSIVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test 
volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to iSCSI test volume") pvSource := v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIPersistentVolumeSource{ @@ -491,7 +491,7 @@ func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { rv, ok := volume.(*rbdVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to RBD test volume") volSource := v1.VolumeSource{ RBD: &v1.RBDVolumeSource{ @@ -513,7 +513,7 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { rv, ok := volume.(*rbdVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to RBD test volume") f := rv.f ns := f.Namespace @@ -614,7 +614,7 @@ func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { cv, ok := volume.(*cephVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Ceph test volume") return &v1.VolumeSource{ CephFS: &v1.CephFSVolumeSource{ @@ -630,7 +630,7 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { cv, ok := volume.(*cephVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Ceph test volume") ns := cv.f.Namespace @@ -784,7 +784,7 @@ func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPat func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { hv, ok := volume.(*hostPathSymlinkVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume") // hostPathSymlink doesn't support readOnly volume if readOnly { @@ -859,13 +859,13 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v } // h.prepPod will be reused in cleanupDriver. 
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod) - Expect(err).ToNot(HaveOccurred(), "while creating hostPath init pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath init pod") err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) - Expect(err).ToNot(HaveOccurred(), "while waiting for hostPath init pod to succeed") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath init pod to succeed") err = framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while deleting hostPath init pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath init pod") return &hostPathSymlinkVolume{ sourcePath: sourcePath, targetPath: targetPath, @@ -881,13 +881,13 @@ func (v *hostPathSymlinkVolume) DeleteVolume() { v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod) - Expect(err).ToNot(HaveOccurred(), "while creating hostPath teardown pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath teardown pod") err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) - Expect(err).ToNot(HaveOccurred(), "while waiting for hostPath teardown pod to succeed") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath teardown pod to succeed") err = framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while deleting hostPath teardown pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath teardown pod") } // emptydir @@ -995,7 +995,7 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { cv, ok := volume.(*cinderVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Cinder test volume") volSource := v1.VolumeSource{ Cinder: &v1.CinderVolumeSource{ @@ -1011,7 +1011,7 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume test func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { cv, ok := volume.(*cinderVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Cinder test volume") pvSource := v1.PersistentVolumeSource{ Cinder: &v1.CinderPersistentVolumeSource{ @@ -1055,7 +1055,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te // We assume that namespace.Name is a random string volumeName := ns.Name - By("creating a test Cinder volume") + ginkgo.By("creating a test Cinder volume") output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput() outputString := string(output[:]) e2elog.Logf("cinder output:\n%s", outputString) @@ -1079,7 +1079,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te break } e2elog.Logf("Volume ID: %s", volumeID) - Expect(volumeID).NotTo(Equal("")) + gomega.Expect(volumeID).NotTo(gomega.Equal("")) return &cinderVolume{ volumeName: volumeName, volumeID: volumeID, @@ -1166,7 +1166,7 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern 
testpatterns.TestPattern) { func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { gv, ok := volume.(*gcePdVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to GCE PD test volume") volSource := v1.VolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: gv.volumeName, @@ -1181,7 +1181,7 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume tests func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { gv, ok := volume.(*gcePdVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to GCE PD test volume") pvSource := v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: gv.volumeName, @@ -1234,7 +1234,7 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone, } } - By("creating a test gce pd volume") + ginkgo.By("creating a test gce pd volume") vname, err := framework.CreatePDWithRetry() framework.ExpectNoError(err) return &gcePdVolume{ @@ -1291,7 +1291,7 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { vsv, ok := volume.(*vSphereVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to vSphere test volume") // vSphere driver doesn't seem to support readOnly volume // TODO: check if it is correct @@ -1311,7 +1311,7 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume tes func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { vsv, ok := volume.(*vSphereVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to vSphere test volume") // vSphere driver doesn't seem to support readOnly volume // TODO: check if it is correct @@ -1415,7 +1415,7 @@ func (a *azureDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { av, ok := volume.(*azureVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Azure test volume") diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] @@ -1434,7 +1434,7 @@ func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, volume tests func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { av, ok := volume.(*azureVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Azure test volume") diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] @@ -1476,7 +1476,7 @@ func (a *azureDriver) 
PrepareTest(f *framework.Framework) (*testsuites.PerTestCo } func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { - By("creating a test azure disk volume") + ginkgo.By("creating a test azure disk volume") volumeName, err := framework.CreatePDWithRetry() framework.ExpectNoError(err) return &azureVolume{ @@ -1589,7 +1589,7 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf // TODO: Fix authorization error in attach operation and uncomment below /* func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { - By("creating a test aws volume") + ginkgo.By("creating a test aws volume") var err error a.volumeName, err = framework.CreatePDWithRetry() framework.ExpectNoError(err)) @@ -1773,7 +1773,7 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { lv, ok := volume.(*localVolume) - Expect(ok).To(BeTrue(), "Failed to cast test volume to local test volume") + gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to local test volume") return &v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{ Path: lv.ltr.Path, diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 48373f660aa..56b949752b6 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -29,8 +29,8 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -144,15 +144,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { pod = f.PodClient().CreateSync(pod) defer func() { - By("Cleaning up the secret") + ginkgo.By("Cleaning up the secret") if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } - By("Cleaning up the configmap") + ginkgo.By("Cleaning up the configmap") if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil { framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) } - By("Cleaning up the pod") + ginkgo.By("Cleaning up the pod") if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete pod %v: %v", pod.Name, err) } @@ -194,7 +194,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { // This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance. // To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container. // This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem. 
- It("should not cause race condition when used for git_repo [Serial] [Slow]", func() { + ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func() { gitURL, gitRepo, cleanup := createGitServer(f) defer cleanup() volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo) @@ -255,11 +255,11 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle } return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { - By("Cleaning up the git server pod") + ginkgo.By("Cleaning up the git server pod") if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } - By("Cleaning up the git server svc") + ginkgo.By("Cleaning up the git server svc") if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } @@ -287,7 +287,7 @@ func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMoun } func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { - By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount)) + ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount)) for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ { configMapName := fmt.Sprintf("racey-configmap-%d", i) configMapNames = append(configMapNames, configMapName) @@ -307,7 +307,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { } func deleteConfigMaps(f *framework.Framework, configMapNames []string) { - By("Cleaning up the configMaps") + ginkgo.By("Cleaning up the configMaps") for _, configMapName := range configMapNames { err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil) framework.ExpectNoError(err, "unable to delete configMap %v", configMapName) @@ -346,10 +346,10 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID()) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) + gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0)) targetNode := nodeList.Items[0] - By("Creating RC which spawns configmap-volume pods") + ginkgo.By("Creating RC which spawns configmap-volume pods") affinity := &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ @@ -412,7 +412,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) - By("Ensuring each pod is running") + ginkgo.By("Ensuring each pod is running") // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go index fe901cc334a..421e458ba43 100644 --- a/test/e2e/storage/ephemeral_volume.go +++ b/test/e2e/storage/ephemeral_volume.go @@ -30,7 +30,7 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var ( @@ -46,13 +46,13 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() { f := framework.NewDefaultFramework("pv") - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet }) - Describe("When pod refers to non-existent ephemeral storage", func() { + ginkgo.Describe("When pod refers to non-existent ephemeral storage", func() { for _, testSource := range invalidEphemeralSource("pod-ephm-test") { - It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() { + ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() { pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source) pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index 9fd649bebb2..cd0cae880f3 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -24,7 +24,7 @@ import ( "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" clientset "k8s.io/client-go/kubernetes" @@ -92,7 +92,7 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir) sshAndLog(cmd, host, true /*failOnError*/) - data := testfiles.ReadOrDie(filePath, Fail) + data := testfiles.ReadOrDie(filePath, ginkgo.Fail) cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data)) sshAndLog(cmd, host, true /*failOnError*/) @@ -164,7 +164,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { var config volume.TestConfig var suffix string - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "local") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") @@ -182,36 +182,36 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { suffix = ns.Name }) - It("should be mountable when non-attachable", func() { + ginkgo.It("should be mountable when non-attachable", func() { driver := "dummy" driverInstallAs := driver + "-" + suffix - By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver)) testFlexVolume(driverInstallAs, cs, config, f) - By("waiting for flex client pod to terminate") + ginkgo.By("waiting for flex client pod to terminate") if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err) } - By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) + ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) uninstallFlex(cs, &node, "k8s", driverInstallAs) }) - It("should be mountable when attachable", func() { + ginkgo.It("should be mountable when attachable", func() { driver := "dummy-attachable" driverInstallAs := driver + "-" + suffix - By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", 
path.Join(driverDir, driver), node.Name, driverInstallAs)) installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver)) - By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver)) testFlexVolume(driverInstallAs, cs, config, f) - By("waiting for flex client pod to terminate") + ginkgo.By("waiting for flex client pod to terminate") if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err) } @@ -219,9 +219,9 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { // Detach might occur after pod deletion. Wait before deleting driver. time.Sleep(detachTimeout) - By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) + ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) uninstallFlex(cs, &node, "k8s", driverInstallAs) - By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) + ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) uninstallFlex(cs, nil, "k8s", driverInstallAs) }) }) diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 9f7f8450959..cb8a24c1ee2 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -20,8 +20,8 @@ import ( "fmt" "path" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { ) f := framework.NewDefaultFramework("mounted-flexvolume-expand") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("aws", "gce", "local") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") @@ -88,7 +88,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { fmt.Printf("storage class creation error: %v\n", err) } framework.ExpectNoError(err, "Error creating resizable storage class") - Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) + gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue()) pvc = getClaim("2Gi", ns) pvc.Spec.StorageClassName = &resizableSc.Name @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize") if c != nil { @@ -114,13 +114,13 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { } }) - It("Should verify mounted flex volumes can be resized", func() { + ginkgo.It("Should verify mounted flex volumes can be resized", func() { driver := "dummy-attachable" nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) node := nodeList.Items[0] - By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) installFlex(c, 
&node, "k8s", driver, path.Join(driverDir, driver)) - By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver)) installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver)) pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{ @@ -136,52 +136,52 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { pv, err = framework.CreatePV(c, pv) framework.ExpectNoError(err, "Error creating pv %v", err) - By("Waiting for PVC to be in bound phase") + ginkgo.By("Waiting for PVC to be in bound phase") pvcClaims := []*v1.PersistentVolumeClaim{pvc} var pvs []*v1.PersistentVolume pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) - By("Creating a deployment with the provisioned volume") + ginkgo.By("Creating a deployment with the provisioned volume") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, c) framework.ExpectNoError(err, "While updating pvc for more size") - Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvc.Name) } - By("Waiting for cloudprovider resize to finish") + ginkgo.By("Waiting for cloudprovider resize to finish") err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") - By("Getting a pod from deployment") + ginkgo.By("Getting a pod from deployment") podList, err := e2edeploy.GetPodsForDeployment(c, deployment) - Expect(podList.Items).NotTo(BeEmpty()) + gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] - By("Deleting the pod from deployment") + ginkgo.By("Deleting the pod from deployment") err = framework.DeletePodWithWait(f, c, &pod) framework.ExpectNoError(err, "while deleting pod for resizing") - By("Waiting for deployment to create new pod") + ginkgo.By("Waiting for deployment to create new pod") pod, err = waitForDeploymentToRecreatePod(c, deployment) framework.ExpectNoError(err, "While waiting for pod to be recreated") - By("Waiting for file system resize to finish") + ginkgo.By("Waiting for file system resize to finish") pvc, err = waitForFSResize(pvc, c) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") }) }) diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index f592c969460..956865e30ea 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -20,8 +20,8 @@ import ( "fmt" "path" - . 
"github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -49,7 +49,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa ) f := framework.NewDefaultFramework("mounted-flexvolume-expand") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("aws", "gce", "local") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") @@ -86,7 +86,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa fmt.Printf("storage class creation error: %v\n", err) } framework.ExpectNoError(err, "Error creating resizable storage class: %v", err) - Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) + gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue()) pvc = getClaim("2Gi", ns) pvc.Spec.StorageClassName = &resizableSc.Name @@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa } }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize") if c != nil { @@ -113,13 +113,13 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa } }) - It("should be resizable when mounted", func() { + ginkgo.It("should be resizable when mounted", func() { driver := "dummy-attachable" node := nodeList.Items[0] - By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver)) - By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver)) + ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver)) installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver)) pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{ @@ -135,44 +135,44 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa pv, err = framework.CreatePV(c, pv) framework.ExpectNoError(err, "Error creating pv %v", err) - By("Waiting for PVC to be in bound phase") + ginkgo.By("Waiting for PVC to be in bound phase") pvcClaims := []*v1.PersistentVolumeClaim{pvc} var pvs []*v1.PersistentVolume pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) var pod *v1.Pod - By("Creating pod") + ginkgo.By("Creating pod") pod, err = framework.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims) framework.ExpectNoError(err, "Failed to create pod %v", err) defer framework.DeletePodWithWait(f, c, pod) - By("Waiting for pod to go to 'running' state") + ginkgo.By("Waiting for pod to go to 'running' state") err = f.WaitForPodRunning(pod.ObjectMeta.Name) framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err) - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, c) framework.ExpectNoError(err, "While updating pvc for more size") - 
Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvc.Name) } - By("Waiting for cloudprovider resize to finish") + ginkgo.By("Waiting for cloudprovider resize to finish") err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") - By("Waiting for file system resize to finish") + ginkgo.By("Waiting for file system resize to finish") pvc, err = waitForFSResize(pvc, c) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") }) }) diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index 8d635b9c48d..bec7ccd361c 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go @@ -17,8 +17,8 @@ limitations under the License. package storage import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -35,7 +35,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { ns string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Skip tests unless number of nodes is 2 framework.SkipUnlessNodeCountIsAtLeast(2) framework.SkipIfProviderIs("local") @@ -56,8 +56,8 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { runTest: utils.TestVolumeUnmountsFromForceDeletedPod, }, } - Context("When kubelet restarts", func() { - // Test table housing the It() title string and test spec. runTest is type testBody, defined at + ginkgo.Context("When kubelet restarts", func() { + // Test table housing the ginkgo.It() title string and test spec. runTest is type testBody, defined at // the start of this file. To add tests, define a function mirroring the testBody signature and assign // to runTest. 
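// A minimal, self-contained sketch of the table-driven spec pattern used in
// these disruptive tests: each table entry carries the ginkgo.It() title and a
// test body, and the loop passes the entry into an immediately-invoked closure
// so every generated spec keeps its own copy of the loop variable. The type and
// names below (sampleCase, tableDrivenSketch) are illustrative assumptions; run
// it under the usual gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs
// bootstrap.
package tabledrivensketch

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

type sampleCase struct {
	title   string
	runTest func() int
	want    int
}

var sampleCases = []sampleCase{
	{title: "body returns one", runTest: func() int { return 1 }, want: 1},
	{title: "body returns two", runTest: func() int { return 2 }, want: 2},
}

var _ = ginkgo.Describe("tableDrivenSketch", func() {
	for _, test := range sampleCases {
		func(t sampleCase) {
			// Binding t here pins the current entry; capturing `test` directly
			// would make every generated spec see the last table entry.
			ginkgo.It(t.title, func() {
				gomega.Expect(t.runTest()).To(gomega.Equal(t.want))
			})
		}(test)
	}
})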
var ( @@ -65,19 +65,19 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { pvc *v1.PersistentVolumeClaim pv *v1.PersistentVolume ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { e2elog.Logf("Initializing pod and pvcs for test") clientPod, pvc, pv = createPodPVCFromSC(f, c, ns) }) for _, test := range disruptiveTestTable { func(t disruptiveTest) { - It(t.testItStmt, func() { - By("Executing Spec") + ginkgo.It(t.testItStmt, func() { + ginkgo.By("Executing Spec") t.runTest(c, f, clientPod) }) }(test) } - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("Tearing down test spec") tearDownTestCase(c, f, ns, clientPod, pvc, pv, false) pvc, clientPod = nil, nil @@ -97,9 +97,9 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) - By("Creating a pod with dynamically provisioned volume") + ginkgo.By("Creating a pod with dynamically provisioned volume") pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims) framework.ExpectNoError(err, "While creating pods for kubelet restart test") return pod, pvc, pvs[0] diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go index 42e5ec3bc62..183a142dd36 100644 --- a/test/e2e/storage/in_tree_volumes.go +++ b/test/e2e/storage/in_tree_volumes.go @@ -17,7 +17,7 @@ limitations under the License. package storage import ( - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("In-tree Volumes", func() { for _, initDriver := range testDrivers { curDriver := initDriver() - Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { + ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { testsuites.DefineTestSuite(curDriver, testSuites) }) } diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 24bdf4aad85..843e8a954be 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -19,8 +19,8 @@ package storage import ( "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" @@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { ) f := framework.NewDefaultFramework("mounted-volume-expand") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("aws", "gce") c = f.ClientSet ns = f.Namespace.Name @@ -83,7 +83,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { } resizableSc, err = createStorageClass(test, ns, "resizing", c) framework.ExpectNoError(err, "Error creating resizable storage class") - Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) + gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue()) pvc = newClaim(test, ns, "default") pvc.Spec.StorageClassName = &resizableSc.Name @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize") if c != nil { @@ -109,57 +109,57 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { } }) - It("Should verify mounted devices can be resized", func() { + ginkgo.It("Should verify mounted devices can be resized", func() { pvcClaims := []*v1.PersistentVolumeClaim{pvc} // The reason we use a node selector is because we do not want pod to move to different node when pod is deleted. // Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted. // We should consider adding a unit test that exercises this better. - By("Creating a deployment with selected PVC") + ginkgo.By("Creating a deployment with selected PVC") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) // PVC should be bound at this point - By("Checking for bound PVC") + ginkgo.By("Checking for bound PVC") pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, c) framework.ExpectNoError(err, "While updating pvc for more size") - Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvc.Name) } - By("Waiting for cloudprovider resize to finish") + ginkgo.By("Waiting for cloudprovider resize to finish") err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") - By("Getting a pod from deployment") + ginkgo.By("Getting a pod from deployment") podList, err := e2edeploy.GetPodsForDeployment(c, deployment) - Expect(podList.Items).NotTo(BeEmpty()) + gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] - By("Deleting the pod from deployment") + ginkgo.By("Deleting the pod from deployment") err = framework.DeletePodWithWait(f, c, &pod) framework.ExpectNoError(err, "while deleting pod for resizing") - By("Waiting for deployment to 
create new pod") + ginkgo.By("Waiting for deployment to create new pod") pod, err = waitForDeploymentToRecreatePod(c, deployment) framework.ExpectNoError(err, "While waiting for pod to be recreated") - By("Waiting for file system resize to finish") + ginkgo.By("Waiting for file system resize to finish") pvc, err = waitForFSResize(pvc, c) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") }) }) diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 48868f41545..9fa50814c1d 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { selector *metav1.LabelSelector ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node. framework.SkipUnlessNodeCountIsAtLeast(MinNodes) framework.SkipIfProviderIs("local") @@ -98,15 +98,15 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { break } } - Expect(clientNodeIP).NotTo(BeEmpty()) + gomega.Expect(clientNodeIP).NotTo(gomega.BeEmpty()) } }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.DeletePodWithWait(f, c, nfsServerPod) }) - Context("when kube-controller-manager restarts", func() { + ginkgo.Context("when kube-controller-manager restarts", func() { var ( diskName1, diskName2 string err error @@ -117,11 +117,11 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { clientPod *v1.Pod ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce") framework.SkipUnlessSSHKeyPresent() - By("Initializing first PD with PVPVC binding") + ginkgo.By("Initializing first PD with PVPVC binding") pvSource1, diskName1 = volume.CreateGCEVolume() framework.ExpectNoError(err) pvConfig1 = framework.PersistentVolumeConfig{ @@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1)) - By("Initializing second PD with PVPVC binding") + ginkgo.By("Initializing second PD with PVPVC binding") pvSource2, diskName2 = volume.CreateGCEVolume() framework.ExpectNoError(err) pvConfig2 = framework.PersistentVolumeConfig{ @@ -147,12 +147,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2)) - By("Attaching both PVC's to a single pod") + ginkgo.By("Attaching both PVC's to a single pod") clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "") framework.ExpectNoError(err) }) - AfterEach(func() { + ginkgo.AfterEach(func() { // Delete client/user pod first framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) @@ -175,20 +175,20 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", 
func() { } }) - It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() { - By("Deleting PVC for volume 2") + ginkgo.It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() { + ginkgo.By("Deleting PVC for volume 2") err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns) framework.ExpectNoError(err) pvc2 = nil - By("Restarting the kube-controller-manager") + ginkgo.By("Restarting the kube-controller-manager") err = framework.RestartControllerManager() framework.ExpectNoError(err) err = framework.WaitForControllerManagerUp() framework.ExpectNoError(err) e2elog.Logf("kube-controller-manager restarted") - By("Observing the kube-controller-manager healthy for at least 2 minutes") + ginkgo.By("Observing the kube-controller-manager healthy for at least 2 minutes") // Continue checking for 2 minutes to make sure kube-controller-manager is healthy err = framework.CheckForControllerManagerHealthy(2 * time.Minute) framework.ExpectNoError(err) @@ -196,25 +196,25 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { }) - Context("when kubelet restarts", func() { + ginkgo.Context("when kubelet restarts", func() { var ( clientPod *v1.Pod pv *v1.PersistentVolume pvc *v1.PersistentVolumeClaim ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { e2elog.Logf("Initializing test spec") clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name) }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("Tearing down test spec") tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */) pv, pvc, clientPod = nil, nil, nil }) - // Test table housing the It() title string and test spec. runTest is type testBody, defined at + // Test table housing the ginkgo.It() title string and test spec. runTest is type testBody, defined at // the start of this file. To add tests, define a function mirroring the testBody signature and assign // to runTest. disruptiveTestTable := []disruptiveTest{ @@ -235,8 +235,8 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { // Test loop executes each disruptiveTest iteratively. for _, test := range disruptiveTestTable { func(t disruptiveTest) { - It(t.testItStmt, func() { - By("Executing Spec") + ginkgo.It(t.testItStmt, func() { + ginkgo.By("Executing Spec") t.runTest(c, f, clientPod) }) }(test) diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 255e52af0ac..c4f4614983e 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -27,8 +27,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { ) f := framework.NewDefaultFramework("pod-disks") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessNodeCountIsAtLeast(minNodes) cs = f.ClientSet ns = f.Namespace.Name @@ -77,14 +77,14 @@ var _ = utils.SIGDescribe("Pod Disks", func() { podClient = cs.CoreV1().Pods(ns) nodeClient = cs.CoreV1().Nodes() nodes = framework.GetReadySchedulableNodesOrDie(cs) - Expect(len(nodes.Items)).To(BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes)) + gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes)) host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name) host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name) mathrand.Seed(time.Now().UnixNano()) }) - Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() { + ginkgo.Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() { const ( podDefaultGrace = "default (30s)" podImmediateGrace = "immediate (0s)" @@ -126,29 +126,29 @@ var _ = utils.SIGDescribe("Pod Disks", func() { readOnly := t.readOnly readOnlyTxt := readOnlyMap[readOnly] - It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func() { + ginkgo.It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func() { framework.SkipUnlessProviderIs("gce", "gke", "aws") if readOnly { framework.SkipIfProviderIs("aws") } - By("creating PD") + ginkgo.By("creating PD") diskName, err := framework.CreatePDWithRetry() framework.ExpectNoError(err, "Error creating PD") var fmtPod *v1.Pod if readOnly { // if all test pods are RO then need a RW pod to format pd - By("creating RW fmt Pod to ensure PD is formatted") + ginkgo.By("creating RW fmt Pod to ensure PD is formatted") fmtPod = testPDPod([]string{diskName}, host0Name, false, 1) _, err = podClient.Create(fmtPod) framework.ExpectNoError(err, "Failed to create fmtPod") framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name)) - By("deleting the fmtPod") + ginkgo.By("deleting the fmtPod") framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod") e2elog.Logf("deleted fmtPod %q", fmtPod.Name) - By("waiting for PD to detach") + ginkgo.By("waiting for PD to detach") framework.ExpectNoError(waitForPDDetach(diskName, host0Name)) } @@ -158,7 +158,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { defer func() { // Teardown should do nothing unless test failed - By("defer: cleaning up PD-RW test environment") + ginkgo.By("defer: cleaning up PD-RW test environment") e2elog.Logf("defer cleanup errors can usually be ignored") if fmtPod != nil { podClient.Delete(fmtPod.Name, podDelOpt) @@ -168,7 +168,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) }() - By("creating host0Pod on node0") + ginkgo.By("creating host0Pod on node0") _, err = podClient.Create(host0Pod) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) @@ -176,50 +176,50 @@ var _ = utils.SIGDescribe("Pod Disks", func() { var containerName, testFile, testFileContents string if !readOnly { - By("writing content to 
host0Pod on node0") + ginkgo.By("writing content to host0Pod on node0") containerName = "mycontainer" testFile = "/testpd1/tracker" testFileContents = fmt.Sprintf("%v", mathrand.Int()) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name) - By("verifying PD is present in node0's VolumeInUse list") + ginkgo.By("verifying PD is present in node0's VolumeInUse list") framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */)) - By("deleting host0Pod") // delete this pod before creating next pod + ginkgo.By("deleting host0Pod") // delete this pod before creating next pod framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod") e2elog.Logf("deleted host0Pod %q", host0Pod.Name) } - By("creating host1Pod on node1") + ginkgo.By("creating host1Pod on node1") _, err = podClient.Create(host1Pod) framework.ExpectNoError(err, "Failed to create host1Pod") framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name)) e2elog.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name) if readOnly { - By("deleting host0Pod") + ginkgo.By("deleting host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod") e2elog.Logf("deleted host0Pod %q", host0Pod.Name) } else { - By("verifying PD contents in host1Pod") + ginkgo.By("verifying PD contents in host1Pod") verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents}) e2elog.Logf("verified PD contents in pod %q", host1Pod.Name) - By("verifying PD is removed from node0") + ginkgo.By("verifying PD is removed from node0") framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */)) e2elog.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name) } - By("deleting host1Pod") + ginkgo.By("deleting host1Pod") framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to delete host1Pod") e2elog.Logf("deleted host1Pod %q", host1Pod.Name) - By("Test completed successfully, waiting for PD to detach from both nodes") + ginkgo.By("Test completed successfully, waiting for PD to detach from both nodes") waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host1Name) }) } }) - Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow]", func() { + ginkgo.Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow]", func() { type testT struct { numContainers int numPDs int @@ -242,14 +242,14 @@ var _ = utils.SIGDescribe("Pod Disks", func() { numPDs := t.numPDs numContainers := t.numContainers - It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() { + ginkgo.It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() { framework.SkipUnlessProviderIs("gce", "gke", "aws") var host0Pod *v1.Pod var err error fileAndContentToVerify := make(map[string]string) diskNames := make([]string, 0, numPDs) - By(fmt.Sprintf("creating %d PD(s)", numPDs)) + ginkgo.By(fmt.Sprintf("creating %d PD(s)", numPDs)) for i := 0; i < numPDs; i++ { name, err := framework.CreatePDWithRetry() framework.ExpectNoError(err, 
fmt.Sprintf("Error creating PD %d", i)) @@ -258,7 +258,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { defer func() { // Teardown should do nothing unless test failed. - By("defer: cleaning up PD-RW test environment") + ginkgo.By("defer: cleaning up PD-RW test environment") e2elog.Logf("defer cleanup errors can usually be ignored") if host0Pod != nil { podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)) @@ -270,13 +270,13 @@ var _ = utils.SIGDescribe("Pod Disks", func() { for i := 0; i < t.repeatCnt; i++ { // "rapid" repeat loop e2elog.Logf("PD Read/Writer Iteration #%v", i) - By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers)) + ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers)) host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers) _, err = podClient.Create(host0Pod) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) - By(fmt.Sprintf("writing %d file(s) via a container", numPDs)) + ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs)) containerName := "mycontainer" if numContainers > 1 { containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) @@ -289,16 +289,16 @@ var _ = utils.SIGDescribe("Pod Disks", func() { e2elog.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name) } - By("verifying PD contents via a container") + ginkgo.By("verifying PD contents via a container") if numContainers > 1 { containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) } verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) - By("deleting host0Pod") + ginkgo.By("deleting host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod") } - By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs)) + ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs)) for _, diskName := range diskNames { waitForPDDetach(diskName, host0Name) } @@ -306,7 +306,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { } }) - Context("detach in a disrupted environment [Slow] [Disruptive]", func() { + ginkgo.Context("detach in a disrupted environment [Slow] [Disruptive]", func() { const ( deleteNode = 1 // delete physical node deleteNodeObj = 2 // delete node's api object only @@ -333,11 +333,11 @@ var _ = utils.SIGDescribe("Pod Disks", func() { for _, t := range tests { disruptOp := t.disruptOp - It(fmt.Sprintf("when %s", t.descr), func() { + ginkgo.It(fmt.Sprintf("when %s", t.descr), func() { framework.SkipUnlessProviderIs("gce") origNodeCnt := len(nodes.Items) // healhy nodes running kubelet - By("creating a pd") + ginkgo.By("creating a pd") diskName, err := framework.CreatePDWithRetry() framework.ExpectNoError(err, "Error creating a pd") @@ -346,21 +346,21 @@ var _ = utils.SIGDescribe("Pod Disks", func() { containerName := "mycontainer" defer func() { - By("defer: cleaning up PD-RW test env") + ginkgo.By("defer: cleaning up PD-RW test env") e2elog.Logf("defer cleanup errors can usually be ignored") - By("defer: delete host0Pod") + ginkgo.By("defer: delete host0Pod") podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)) - By("defer: detach and delete PDs") + ginkgo.By("defer: detach and delete PDs") 
detachAndDeletePDs(diskName, []types.NodeName{host0Name}) if disruptOp == deleteNode || disruptOp == deleteNodeObj { if disruptOp == deleteNodeObj { targetNode.ObjectMeta.SetResourceVersion("0") // need to set the resource version or else the Create() fails - By("defer: re-create host0 node object") + ginkgo.By("defer: re-create host0 node object") _, err := nodeClient.Create(targetNode) framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name)) } - By("defer: verify the number of ready nodes") + ginkgo.By("defer: verify the number of ready nodes") numNodes := countReadyNodes(cs, host0Name) // if this defer is reached due to an Expect then nested // Expects are lost, so use Failf here @@ -370,43 +370,43 @@ var _ = utils.SIGDescribe("Pod Disks", func() { } }() - By("creating host0Pod on node0") + ginkgo.By("creating host0Pod on node0") _, err = podClient.Create(host0Pod) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) - By("waiting for host0Pod to be running") + ginkgo.By("waiting for host0Pod to be running") framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) - By("writing content to host0Pod") + ginkgo.By("writing content to host0Pod") testFile := "/testpd1/tracker" testFileContents := fmt.Sprintf("%v", mathrand.Int()) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name) - By("verifying PD is present in node0's VolumeInUse list") + ginkgo.By("verifying PD is present in node0's VolumeInUse list") framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/)) if disruptOp == deleteNode { - By("getting gce instances") + ginkgo.By("getting gce instances") gceCloud, err := gce.GetGCECloud() framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err)) output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone) framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output)) - Expect(true, strings.Contains(string(output), string(host0Name))) + gomega.Expect(true, strings.Contains(string(output), string(host0Name))) - By("deleting host0") + ginkgo.By("deleting host0") err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name)) framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err)) - By("expecting host0 node to be re-created") + ginkgo.By("expecting host0 node to be re-created") numNodes := countReadyNodes(cs, host0Name) - Expect(numNodes).To(Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)) + gomega.Expect(numNodes).To(gomega.Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)) output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone) framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output)) - Expect(false, strings.Contains(string(output), string(host0Name))) + gomega.Expect(false, strings.Contains(string(output), string(host0Name))) } else if disruptOp == 
deleteNodeObj { - By("deleting host0's node api object") + ginkgo.By("deleting host0's node api object") framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object") - By("deleting host0Pod") + ginkgo.By("deleting host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod") } else if disruptOp == evictPod { @@ -416,7 +416,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { Namespace: ns, }, } - By("evicting host0Pod") + ginkgo.By("evicting host0Pod") err = wait.PollImmediate(framework.Poll, podEvictTimeout, func() (bool, error) { err = cs.CoreV1().Pods(ns).Evict(evictTarget) if err != nil { @@ -428,16 +428,16 @@ var _ = utils.SIGDescribe("Pod Disks", func() { framework.ExpectNoError(err, fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout)) } - By("waiting for pd to detach from host0") + ginkgo.By("waiting for pd to detach from host0") waitForPDDetach(diskName, host0Name) }) } }) - It("should be able to delete a non-existent PD without error", func() { + ginkgo.It("should be able to delete a non-existent PD without error", func() { framework.SkipUnlessProviderIs("gce") - By("delete a PD") + ginkgo.By("delete a PD") framework.ExpectNoError(framework.DeletePDWithRetry("non-exist")) }) }) @@ -472,7 +472,7 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName break } } - Expect(strings.TrimSpace(value)).To(Equal(strings.TrimSpace(expectedContents))) + gomega.Expect(strings.TrimSpace(value)).To(gomega.Equal(strings.TrimSpace(expectedContents))) } } @@ -608,10 +608,10 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) { for _, host := range hosts { e2elog.Logf("Detaching GCE PD %q from node %q.", diskName, host) detachPD(host, diskName) - By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host)) + ginkgo.By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host)) waitForPDDetach(diskName, host) } - By(fmt.Sprintf("Deleting PD %q", diskName)) + ginkgo.By(fmt.Sprintf("Deleting PD %q", diskName)) framework.ExpectNoError(framework.DeletePDWithRetry(diskName)) } diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index f70d079fd48..d79d67c44b1 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -17,8 +17,8 @@ limitations under the License. package storage import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -42,12 +42,12 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool { // initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up. 
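// A compact, self-contained sketch of the wait.PollImmediate pattern used for
// the eviction above: the condition func returns (true, nil) to stop polling,
// (false, nil) to retry until the timeout, and a non-nil error to abort early.
// The interval, timeout, and attemptEvict stand-in below are illustrative
// assumptions rather than framework constants.
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	budgetBlocks := 2 // pretend the first attempts would violate a disruption budget

	attemptEvict := func() error {
		if budgetBlocks > 0 {
			budgetBlocks--
			return errors.New("cannot evict pod: would violate the pod's disruption budget")
		}
		return nil
	}

	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		if evictErr := attemptEvict(); evictErr != nil {
			// Not done yet; keep retrying until the timeout expires.
			return false, nil
		}
		return true, nil
	})
	fmt.Println("eviction result:", err) // <nil> once attemptEvict succeeds
}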
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { - By("Creating the PV and PVC") + ginkgo.By("Creating the PV and PVC") pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) - By("Creating the Client Pod") + ginkgo.By("Creating the Client Pod") clientPod, err := framework.CreateClientPod(c, ns, pvc) framework.ExpectNoError(err) return clientPod, pv, pvc @@ -71,7 +71,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { ) f := framework.NewDefaultFramework("pv") - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name @@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { selector = metav1.SetAsLabelSelector(volLabel) framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing Test Spec") + ginkgo.By("Initializing Test Spec") diskName, err = framework.CreatePDWithRetry() framework.ExpectNoError(err) pvConfig = framework.PersistentVolumeConfig{ @@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { node = types.NodeName(clientPod.Spec.NodeName) }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources") if c != nil { framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) @@ -120,45 +120,45 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { // Attach a persistent disk to a pod using a PVC. // Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. - It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() { + ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() { - By("Deleting the Claim") + ginkgo.By("Deleting the Claim") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name) - Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue()) + gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue()) - By("Deleting the Pod") + ginkgo.By("Deleting the Pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) - By("Verifying Persistent Disk detach") + ginkgo.By("Verifying Persistent Disk detach") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") }) // Attach a persistent disk to a pod using a PVC. // Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. 
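// The conversion this patch applies throughout test/e2e/storage, shown as a
// minimal self-contained suite: ginkgo and gomega are imported by package name
// instead of dot-imported, so every DSL call is qualified (ginkgo.Describe,
// ginkgo.By, gomega.Expect). The suite description and spec below are
// illustrative; only the RegisterFailHandler/RunSpecs wiring is standard.
package sketch_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func TestQualifiedImportSketch(t *testing.T) {
	// gomega reports assertion failures through ginkgo's Fail handler.
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "qualified-import sketch")
}

var _ = ginkgo.Describe("qualified ginkgo/gomega usage", func() {
	ginkgo.It("reads the same as the dot-imported form, just with prefixes", func() {
		ginkgo.By("asserting through a qualified matcher")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})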
- It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() { + ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() { - By("Deleting the Persistent Volume") + ginkgo.By("Deleting the Persistent Volume") framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) - Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue()) + gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue()) - By("Deleting the client pod") + ginkgo.By("Deleting the client pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) - By("Verifying Persistent Disk detaches") + ginkgo.By("Verifying Persistent Disk detaches") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") }) // Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted. - It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() { + ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() { - By("Deleting the Namespace") + ginkgo.By("Deleting the Namespace") err := c.CoreV1().Namespaces().Delete(ns, nil) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout) framework.ExpectNoError(err) - By("Verifying Persistent Disk detaches") + ginkgo.By("Verifying Persistent Disk detaches") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") }) }) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index eb069aad270..5b06a95e54e 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -24,8 +24,8 @@ import ( "sync" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -145,10 +145,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { scName string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Get all the schedulable nodes nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling") + gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling") // Cap max number of nodes maxLen := len(nodes.Items) @@ -187,10 +187,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr) testMode := immediateMode - Context(ctxString, func() { + ginkgo.Context(ctxString, func() { var testVol *localTestVolume - BeforeEach(func() { + ginkgo.BeforeEach(func() { if testVolType == GCELocalSSDVolumeType { SkipUnlessLocalSSDExists(config, "scsi", "fs", config.node0) } @@ -199,99 +199,99 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { testVol = testVols[0] }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupLocalVolumes(config, []*localTestVolume{testVol}) cleanupStorageClass(config) }) - Context("One pod requesting one prebound PVC", func() { + ginkgo.Context("One pod requesting one prebound PVC", func() { var ( pod1 *v1.Pod pod1Err error ) - BeforeEach(func() { - By("Creating pod1") + ginkgo.BeforeEach(func() { + ginkgo.By("Creating pod1") pod1, pod1Err = createLocalPod(config, testVol, nil) framework.ExpectNoError(pod1Err) verifyLocalPod(config, testVol, pod1, config.node0.Name) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) - By("Writing in pod1") + ginkgo.By("Writing in pod1") podRWCmdExec(pod1, writeCmd) }) - AfterEach(func() { - By("Deleting pod1") + ginkgo.AfterEach(func() { + ginkgo.By("Deleting pod1") framework.DeletePodOrFail(config.client, config.ns, pod1.Name) }) - It("should be able to mount volume and read from pod1", func() { - By("Reading in pod1") + ginkgo.It("should be able to mount volume and read from pod1", func() { + ginkgo.By("Reading in pod1") // testFileContent was written in BeforeEach testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType) }) - It("should be able to mount volume and write from pod1", func() { + ginkgo.It("should be able to mount volume and write from pod1", func() { // testFileContent was written in BeforeEach testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType) - By("Writing in pod1") + ginkgo.By("Writing in pod1") writeCmd := createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVolType) podRWCmdExec(pod1, writeCmd) }) }) - Context("Two pods mounting a local volume at the same time", func() { - It("should be able to write from pod1 and read from pod2", func() { + ginkgo.Context("Two pods mounting a local volume at the same time", func() { + ginkgo.It("should be able to write from pod1 and read from pod2", func() { twoPodsReadWriteTest(config, testVol) }) }) - Context("Two pods mounting a local volume one after the other", func() { - It("should be able to write from pod1 and read from pod2", func() { + ginkgo.Context("Two pods mounting a local volume one after the other", func() { + ginkgo.It("should be able to write from pod1 and read from pod2", func() { twoPodsReadWriteSerialTest(config, testVol) }) }) - Context("Set fsGroup for local volume", 
func() { - BeforeEach(func() { + ginkgo.Context("Set fsGroup for local volume", func() { + ginkgo.BeforeEach(func() { if testVolType == BlockLocalVolumeType { framework.Skipf("We don't set fsGroup on block device, skipped.") } }) - It("should set fsGroup for one pod [Slow]", func() { - By("Checking fsGroup is set") + ginkgo.It("should set fsGroup for one pod [Slow]", func() { + ginkgo.By("Checking fsGroup is set") pod := createPodWithFsGroupTest(config, testVol, 1234, 1234) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodOrFail(config.client, config.ns, pod.Name) }) - It("should set same fsGroup for two pods simultaneously [Slow]", func() { + ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func() { fsGroup := int64(1234) - By("Create first pod and check fsGroup is set") + ginkgo.By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) - By("Create second pod with same fsGroup and check fsGroup is correct") + ginkgo.By("Create second pod with same fsGroup and check fsGroup is correct") pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) - By("Deleting first pod") + ginkgo.By("Deleting first pod") framework.DeletePodOrFail(config.client, config.ns, pod1.Name) - By("Deleting second pod") + ginkgo.By("Deleting second pod") framework.DeletePodOrFail(config.client, config.ns, pod2.Name) }) - It("should set different fsGroup for second pod if first pod is deleted", func() { + ginkgo.It("should set different fsGroup for second pod if first pod is deleted", func() { framework.Skipf("Disabled temporarily, reopen after #73168 is fixed") fsGroup1, fsGroup2 := int64(1234), int64(4321) - By("Create first pod and check fsGroup is set") + ginkgo.By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1) - By("Deleting first pod") + ginkgo.By("Deleting first pod") err := framework.DeletePodWithWait(f, config.client, pod1) framework.ExpectNoError(err, "while deleting first pod") - By("Create second pod and check fsGroup is the new one") + ginkgo.By("Create second pod and check fsGroup is the new one") pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2) - By("Deleting second pod") + ginkgo.By("Deleting second pod") framework.DeletePodOrFail(config.client, config.ns, pod2.Name) }) }) @@ -299,10 +299,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) } - Context("Local volume that cannot be mounted [Slow]", func() { + ginkgo.Context("Local volume that cannot be mounted [Slow]", func() { // TODO: // - check for these errors in unit tests instead - It("should fail due to non-existent path", func() { + ginkgo.It("should fail due to non-existent path", func() { testVol := &localTestVolume{ ltr: &utils.LocalTestResource{ Node: config.node0, @@ -310,16 +310,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }, localVolumeType: DirectoryLocalVolumeType, } - By("Creating local PVC and PV") + ginkgo.By("Creating local PVC and PV") createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode) pod, err := createLocalPod(config, testVol, nil) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) cleanupLocalPVCsPVs(config, []*localTestVolume{testVol}) }) - 
It("should fail due to wrong node", func() { + ginkgo.It("should fail due to wrong node", func() { if len(config.nodes) < 2 { framework.Skipf("Runs only when number of nodes >= 2") } @@ -332,19 +332,19 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { framework.ExpectNoError(err) err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) cleanupLocalVolumes(config, []*localTestVolume{testVol}) }) }) - Context("Pod with node different from PV's NodeAffinity", func() { + ginkgo.Context("Pod with node different from PV's NodeAffinity", func() { var ( testVol *localTestVolume volumeType localVolumeType ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { if len(config.nodes) < 2 { framework.Skipf("Runs only when number of nodes >= 2") } @@ -355,78 +355,78 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { testVol = testVols[0] }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupLocalVolumes(config, []*localTestVolume{testVol}) cleanupStorageClass(config) }) - It("should fail scheduling due to different NodeAffinity", func() { + ginkgo.It("should fail scheduling due to different NodeAffinity", func() { testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeAffinity, immediateMode) }) - It("should fail scheduling due to different NodeSelector", func() { + ginkgo.It("should fail scheduling due to different NodeSelector", func() { testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeSelector, immediateMode) }) }) - Context("StatefulSet with pod affinity [Slow]", func() { + ginkgo.Context("StatefulSet with pod affinity [Slow]", func() { var testVols map[string][]*localTestVolume const ( ssReplicas = 3 volsPerNode = 6 ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { setupStorageClass(config, &waitMode) testVols = map[string][]*localTestVolume{} for i, node := range config.nodes { // The PVCs created here won't be used - By(fmt.Sprintf("Setting up local volumes on node %q", node.Name)) + ginkgo.By(fmt.Sprintf("Setting up local volumes on node %q", node.Name)) vols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, &config.nodes[i], volsPerNode, waitMode) testVols[node.Name] = vols } }) - AfterEach(func() { + ginkgo.AfterEach(func() { for _, vols := range testVols { cleanupLocalVolumes(config, vols) } cleanupStorageClass(config) }) - It("should use volumes spread across nodes when pod has anti-affinity", func() { + ginkgo.It("should use volumes spread across nodes when pod has anti-affinity", func() { if len(config.nodes) < ssReplicas { framework.Skipf("Runs only when number of nodes >= %v", ssReplicas) } - By("Creating a StatefulSet with pod anti-affinity on nodes") + ginkgo.By("Creating a StatefulSet with pod anti-affinity on nodes") ss := createStatefulSet(config, ssReplicas, volsPerNode, true, false) validateStatefulSet(config, ss, true) }) - It("should use volumes on one node when pod has affinity", func() { - By("Creating a StatefulSet with pod affinity on nodes") + ginkgo.It("should use volumes on one node when pod has affinity", func() { + ginkgo.By("Creating a StatefulSet with pod affinity on nodes") ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false) validateStatefulSet(config, ss, false) }) - It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() { + 
ginkgo.It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() { if len(config.nodes) < ssReplicas { framework.Skipf("Runs only when number of nodes >= %v", ssReplicas) } - By("Creating a StatefulSet with pod anti-affinity on nodes") + ginkgo.By("Creating a StatefulSet with pod anti-affinity on nodes") ss := createStatefulSet(config, ssReplicas, 1, true, true) validateStatefulSet(config, ss, true) }) - It("should use volumes on one node when pod management is parallel and pod has affinity", func() { - By("Creating a StatefulSet with pod affinity on nodes") + ginkgo.It("should use volumes on one node when pod management is parallel and pod has affinity", func() { + ginkgo.By("Creating a StatefulSet with pod affinity on nodes") ss := createStatefulSet(config, ssReplicas, 1, false, true) validateStatefulSet(config, ss, false) }) }) - Context("Stress with local volumes [Serial]", func() { + ginkgo.Context("Stress with local volumes [Serial]", func() { var ( allLocalVolumes = make(map[string][]*localTestVolume) volType = TmpfsLocalVolumeType @@ -440,13 +440,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { podsFactor = 4 ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { setupStorageClass(config, &waitMode) for i, node := range config.nodes { - By(fmt.Sprintf("Setting up %d local volumes on node %q", volsPerNode, node.Name)) + ginkgo.By(fmt.Sprintf("Setting up %d local volumes on node %q", volsPerNode, node.Name)) allLocalVolumes[node.Name] = setupLocalVolumes(config, volType, &config.nodes[i], volsPerNode) } - By(fmt.Sprintf("Create %d PVs", volsPerNode*len(config.nodes))) + ginkgo.By(fmt.Sprintf("Create %d PVs", volsPerNode*len(config.nodes))) var err error for _, localVolumes := range allLocalVolumes { for _, localVolume := range localVolumes { @@ -455,7 +455,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { framework.ExpectNoError(err) } } - By("Start a goroutine to recycle unbound PVs") + ginkgo.By("Start a goroutine to recycle unbound PVs") wg.Add(1) go func() { defer wg.Done() @@ -483,7 +483,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { continue } // Delete and create a new PV for same local volume storage - By(fmt.Sprintf("Delete %q and create a new PV for same local volume storage", pv.Name)) + ginkgo.By(fmt.Sprintf("Delete %q and create a new PV for same local volume storage", pv.Name)) for _, localVolumes := range allLocalVolumes { for _, localVolume := range localVolumes { if localVolume.pv.Name != pv.Name { @@ -503,19 +503,19 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }() }) - AfterEach(func() { - By("Stop and wait for recycle goroutine to finish") + ginkgo.AfterEach(func() { + ginkgo.By("Stop and wait for recycle goroutine to finish") close(stopCh) wg.Wait() - By("Clean all PVs") + ginkgo.By("Clean all PVs") for nodeName, localVolumes := range allLocalVolumes { - By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName)) + ginkgo.By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName)) cleanupLocalVolumes(config, localVolumes) } cleanupStorageClass(config) }) - It("should be able to process many pods and reuse local volumes", func() { + ginkgo.It("should be able to process many pods and reuse local volumes", func() { var ( podsLock sync.Mutex // Have one extra pod pending @@ -528,7 +528,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { // Create pods gradually instead 
of all at once because scheduler has // exponential backoff - By(fmt.Sprintf("Creating %v pods periodically", numConcurrentPods)) + ginkgo.By(fmt.Sprintf("Creating %v pods periodically", numConcurrentPods)) stop := make(chan struct{}) go wait.Until(func() { podsLock.Lock() @@ -573,7 +573,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } }() - By("Waiting for all pods to complete successfully") + ginkgo.By("Waiting for all pods to complete successfully") err := wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) { podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{}) if err != nil { @@ -605,12 +605,12 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) }) - Context("Pods sharing a single local PV [Serial]", func() { + ginkgo.Context("Pods sharing a single local PV [Serial]", func() { var ( pv *v1.PersistentVolume ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { localVolume := &localTestVolume{ ltr: &utils.LocalTestResource{ Node: config.node0, @@ -624,16 +624,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { framework.ExpectNoError(err) }) - AfterEach(func() { + ginkgo.AfterEach(func() { if pv == nil { return } - By(fmt.Sprintf("Clean PV %s", pv.Name)) + ginkgo.By(fmt.Sprintf("Clean PV %s", pv.Name)) err := config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err) }) - It("all pods should be running", func() { + ginkgo.It("all pods should be running", func() { var ( pvc *v1.PersistentVolumeClaim pods = map[string]*v1.Pod{} @@ -641,17 +641,17 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { err error ) pvc = framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns) - By(fmt.Sprintf("Create a PVC %s", pvc.Name)) + ginkgo.By(fmt.Sprintf("Create a PVC %s", pvc.Name)) pvc, err = framework.CreatePVC(config.client, config.ns, pvc) framework.ExpectNoError(err) - By(fmt.Sprintf("Create %d pods to use this PVC", count)) + ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count)) for i := 0; i < count; i++ { pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, selinuxLabel, nil) pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) framework.ExpectNoError(err) pods[pod.Name] = pod } - By("Wait for all pods are running") + ginkgo.By("Wait for all pods are running") err = wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) { podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{}) if err != nil { @@ -692,7 +692,7 @@ func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nodeName string) *v1.Pod func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) { - By(fmt.Sprintf("local-volume-type: %s", testVolType)) + ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVolType)) testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, bindingMode) testVol := testVols[0] @@ -708,20 +708,20 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp // Test two pods at the same time, write from pod1, and read from pod2 func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) { - By("Creating pod1 to write to the 
PV") + ginkgo.By("Creating pod1 to write to the PV") pod1, pod1Err := createLocalPod(config, testVol, nil) framework.ExpectNoError(pod1Err) verifyLocalPod(config, testVol, pod1, config.node0.Name) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) - By("Writing in pod1") + ginkgo.By("Writing in pod1") podRWCmdExec(pod1, writeCmd) // testFileContent was written after creating pod1 testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) - By("Creating pod2 to read from the PV") + ginkgo.By("Creating pod2 to read from the PV") pod2, pod2Err := createLocalPod(config, testVol, nil) framework.ExpectNoError(pod2Err) verifyLocalPod(config, testVol, pod2, config.node0.Name) @@ -731,45 +731,45 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) { writeCmd = createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVol.localVolumeType) - By("Writing in pod2") + ginkgo.By("Writing in pod2") podRWCmdExec(pod2, writeCmd) - By("Reading in pod1") + ginkgo.By("Reading in pod1") testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType) - By("Deleting pod1") + ginkgo.By("Deleting pod1") framework.DeletePodOrFail(config.client, config.ns, pod1.Name) - By("Deleting pod2") + ginkgo.By("Deleting pod2") framework.DeletePodOrFail(config.client, config.ns, pod2.Name) } // Test two pods one after other, write from pod1, and read from pod2 func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) { - By("Creating pod1") + ginkgo.By("Creating pod1") pod1, pod1Err := createLocalPod(config, testVol, nil) framework.ExpectNoError(pod1Err) verifyLocalPod(config, testVol, pod1, config.node0.Name) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) - By("Writing in pod1") + ginkgo.By("Writing in pod1") podRWCmdExec(pod1, writeCmd) // testFileContent was written after creating pod1 testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) - By("Deleting pod1") + ginkgo.By("Deleting pod1") framework.DeletePodOrFail(config.client, config.ns, pod1.Name) - By("Creating pod2") + ginkgo.By("Creating pod2") pod2, pod2Err := createLocalPod(config, testVol, nil) framework.ExpectNoError(pod2Err) verifyLocalPod(config, testVol, pod2, config.node0.Name) - By("Reading in pod2") + ginkgo.By("Reading in pod2") testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) - By("Deleting pod2") + ginkgo.By("Deleting pod2") framework.DeletePodOrFail(config.client, config.ns, pod2.Name) } @@ -810,7 +810,7 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType, vols := []*localTestVolume{} for i := 0; i < count; i++ { ltrType, ok := setupLocalVolumeMap[localVolumeType] - Expect(ok).To(BeTrue()) + gomega.Expect(ok).To(gomega.BeTrue()) ltr := config.ltrMgr.Create(node, ltrType, nil) vols = append(vols, &localTestVolume{ ltr: ltr, @@ -822,7 +822,7 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType, func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) { for _, volume := range volumes { - By("Cleaning up PVC and PV") + ginkgo.By("Cleaning up PVC and PV") errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc) if len(errs) > 0 { framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) @@ -847,7 +847,7 @@ func verifyLocalPod(config 
*localTestConfig, volume *localTestVolume, pod *v1.Po podNodeName, err := podNodeName(config, pod) framework.ExpectNoError(err) e2elog.Logf("pod %q created on Node %q", pod.Name, podNodeName) - Expect(podNodeName).To(Equal(expectedNodeName)) + gomega.Expect(podNodeName).To(gomega.Equal(expectedNodeName)) } func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) framework.PersistentVolumeClaimConfig { @@ -928,11 +928,11 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod for _, volume := range volumes { pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(volume.pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(pvc.Status.Phase).To(Equal(v1.ClaimPending)) + gomega.Expect(pvc.Status.Phase).To(gomega.Equal(v1.ClaimPending)) } return false, nil }) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) } } @@ -984,7 +984,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, } func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) { - By("Creating a pod") + ginkgo.By("Creating a pod") return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout) } @@ -1024,7 +1024,7 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) { readCmd := createReadCmd(testFileDir, testFile, volumeType) readOut := podRWCmdExec(pod, readCmd) - Expect(readOut).To(ContainSubstring(testFileContent)) + gomega.Expect(readOut).To(gomega.ContainSubstring(testFileContent)) } // Execute a read or write command in a pod. @@ -1045,10 +1045,10 @@ func setupLocalVolumesPVCsPVs( count int, mode storagev1.VolumeBindingMode) []*localTestVolume { - By("Initializing test volumes") + ginkgo.By("Initializing test volumes") testVols := setupLocalVolumes(config, localVolumeType, node, count) - By("Creating local PVCs and PVs") + ginkgo.By("Creating local PVCs and PVs") createLocalPVCsPVs(config, testVols, mode) return testVols @@ -1165,10 +1165,10 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b if anti { // Verify that each pod is on a different node - Expect(nodes.Len()).To(Equal(len(pods.Items))) + gomega.Expect(nodes.Len()).To(gomega.Equal(len(pods.Items))) } else { // Verify that all pods are on same node. - Expect(nodes.Len()).To(Equal(1)) + gomega.Expect(nodes.Len()).To(gomega.Equal(1)) } // Validate all PVCs are bound diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 3dc0c61a599..194d8b899df 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -21,7 +21,7 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,16 +39,16 @@ import ( // phase. Note: the PV is deleted in the AfterEach, not here. func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { // 1. 
verify that the PV and PVC have bound correctly - By("Validating the PV-PVC binding") + ginkgo.By("Validating the PV-PVC binding") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) // 2. create the nfs writer pod, test if the write was successful, // then delete the pod and verify that it was deleted - By("Checking pod has write access to PersistentVolume") + ginkgo.By("Checking pod has write access to PersistentVolume") framework.ExpectNoError(framework.CreateWaitAndDeletePod(f, c, ns, pvc)) // 3. delete the PVC, wait for PV to become "Released" - By("Deleting the PVC to invoke the reclaim policy.") + ginkgo.By("Deleting the PVC to invoke the reclaim policy.") framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased)) } @@ -61,7 +61,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, var err error // 1. verify each PV permits write access to a client pod - By("Checking pod has write access to PersistentVolumes") + ginkgo.By("Checking pod has write access to PersistentVolumes") for pvcKey := range claims { pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{}) if err != nil { @@ -82,7 +82,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, } // 2. delete each PVC, wait for its bound PV to reach `expectedPhase` - By("Deleting PVCs to invoke reclaim policy") + ginkgo.By("Deleting PVCs to invoke reclaim policy") if err = framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil { return err } @@ -91,7 +91,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, var _ = utils.SIGDescribe("PersistentVolumes", func() { - // global vars for the Context()s and It()'s below + // global vars for the ginkgo.Context()s and ginkgo.It()'s below f := framework.NewDefaultFramework("pv") var ( c clientset.Interface @@ -105,7 +105,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { err error ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name // Enforce binding only within test space via selector labels @@ -115,14 +115,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Testing configurations of a single a PV/PVC pair, multiple evenly paired PVs/PVCs, // and multiple unevenly paired PV/PVCs - Describe("NFS", func() { + ginkgo.Describe("NFS", func() { var ( nfsServerPod *v1.Pod serverIP string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { _, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) pvConfig = framework.PersistentVolumeConfig{ NamePrefix: "nfs-", @@ -142,15 +142,15 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name) pv, pvc = nil, nil pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{} }) - Context("with Single PV - PVC pairs", func() { + ginkgo.Context("with Single PV - PVC pairs", func() { // Note: this is the only code where the pv is deleted. - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources.") if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { framework.Failf("AfterEach: Failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) @@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create an nfs PV, then a claim that matches the PV, and a pod that // contains the claim. Verify that the PV and PVC bind correctly, and // that the pod can write to the nfs volume. - It("should create a non-pre-bound PV and PVC: test write access ", func() { + ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func() { pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -171,7 +171,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create a claim first, then a nfs PV that matches the claim, and a // pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. - It("create a PVC and non-pre-bound PV: test write access", func() { + ginkgo.It("create a PVC and non-pre-bound PV: test write access", func() { pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create a claim first, then a pre-bound nfs PV that matches the claim, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. - It("create a PVC and a pre-bound PV: test write access", func() { + ginkgo.It("create a PVC and a pre-bound PV: test write access", func() { pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -189,7 +189,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create a nfs PV first, then a pre-bound PVC that matches the PV, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. - It("create a PV and a pre-bound PVC: test write access", func() { + ginkgo.It("create a PV and a pre-bound PVC: test write access", func() { pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -205,14 +205,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Note: future tests may wish to incorporate the following: // a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods // in different namespaces. - Context("with multiple PVs and PVCs all in same ns", func() { + ginkgo.Context("with multiple PVs and PVCs all in same ns", func() { // scope the pv and pvc maps to be available in the AfterEach // note: these maps are created fresh in CreatePVsPVCs() var pvols framework.PVMap var claims framework.PVCMap - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols)) errs := framework.PVPVCMapCleanup(c, ns, pvols, claims) if len(errs) > 0 { @@ -226,7 +226,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 2 PVs and 4 PVCs. // Note: PVs are created before claims and no pre-binding - It("should create 2 PVs and 4 PVCs: test write access", func() { + ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func() { numPVs, numPVCs := 2, 4 pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -236,7 +236,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 3 PVs and 3 PVCs. 
// Note: PVs are created before claims and no pre-binding - It("should create 3 PVs and 3 PVCs: test write access", func() { + ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func() { numPVs, numPVCs := 3, 3 pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -246,7 +246,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 4 PVs and 2 PVCs. // Note: PVs are created before claims and no pre-binding. - It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() { + ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() { numPVs, numPVCs := 4, 2 pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -258,43 +258,43 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // This Context isolates and tests the "Recycle" reclaim behavior. On deprecation of the // Recycler, this entire context can be removed without affecting the test suite or leaving behind // dead code. - Context("when invoking the Recycle reclaim policy", func() { - BeforeEach(func() { + ginkgo.Context("when invoking the Recycle reclaim policy", func() { + ginkgo.BeforeEach(func() { pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed") }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources.") if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } }) - // This It() tests a scenario where a PV is written to by a Pod, recycled, then the volume checked + // This ginkgo.It() tests a scenario where a PV is written to by a Pod, recycled, then the volume checked // for files. If files are found, the checking Pod fails, failing the test. Otherwise, the pod // (and test) succeed. - It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() { - By("Writing to the volume.") + ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() { + ginkgo.By("Writing to the volume.") pod := framework.MakeWritePod(ns, pvc) pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)) - By("Deleting the claim") + ginkgo.By("Deleting the claim") framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)) - By("Re-mounting the volume.") + ginkgo.By("Re-mounting the volume.") pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns) pvc, err = framework.CreatePVC(c, ns, pvc) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name) // If a file is detected in /mnt, fail the pod and do not restart it. 
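 // The verification pod's command, "[ $(ls -A <mount> | wc -l) -eq 0 ] && exit 0 || exit 1",
 // counts the entries left on the mount, so the pod exits non-zero if the recycler left any file behind.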
- By("Verifying the mount has been cleaned.") + ginkgo.By("Verifying the mount has been cleaned.") mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) pod, err = c.CoreV1().Pods(ns).Create(pod) @@ -306,21 +306,21 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { }) }) - Describe("Default StorageClass", func() { - Context("pods that use multiple volumes", func() { + ginkgo.Describe("Default StorageClass", func() { + ginkgo.Context("pods that use multiple volumes", func() { - AfterEach(func() { + ginkgo.AfterEach(func() { framework.DeleteAllStatefulSets(c, ns) }) - It("should be reschedulable [Slow]", func() { + ginkgo.It("should be reschedulable [Slow]", func() { // Only run on providers with default storageclass framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure") numVols := 4 ssTester := framework.NewStatefulSetTester(c) - By("Creating a StatefulSet pod to initialize data") + ginkgo.By("Creating a StatefulSet pod to initialize data") writeCmd := "true" for i := 0; i < numVols; i++ { writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i)) @@ -353,7 +353,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { framework.ExpectNoError(err) ssTester.WaitForRunningAndReady(1, ss) - By("Deleting the StatefulSet but not the volumes") + ginkgo.By("Deleting the StatefulSet but not the volumes") // Scale down to 0 first so that the Delete is quick ss, err = ssTester.Scale(ss, 0) framework.ExpectNoError(err) @@ -361,7 +361,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err) - By("Creating a new Statefulset and validating the data") + ginkgo.By("Creating a new Statefulset and validating the data") validateCmd := "true" for i := 0; i < numVols; i++ { validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i)) diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 3d3e32136fb..820607ed570 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -19,8 +19,8 @@ package storage import ( "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,7 +48,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ) f := framework.NewDefaultFramework("pv-protection") - BeforeEach(func() { + ginkgo.BeforeEach(func() { client = f.ClientSet nameSpace = f.Namespace.Name framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) @@ -73,60 +73,60 @@ var _ = utils.SIGDescribe("PV Protection", func() { StorageClassName: &emptyStorageClass, } - By("Creating a PV") + ginkgo.By("Creating a PV") // make the pv definitions pv = framework.MakePersistentVolume(pvConfig) // create the PV pv, err = client.CoreV1().PersistentVolumes().Create(pv) framework.ExpectNoError(err, "Error creating PV") - By("Waiting for PV to enter phase Available") + ginkgo.By("Waiting for PV to enter phase Available") framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second)) - By("Checking that PV Protection finalizer is set") + ginkgo.By("Checking that PV Protection finalizer is set") pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While getting PV status") - Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers) + gomega.Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(gomega.BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers) }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources.") if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 { framework.Failf("AfterEach: Failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) } }) - It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() { - By("Deleting the PV") + ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() { + ginkgo.By("Deleting the PV") err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout) }) - It("Verify that PV bound to a PVC is not removed immediately", func() { - By("Creating a PVC") + ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() { + ginkgo.By("Creating a PVC") pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace) pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err, "Error creating PVC") - By("Waiting for PVC to become Bound") + ginkgo.By("Waiting for PVC to become Bound") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC") + ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC") err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") - By("Checking that the PV status is Terminating") + ginkgo.By("Checking that the PV status is Terminating") pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PV status") - Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) + gomega.Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil)) - By("Deleting the PVC that is bound to the PV") + ginkgo.By("Deleting the PVC that is bound to the PV") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") - By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC") + ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC") framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout) }) }) diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index 93e26176e03..c96237fab69 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -17,8 +17,8 @@ limitations under the License. package storage import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,12 +41,12 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ) f := framework.NewDefaultFramework("pvc-protection") - BeforeEach(func() { + ginkgo.BeforeEach(func() { client = f.ClientSet nameSpace = f.Namespace.Name framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) - By("Creating a PVC") + ginkgo.By("Creating a PVC") suffix := "pvc-protection" framework.SkipIfNoDefaultStorageClass(client) testStorageClass := testsuites.StorageClassTest{ @@ -57,86 +57,86 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Error creating PVC") pvcCreatedAndNotDeleted = true - By("Creating a Pod that becomes Running and therefore is actively using the PVC") + ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC") pvcClaims := []*v1.PersistentVolumeClaim{pvc} pod, err = framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "") framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running") - By("Waiting for PVC to become Bound") + ginkgo.By("Waiting for PVC to become Bound") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - By("Checking that PVC Protection finalizer is set") + ginkgo.By("Checking that PVC Protection finalizer is set") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While getting PVC status") - Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers) + gomega.Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(gomega.BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers) }) - AfterEach(func() { + ginkgo.AfterEach(func() { if pvcCreatedAndNotDeleted { framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace) } }) - It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() { - By("Deleting the pod using the PVC") + ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() { + ginkgo.By("Deleting the pod using the PVC") err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err, "Error terminating and deleting pod") - By("Deleting the PVC") + ginkgo.By("Deleting the PVC") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) pvcCreatedAndNotDeleted = false }) - It("Verify that PVC in active use by a pod is not removed immediately", func() { - By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") + ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func() { + ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as 
it's in active use by a pod") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") - By("Checking that the PVC status is Terminating") + ginkgo.By("Checking that the PVC status is Terminating") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") - Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) + gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil)) - By("Deleting the pod that uses the PVC") + ginkgo.By("Deleting the pod that uses the PVC") err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err, "Error terminating and deleting pod") - By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") + ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) pvcCreatedAndNotDeleted = false }) - It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() { - By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") + ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() { + ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") - By("Checking that the PVC status is Terminating") + ginkgo.By("Checking that the PVC status is Terminating") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") - Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) + gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil)) - By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted") + ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted") secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable") - By("Deleting the second pod that uses the PVC that is being deleted") + ginkgo.By("Deleting the second pod that uses the PVC that is being deleted") err = framework.DeletePodWithWait(f, client, secondPod) framework.ExpectNoError(err, "Error terminating and deleting pod") - By("Checking again that the PVC status is Terminating") + ginkgo.By("Checking again that the PVC status is Terminating") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") - Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) + gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil)) - By("Deleting the first pod that uses the PVC") + ginkgo.By("Deleting the first pod that uses the PVC") err = framework.DeletePodWithWait(f, client, pod) 
framework.ExpectNoError(err, "Error terminating and deleting pod") - By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") + ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) pvcCreatedAndNotDeleted = false }) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 72aacb9d9c8..8a3d5ee7b1a 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -17,8 +17,8 @@ limitations under the License. package storage import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "fmt" "strings" @@ -60,7 +60,7 @@ var _ = utils.SIGDescribe("Regional PD", func() { var c clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name @@ -68,26 +68,26 @@ var _ = utils.SIGDescribe("Regional PD", func() { framework.SkipUnlessMultizone(c) }) - Describe("RegionalPD", func() { - It("should provision storage [Slow]", func() { + ginkgo.Describe("RegionalPD", func() { + ginkgo.It("should provision storage [Slow]", func() { testVolumeProvisioning(c, ns) }) - It("should provision storage with delayed binding [Slow]", func() { + ginkgo.It("should provision storage with delayed binding [Slow]", func() { testRegionalDelayedBinding(c, ns, 1 /* pvcCount */) testRegionalDelayedBinding(c, ns, 3 /* pvcCount */) }) - It("should provision storage in the allowedTopologies [Slow]", func() { + ginkgo.It("should provision storage in the allowedTopologies [Slow]", func() { testRegionalAllowedTopologies(c, ns) }) - It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() { + ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() { testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 1 /* pvcCount */) testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 3 /* pvcCount */) }) - It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() { + ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() { testZonalFailover(c, ns) }) }) @@ -112,7 +112,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { ExpectedSize: repdMinSize, PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil()) + gomega.Expect(volume).NotTo(gomega.BeNil()) err := checkGCEPD(volume, "pd-standard") framework.ExpectNoError(err, "checkGCEPD") @@ -133,7 +133,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { ExpectedSize: repdMinSize, PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil()) + gomega.Expect(volume).NotTo(gomega.BeNil()) err := checkGCEPD(volume, "pd-standard") framework.ExpectNoError(err, "checkGCEPD") @@ -174,7 +174,7 @@ func testZonalFailover(c clientset.Interface, ns string) { claimTemplate.Spec.StorageClassName = &class.Name statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns) - By("creating a StorageClass " + class.Name) + ginkgo.By("creating a StorageClass 
" + class.Name) _, err := c.StorageV1().StorageClasses().Create(class) framework.ExpectNoError(err) defer func() { @@ -183,7 +183,7 @@ func testZonalFailover(c clientset.Interface, ns string) { "Error deleting StorageClass %s", class.Name) }() - By("creating a StatefulSet") + ginkgo.By("creating a StatefulSet") _, err = c.CoreV1().Services(ns).Create(service) framework.ExpectNoError(err) _, err = c.AppsV1().StatefulSets(ns).Create(statefulSet) @@ -210,24 +210,24 @@ func testZonalFailover(c clientset.Interface, ns string) { err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout) if err != nil { pod := getPod(c, ns, regionalPDLabels) - Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(), + gomega.Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(gomega.BeTrue(), "The statefulset pod has the following conditions: %s", pod.Status.Conditions) framework.ExpectNoError(err) } pvc := getPVC(c, ns, regionalPDLabels) - By("getting zone information from pod") + ginkgo.By("getting zone information from pod") pod := getPod(c, ns, regionalPDLabels) nodeName := pod.Spec.NodeName node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) podZone := node.Labels[v1.LabelZoneFailureDomain] - By("tainting nodes in the zone the pod is scheduled in") + ginkgo.By("tainting nodes in the zone the pod is scheduled in") selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone})) nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone) defer func() { @@ -235,11 +235,11 @@ func testZonalFailover(c clientset.Interface, ns string) { removeTaintFunc() }() - By("deleting StatefulSet pod") + ginkgo.By("deleting StatefulSet pod") err = c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{}) // Verify the pod is scheduled in the other zone. 
- By("verifying the pod is scheduled in a different zone.") + ginkgo.By("verifying the pod is scheduled in a different zone.") var otherZone string if cloudZones[0] == podZone { otherZone = cloudZones[1] @@ -262,22 +262,22 @@ func testZonalFailover(c clientset.Interface, ns string) { err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout) if err != nil { pod := getPod(c, ns, regionalPDLabels) - Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(), + gomega.Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(gomega.BeTrue(), "The statefulset pod has the following conditions: %s", pod.Status.Conditions) framework.ExpectNoError(err) } - By("verifying the same PVC is used by the new pod") - Expect(getPVC(c, ns, regionalPDLabels).Name).To(Equal(pvc.Name), + ginkgo.By("verifying the same PVC is used by the new pod") + gomega.Expect(getPVC(c, ns, regionalPDLabels).Name).To(gomega.Equal(pvc.Name), "The same PVC should be used after failover.") - By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.") + ginkgo.By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.") logs, err := framework.GetPodLogs(c, ns, pod.Name, "") framework.ExpectNoError(err, "Error getting logs from pod %s in namespace %s", pod.Name, ns) lineCount := len(strings.Split(strings.TrimSpace(logs), "\n")) expectedLineCount := 2 - Expect(lineCount).To(Equal(expectedLineCount), + gomega.Expect(lineCount).To(gomega.Equal(expectedLineCount), "Line count of the written file should be %d.", expectedLineCount) } @@ -305,13 +305,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) reversePatches[node.Name] = reversePatchBytes _, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } return func() { for nodeName, reversePatch := range reversePatches { _, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } } } @@ -425,7 +425,7 @@ func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.P options := metav1.ListOptions{LabelSelector: selector.String()} pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options) framework.ExpectNoError(err) - Expect(len(pvcList.Items)).To(Equal(1), "There should be exactly 1 PVC matched.") + gomega.Expect(len(pvcList.Items)).To(gomega.Equal(1), "There should be exactly 1 PVC matched.") return &pvcList.Items[0] } @@ -435,7 +435,7 @@ func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.P options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.CoreV1().Pods(ns).List(options) framework.ExpectNoError(err) - Expect(len(podList.Items)).To(Equal(1), "There should be exactly 1 pod matched.") + gomega.Expect(len(podList.Items)).To(gomega.Equal(1), "There should be exactly 1 pod matched.") return &podList.Items[0] } @@ -534,8 +534,8 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec { func getTwoRandomZones(c clientset.Interface) []string { zones, err := framework.GetClusterZones(c) - Expect(err).ToNot(HaveOccurred()) - Expect(zones.Len()).To(BeNumerically(">=", 2), + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + 
gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2), "The test should only be run in multizone clusters.") zone1, _ := zones.PopAny() diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index c8d6218b1c2..e68a7a9c3ea 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -24,18 +24,18 @@ import ( "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = utils.SIGDescribe("Subpath", func() { f := framework.NewDefaultFramework("subpath") - Context("Atomic writer volumes", func() { + ginkgo.Context("Atomic writer volumes", func() { var err error var privilegedSecurityContext bool = false - BeforeEach(func() { - By("Setting up data") + ginkgo.BeforeEach(func() { + ginkgo.By("Setting up data") secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}} secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) if err != nil && !apierrors.IsAlreadyExists(err) { diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 6f3b32f9b10..f708794356f 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -24,7 +24,7 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -88,8 +88,8 @@ func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) { suite := testSuiteInit() for _, pattern := range suite.getTestSuiteInfo().testPatterns { p := pattern - Context(getTestNameStr(suite, p), func() { - BeforeEach(func() { + ginkgo.Context(getTestNameStr(suite, p), func() { + ginkgo.BeforeEach(func() { // Skip unsupported tests to avoid unnecessary resource initialization skipUnsupportedTest(driver, p) }) @@ -214,7 +214,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p claimSize := dDriver.GetClaimSize() r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType) - By("creating a StorageClass " + r.sc.Name) + ginkgo.By("creating a StorageClass " + r.sc.Name) var err error r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc) framework.ExpectNoError(err) @@ -244,12 +244,12 @@ func (r *genericVolumeTestResource) cleanupResource() { if r.pvc != nil || r.pv != nil { switch volType { case testpatterns.PreprovisionedPV: - By("Deleting pv and pvc") + ginkgo.By("Deleting pv and pvc") if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) } case testpatterns.DynamicPV: - By("Deleting pvc") + ginkgo.By("Deleting pvc") // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", @@ -269,7 +269,7 @@ func (r *genericVolumeTestResource) cleanupResource() { } if r.sc != nil { - By("Deleting sc") + ginkgo.By("Deleting sc") deleteStorageClass(f.ClientSet, r.sc.Name) } @@ -330,7 +330,7 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC( cs := f.ClientSet ns := f.Namespace.Name - By("creating a claim") + ginkgo.By("creating a claim") pvc := getClaim(claimSize, ns) pvc.Spec.StorageClassName = 
&sc.Name if volMode != "" { @@ -455,12 +455,12 @@ func StartPodLogs(f *framework.Framework) func() { ns := f.Namespace to := podlogs.LogOutput{ - StatusWriter: GinkgoWriter, + StatusWriter: ginkgo.GinkgoWriter, } if framework.TestContext.ReportDir == "" { - to.LogWriter = GinkgoWriter + to.LogWriter = ginkgo.GinkgoWriter } else { - test := CurrentGinkgoTestDescription() + test := ginkgo.CurrentGinkgoTestDescription() reg := regexp.MustCompile("[^a-zA-Z0-9_-]+") // We end the prefix with a slash to ensure that all logs // end up in a directory named after the current test. @@ -476,7 +476,7 @@ func StartPodLogs(f *framework.Framework) func() { // after a failed test. Logging them live is only useful for interactive // debugging, not when we collect reports. if framework.TestContext.ReportDir == "" { - podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter) + podlogs.WatchPods(ctx, cs, ns.Name, ginkgo.GinkgoWriter) } return cancel diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index a4446417dff..c7508a4865c 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -73,7 +73,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter l local ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Check preconditions. if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] { framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode) @@ -115,7 +115,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // [ node1 ] ==> [ node1 ] // / \ <- same volume mode / \ // [volume1] [volume2] [volume1] [volume2] - It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() { + ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() { // Currently, multiple volumes are not generally available for pre-provisoined volume, // because containerized storage servers, such as iSCSI and rbd, are just returning // a static volume inside container, not actually creating a new volume per request. @@ -144,7 +144,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // [ node1 ] ==> [ node2 ] // / \ <- same volume mode / \ // [volume1] [volume2] [volume1] [volume2] - It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() { + ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() { // Currently, multiple volumes are not generally available for pre-provisoined volume, // because containerized storage servers, such as iSCSI and rbd, are just returning // a static volume inside container, not actually creating a new volume per request. 
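
For reference, the target style of this conversion reads as follows in a minimal, self-contained spec. The sketch below is illustrative only and is not part of the patch; its package name, suite bootstrap, and test data are assumptions rather than code from this tree.

package example_test

import (
    "testing"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

// TestExample wires gomega failures into ginkgo and runs the suite.
func TestExample(t *testing.T) {
    gomega.RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "Example Suite")
}

var _ = ginkgo.Describe("package-qualified ginkgo/gomega", func() {
    var data map[string]string

    ginkgo.BeforeEach(func() {
        data = map[string]string{"key": "value"}
    })

    ginkgo.It("uses qualified helpers and matchers", func() {
        ginkgo.By("reading a value back")
        v, ok := data["key"]
        gomega.Expect(ok).To(gomega.BeTrue(), "key %q should be present", "key")
        gomega.Expect(v).To(gomega.Equal("value"))
    })
})

Apart from the added ginkgo. and gomega. qualifiers, nothing about the spec structure or the matchers changes, which is why the hunks above and below are purely textual.
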
@@ -182,7 +182,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // [ node1 ] ==> [ node1 ] // / \ <- different volume mode / \ // [volume1] [volume2] [volume1] [volume2] - It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() { + ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() { if pattern.VolMode == v1.PersistentVolumeFilesystem { framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping") } @@ -220,7 +220,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // [ node1 ] ==> [ node2 ] // / \ <- different volume mode / \ // [volume1] [volume2] [volume1] [volume2] - It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() { + ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() { if pattern.VolMode == v1.PersistentVolumeFilesystem { framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping") } @@ -267,7 +267,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // [ node1 ] // \ / <- same volume mode // [volume1] - It("should concurrently access the single volume from pods on the same node", func() { + ginkgo.It("should concurrently access the single volume from pods on the same node", func() { init() defer cleanup() @@ -291,7 +291,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // [ node1 ] [ node2 ] // \ / <- same volume mode // [volume1] - It("should concurrently access the single volume from pods on different node", func() { + ginkgo.It("should concurrently access the single volume from pods on different node", func() { init() defer cleanup() @@ -324,7 +324,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter // If readSeedBase > 0, read test are done before write/read test assuming that there is already data written. 
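 // Each claim is mounted in the pod at /mnt/volume{index+1}, and the per-volume read/write seeds
 // are derived by adding the volume index to readSeedBase and writeSeedBase.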
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string, node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string { - By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node)) + ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node)) pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs, false, "", false, false, framework.SELinuxLabel, nil, node, framework.PodStartTimeout) @@ -338,18 +338,18 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n // CreateSecPodWithNodeSelection make volumes accessible via /mnt/volume({i} + 1) index := i + 1 path := fmt.Sprintf("/mnt/volume%d", index) - By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) + ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path) if readSeedBase > 0 { - By(fmt.Sprintf("Checking if read from the volume%d works properly", index)) + ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index)) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i)) } - By(fmt.Sprintf("Checking if write to the volume%d works properly", index)) + ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index)) utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i)) - By(fmt.Sprintf("Checking if read from the volume%d works properly", index)) + ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index)) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i)) } @@ -397,7 +397,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int // Create each pod with pvc for i := 0; i < numPods; i++ { index := i + 1 - By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node)) + ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node)) pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, @@ -425,11 +425,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int // Check if volume can be accessed from each pod for i, pod := range pods { index := i + 1 - By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) + ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path) if i != 0 { - By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1)) + ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1)) // For 1st pod, no one has written data yet, so pass the read check utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) } @@ -437,10 +437,10 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int // Update the seed and check if write/read works properly seed = time.Now().UTC().UnixNano() - By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index)) + ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index)) 
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) - By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index)) + ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index)) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) } @@ -456,24 +456,24 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int for i, pod := range pods { index := i + 1 // index of pod and index of pvc match, because pods are created above way - By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) + ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, "/mnt/volume1") if i == 0 { // This time there should be data that last pod wrote, for 1st pod - By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index)) + ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index)) } else { - By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1)) + ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1)) } utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) // Update the seed and check if write/read works properly seed = time.Now().UTC().UnixNano() - By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index)) + ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index)) utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) - By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index)) + ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index)) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) } } diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 160ac444f9f..2ea8ee109ff 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -99,7 +99,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte l local ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Check preconditions. 
if pattern.VolType != testpatterns.DynamicPV { framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType) @@ -150,7 +150,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) } - It("should provision storage with defaults", func() { + ginkgo.It("should provision storage with defaults", func() { init() defer cleanup() @@ -160,7 +160,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte l.testCase.TestDynamicProvisioning() }) - It("should provision storage with mount options", func() { + ginkgo.It("should provision storage with mount options", func() { if dInfo.SupportedMountOption == nil { framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name) } @@ -175,7 +175,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte l.testCase.TestDynamicProvisioning() }) - It("should access volume from different nodes", func() { + ginkgo.It("should access volume from different nodes", func() { init() defer cleanup() @@ -198,7 +198,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte l.testCase.TestDynamicProvisioning() }) - It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { + ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { if !dInfo.Capabilities[CapDataSource] { framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) } @@ -218,7 +218,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte l.pvc.Spec.DataSource = dataSource l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { - By("checking whether the created volume has the pre-populated data") + ginkgo.By("checking whether the created volume has the pre-populated data") command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName}) } @@ -229,19 +229,19 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte // TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { client := t.Client - Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required") + gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required") claim := t.Claim - Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required") + gomega.Expect(claim).NotTo(gomega.BeNil(), "StorageClassTest.Claim is required") class := t.Class var err error if class != nil { - Expect(*claim.Spec.StorageClassName).To(Equal(class.Name)) - By("creating a StorageClass " + class.Name) + gomega.Expect(*claim.Spec.StorageClassName).To(gomega.Equal(class.Name)) + ginkgo.By("creating a StorageClass " + class.Name) _, err = client.StorageV1().StorageClasses().Create(class) // The "should provision storage with snapshot data source" test already has created the class. 
// TODO: make class creation optional and remove the IsAlreadyExists exception - Expect(err == nil || apierrs.IsAlreadyExists(err)).To(Equal(true)) + gomega.Expect(err == nil || apierrs.IsAlreadyExists(err)).To(gomega.Equal(true)) class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{}) framework.ExpectNoError(err) defer func() { @@ -250,7 +250,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { }() } - By("creating a claim") + ginkgo.By("creating a claim") claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) framework.ExpectNoError(err) defer func() { @@ -269,7 +269,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { pv := t.checkProvisioning(client, claim, class) - By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) + ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) // Wait for the PV to get deleted if reclaim policy is Delete. (If it's @@ -280,7 +280,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { // in a couple of minutes. Wait 20 minutes to recover from random cloud // hiccups. if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete { - By(fmt.Sprintf("deleting the claim's PV %q", pv.Name)) + ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute)) } @@ -292,24 +292,24 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("checking the claim") + ginkgo.By("checking the claim") pv, err := framework.GetBoundPV(client, claim) framework.ExpectNoError(err) // Check sizes expectedCapacity := resource.MustParse(t.ExpectedSize) pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] - Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity") + gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity") requestedCapacity := resource.MustParse(t.ClaimSize) claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity") + gomega.Expect(claimCapacity.Value()).To(gomega.Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity") // Check PV properties - By("checking the PV") + ginkgo.By("checking the PV") // Every access mode in PV should be in PVC - Expect(pv.Spec.AccessModes).NotTo(BeZero()) + gomega.Expect(pv.Spec.AccessModes).NotTo(gomega.BeZero()) for _, pvMode := range pv.Spec.AccessModes { found := false for _, pvcMode := range claim.Spec.AccessModes { @@ -318,20 +318,20 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v break } } - Expect(found).To(BeTrue()) + gomega.Expect(found).To(gomega.BeTrue()) } - Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name)) - Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace)) + gomega.Expect(pv.Spec.ClaimRef.Name).To(gomega.Equal(claim.ObjectMeta.Name)) 
+ gomega.Expect(pv.Spec.ClaimRef.Namespace).To(gomega.Equal(claim.ObjectMeta.Namespace)) if class == nil { - Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete)) + gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete)) } else { - Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy)) - Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions)) + gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*class.ReclaimPolicy)) + gomega.Expect(pv.Spec.MountOptions).To(gomega.Equal(class.MountOptions)) } if claim.Spec.VolumeMode != nil { - Expect(pv.Spec.VolumeMode).NotTo(BeNil()) - Expect(*pv.Spec.VolumeMode).To(Equal(*claim.Spec.VolumeMode)) + gomega.Expect(pv.Spec.VolumeMode).NotTo(gomega.BeNil()) + gomega.Expect(*pv.Spec.VolumeMode).To(gomega.Equal(*claim.Spec.VolumeMode)) } return pv } @@ -351,7 +351,7 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v // // This is a common test that can be called from a StorageClassTest.PvCheck. func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume { - By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) + ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) defer func() { @@ -369,7 +369,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent volume, err := framework.GetBoundPV(client, claim) framework.ExpectNoError(err) - By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) + ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) command = "grep 'hello world' /mnt/test/data" // We give the second pod the additional responsibility of checking the volume has @@ -403,7 +403,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent // // This is a common test that can be called from a StorageClassTest.PvCheck. func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) { - Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node") + gomega.Expect(node.Name).To(gomega.Equal(""), "this test only works when not locked onto a single node") var pod *v1.Pod defer func() { @@ -411,7 +411,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai StopPod(client, pod) }() - By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) + ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) @@ -424,7 +424,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai // Add node-anti-affinity. 
secondNode := node framework.SetAntiAffinity(&secondNode, actualNodeName) - By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) + ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) command = "grep 'hello world' /mnt/test/data" if framework.NodeOSDistroIs("windows") { command = "select-string 'hello world' /mnt/test/data" @@ -433,7 +433,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") - Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node") + gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node") StopPod(client, pod) pod = nil } @@ -448,15 +448,15 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[strin func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error - Expect(len(claims)).ToNot(Equal(0)) + gomega.Expect(len(claims)).ToNot(gomega.Equal(0)) namespace := claims[0].Namespace - By("creating a storage class " + t.Class.Name) + ginkgo.By("creating a storage class " + t.Class.Name) class, err := t.Client.StorageV1().StorageClasses().Create(t.Class) framework.ExpectNoError(err) defer deleteStorageClass(t.Client, class.Name) - By("creating claims") + ginkgo.By("creating claims") var claimNames []string var createdClaims []*v1.PersistentVolumeClaim for _, claim := range claims { @@ -481,12 +481,12 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P }() // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. 
the Wait errors out - By("checking the claims are in pending state") + ginkgo.By("checking the claims are in pending state") err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) framework.ExpectError(err) verifyPVCsPending(t.Client, createdClaims) - By("creating a pod referring to the claims") + ginkgo.By("creating a pod referring to the claims") // Create a pod referring to the claim and wait for it to get to running var pod *v1.Pod if expectUnschedulable { @@ -509,7 +509,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) - By("re-checking the claims to see they binded") + ginkgo.By("re-checking the claims to see they binded") var pvs []*v1.PersistentVolume for _, claim := range createdClaims { // Get new copy of the claim @@ -523,7 +523,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P framework.ExpectNoError(err) pvs = append(pvs, pv) } - Expect(len(pvs)).To(Equal(len(createdClaims))) + gomega.Expect(len(pvs)).To(gomega.Equal(len(createdClaims))) return pvs, node } @@ -605,7 +605,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl // Get new copy of the claim claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) + gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending)) } } @@ -619,31 +619,31 @@ func prepareDataSourceForProvisioning( ) (*v1.TypedLocalObjectReference, func()) { var err error if class != nil { - By("[Initialize dataSource]creating a StorageClass " + class.Name) + ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name) _, err = client.StorageV1().StorageClasses().Create(class) framework.ExpectNoError(err) } - By("[Initialize dataSource]creating a initClaim") + ginkgo.By("[Initialize dataSource]creating a initClaim") updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim) framework.ExpectNoError(err) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("[Initialize dataSource]checking the initClaim") + ginkgo.By("[Initialize dataSource]checking the initClaim") // Get new copy of the initClaim _, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(updatedClaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // write namespace to the /mnt/test (= the volume). 
- By("[Initialize dataSource]write data to volume") + ginkgo.By("[Initialize dataSource]write data to volume") command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace()) RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node) - By("[Initialize dataSource]creating a SnapshotClass") + ginkgo.By("[Initialize dataSource]creating a SnapshotClass") snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{}) - By("[Initialize dataSource]creating a snapshot") + ginkgo.By("[Initialize dataSource]creating a snapshot") snapshot := getSnapshot(updatedClaim.Name, updatedClaim.Namespace, snapshotClass.GetName()) snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Create(snapshot, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -651,7 +651,7 @@ func prepareDataSourceForProvisioning( WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) framework.ExpectNoError(err) - By("[Initialize dataSource]checking the snapshot") + ginkgo.By("[Initialize dataSource]checking the snapshot") // Get new copy of the snapshot snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 70f89e912cb..f5a2be989b3 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -82,9 +82,9 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt dDriver DynamicPVTestDriver ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Check preconditions. - Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot)) + gomega.Expect(pattern.SnapshotType).To(gomega.Equal(testpatterns.DynamicCreatedSnapshot)) dInfo := driver.GetDriverInfo() ok := false sDriver, ok = driver.(SnapshottableTestDriver) @@ -103,7 +103,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt // f must run inside an It or Context callback. 
f := framework.NewDefaultFramework("snapshotting") - It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { + ginkgo.It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { cs := f.ClientSet dc := f.DynamicClient @@ -122,7 +122,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt pvc.Spec.StorageClassName = &class.Name e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc) - By("creating a StorageClass " + class.Name) + ginkgo.By("creating a StorageClass " + class.Name) class, err := cs.StorageV1().StorageClasses().Create(class) framework.ExpectNoError(err) defer func() { @@ -130,7 +130,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil)) }() - By("creating a claim") + ginkgo.By("creating a claim") pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err) defer func() { @@ -144,7 +144,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("checking the claim") + ginkgo.By("checking the claim") // Get new copy of the claim pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -153,7 +153,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) - By("creating a SnapshotClass") + ginkgo.By("creating a SnapshotClass") vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { @@ -161,7 +161,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil)) }() - By("creating a snapshot") + ginkgo.By("creating a snapshot") snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName()) snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{}) @@ -177,7 +177,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) framework.ExpectNoError(err) - By("checking the snapshot") + ginkgo.By("checking the snapshot") // Get new copy of the snapshot snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) framework.ExpectNoError(err) @@ -193,11 +193,11 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{}) // Check SnapshotContent properties - By("checking the SnapshotContent") - Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName())) - Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName())) - Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace())) - Expect(persistentVolumeRef["name"]).To(Equal(pv.Name)) + ginkgo.By("checking the SnapshotContent") + 
gomega.Expect(snapshotContentSpec["snapshotClassName"]).To(gomega.Equal(vsc.GetName())) + gomega.Expect(volumeSnapshotRef["name"]).To(gomega.Equal(snapshot.GetName())) + gomega.Expect(volumeSnapshotRef["namespace"]).To(gomega.Equal(snapshot.GetNamespace())) + gomega.Expect(persistentVolumeRef["name"]).To(gomega.Equal(pv.Name)) }) } diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 02a9444601f..8cdfc73e991 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -34,8 +34,8 @@ import ( "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var ( @@ -147,9 +147,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T cleanup := func() { if l.pod != nil { - By("Deleting pod") + ginkgo.By("Deleting pod") err := framework.DeletePodWithWait(f, f.ClientSet, l.pod) - Expect(err).ToNot(HaveOccurred(), "while deleting pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting pod") l.pod = nil } @@ -166,7 +166,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps) } - It("should support non-existent path", func() { + ginkgo.It("should support non-existent path", func() { init() defer cleanup() @@ -177,7 +177,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testReadFile(f, l.filePathInVolume, l.pod, 1) }) - It("should support existing directory", func() { + ginkgo.It("should support existing directory", func() { init() defer cleanup() @@ -191,7 +191,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testReadFile(f, l.filePathInVolume, l.pod, 1) }) - It("should support existing single file", func() { + ginkgo.It("should support existing single file", func() { init() defer cleanup() @@ -202,7 +202,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testReadFile(f, l.filePathInSubpath, l.pod, 0) }) - It("should support file as subpath", func() { + ginkgo.It("should support file as subpath", func() { init() defer cleanup() @@ -212,7 +212,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T TestBasicSubpath(f, f.Namespace.Name, l.pod) }) - It("should fail if subpath directory is outside the volume [Slow]", func() { + ginkgo.It("should fail if subpath directory is outside the volume [Slow]", func() { init() defer cleanup() @@ -223,7 +223,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodFailSubpath(f, l.pod, false) }) - It("should fail if subpath file is outside the volume [Slow]", func() { + ginkgo.It("should fail if subpath file is outside the volume [Slow]", func() { init() defer cleanup() @@ -234,7 +234,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodFailSubpath(f, l.pod, false) }) - It("should fail if non-existent subpath is outside the volume [Slow]", func() { + ginkgo.It("should fail if non-existent subpath is outside the volume [Slow]", func() { init() defer cleanup() @@ -245,7 +245,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodFailSubpath(f, l.pod, false) }) - It("should fail if subpath with backstepping is outside the volume [Slow]", func() { + ginkgo.It("should fail if subpath with 
backstepping is outside the volume [Slow]", func() { init() defer cleanup() @@ -256,7 +256,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodFailSubpath(f, l.pod, false) }) - It("should support creating multiple subpath from same volumes [Slow]", func() { + ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func() { init() defer cleanup() @@ -282,7 +282,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testMultipleReads(f, l.pod, 0, filepath1, filepath2) }) - It("should support restarting containers using directory as subpath [Slow]", func() { + ginkgo.It("should support restarting containers using directory as subpath [Slow]", func() { init() defer cleanup() @@ -292,7 +292,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodContainerRestart(f, l.pod) }) - It("should support restarting containers using file as subpath [Slow]", func() { + ginkgo.It("should support restarting containers using file as subpath [Slow]", func() { init() defer cleanup() @@ -302,14 +302,14 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodContainerRestart(f, l.pod) }) - It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { + ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { init() defer cleanup() testSubpathReconstruction(f, l.pod, false) }) - It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { + ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { init() defer cleanup() @@ -321,7 +321,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testSubpathReconstruction(f, l.pod, true) }) - It("should support readOnly directory specified in the volumeMount", func() { + ginkgo.It("should support readOnly directory specified in the volumeMount", func() { init() defer cleanup() @@ -336,7 +336,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testReadFile(f, l.filePathInSubpath, l.pod, 0) }) - It("should support readOnly file specified in the volumeMount", func() { + ginkgo.It("should support readOnly file specified in the volumeMount", func() { init() defer cleanup() @@ -351,7 +351,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testReadFile(f, volumePath, l.pod, 0) }) - It("should support existing directories when readOnly specified in the volumeSource", func() { + ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func() { init() defer cleanup() if l.roVolSource == nil { @@ -379,7 +379,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testReadFile(f, l.filePathInSubpath, l.pod, 0) }) - It("should verify container cannot write to subpath readonly volumes [Slow]", func() { + ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func() { init() defer cleanup() if l.roVolSource == nil { @@ -399,7 +399,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T testPodFailSubpath(f, l.pod, true) }) - It("should be able to unmount after the subpath directory is deleted", func() { + ginkgo.It("should be able to unmount after the subpath directory is deleted", func() { init() defer cleanup() @@ -407,23 +407,23 @@ 
func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} - By(fmt.Sprintf("Creating pod %s", l.pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name)) removeUnusedContainers(l.pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod") defer func() { - By(fmt.Sprintf("Deleting pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) framework.DeletePodWithWait(f, f.ClientSet, pod) }() // Wait for pod to be running err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod) - Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running") // Exec into container that mounted the volume, delete subpath directory rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir) _, err = podContainerExec(l.pod, 1, rmCmd) - Expect(err).ToNot(HaveOccurred(), "while removing subpath directory") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while removing subpath directory") // Delete pod (from defer) and wait for it to be successfully deleted }) @@ -440,11 +440,11 @@ func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) { func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) { setReadCommand(filepath, &pod.Spec.Containers[0]) - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents}) - By(fmt.Sprintf("Deleting pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) err := framework.DeletePodWithWait(f, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting pod") } @@ -672,7 +672,7 @@ func addMultipleWrites(container *v1.Container, file1 string, file2 string) { } func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{ "content of file \"" + file1 + "\": mount-tester new file", @@ -690,13 +690,13 @@ func setReadCommand(file string, container *v1.Container) { func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) { setReadCommand(file, &pod.Spec.Containers[containerIndex]) - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) f.TestContainerOutput("subpath", pod, containerIndex, []string{ "content of file \"" + file + "\": mount-tester new file", }) - By(fmt.Sprintf("Deleting pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) err := framework.DeletePodWithWait(f, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting pod") } @@ -706,14 +706,14 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTermi } func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) { - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) 
removeUnusedContainers(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod") defer func() { framework.DeletePodWithWait(f, f.ClientSet, pod) }() - By("Checking for subpath error in container status") + ginkgo.By("Checking for subpath error in container status") err = waitForPodSubpathError(f, pod, allowContainerTerminationError) framework.ExpectNoError(err, "while waiting for subpath failure") } @@ -786,23 +786,23 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { } // Start pod - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod") defer func() { framework.DeletePodWithWait(f, f.ClientSet, pod) }() err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running") - By("Failing liveness probe") + ginkgo.By("Failing liveness probe") out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath)) e2elog.Logf("Pod exec output: %v", out) - Expect(err).ToNot(HaveOccurred(), "while failing liveness probe") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while failing liveness probe") // Check that container has restarted - By("Waiting for container to restart") + ginkgo.By("Waiting for container to restart") restarts := int32(0) err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) @@ -821,17 +821,17 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { } return false, nil }) - Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to restart") // Fix liveness probe - By("Rewriting the file") + ginkgo.By("Rewriting the file") writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath) out, err = podContainerExec(pod, 1, writeCmd) e2elog.Logf("Pod exec output: %v", out) - Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while rewriting the probe file") // Wait for container restarts to stabilize - By("Waiting for container to stop restarting") + ginkgo.By("Waiting for container to stop restarting") stableCount := int(0) stableThreshold := int(time.Minute / framework.Poll) err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { @@ -857,7 +857,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { } return false, nil }) - Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to stabilize") } func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) { @@ -874,30 +874,30 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete gracePeriod := int64(30) pod.Spec.TerminationGracePeriodSeconds = &gracePeriod - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) 
removeUnusedContainers(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod") err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running") pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred(), "while getting pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while getting pod") utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true) } func formatVolume(f *framework.Framework, pod *v1.Pod) { - By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating volume init pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating volume init pod") err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) - Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for volume init pod to succeed") err = framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod") + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting volume init pod") } func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) { diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 2db0b320d58..6bfe5332f71 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -29,7 +29,7 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -126,7 +126,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns. validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) } - It("should write files of various sizes, verify size, validate content [Slow]", func() { + ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func() { init() defer cleanup() @@ -230,7 +230,7 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc // Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file. func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error { - By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) + ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) loopCnt := fsize / testpatterns.MinFileSize writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath) _, err := utils.PodExec(pod, writeCmd) @@ -240,7 +240,7 @@ func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error { // Verify that the test file is the expected size and contains the expected content. 
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error { - By("verifying file size") + ginkgo.By("verifying file size") rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath)) if err != nil || rtnstr == "" { return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err) @@ -253,7 +253,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize) } - By("verifying file hash") + ginkgo.By("verifying file hash") rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) if err != nil { return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err) @@ -274,7 +274,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err // Delete `fpath` to save some disk space on host. Delete errors are logged but ignored. func deleteFile(pod *v1.Pod, fpath string) { - By(fmt.Sprintf("deleting test file %s...", fpath)) + ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath)) _, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath)) if err != nil { // keep going, the test dir will be deleted when the volume is unmounted @@ -299,7 +299,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume. clientPod := makePodSpec(config, initCmd, volsrc, podSecContext) - By(fmt.Sprintf("starting %s", clientPod.Name)) + ginkgo.By(fmt.Sprintf("starting %s", clientPod.Name)) podsNamespacer := cs.CoreV1().Pods(config.Namespace) clientPod, err = podsNamespacer.Create(clientPod) if err != nil { @@ -307,7 +307,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume. } defer func() { deleteFile(clientPod, ddInput) - By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) + ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) e := framework.DeletePodWithWait(f, cs, clientPod) if e != nil { e2elog.Logf("client pod failed to delete: %v", e) diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 4d0b89af299..f13b295916e 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -19,8 +19,7 @@ package testsuites import ( "fmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -166,17 +165,17 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern switch pattern.VolType { case testpatterns.PreprovisionedPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { - It("should fail to create pod by failing to mount volume [Slow]", func() { + ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() { init() defer cleanup() var err error - By("Creating sc") + ginkgo.By("Creating sc") l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) framework.ExpectNoError(err) - By("Creating pv and pvc") + ginkgo.By("Creating pv and pvc") l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv) framework.ExpectNoError(err) @@ -187,27 +186,27 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc)) - By("Creating pod") + ginkgo.By("Creating pod") pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) defer func() { framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) }() - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) }) } else { - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { init() defer cleanup() var err error - By("Creating sc") + ginkgo.By("Creating sc") l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) framework.ExpectNoError(err) - By("Creating pv and pvc") + ginkgo.By("Creating pv and pvc") l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv) framework.ExpectNoError(err) @@ -218,7 +217,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc)) - By("Creating pod") + ginkgo.By("Creating pod") pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) @@ -227,45 +226,45 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern }() framework.ExpectNoError(err) - By("Checking if persistent volume exists as expected volume mode") + ginkgo.By("Checking if persistent volume exists as expected volume mode") utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") - By("Checking if read/write to persistent volume works properly") + ginkgo.By("Checking if read/write to persistent volume works properly") utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") }) // TODO(mkimuram): Add more tests } case testpatterns.DynamicPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { - It("should fail in binding dynamic provisioned PV to PVC", func() { + ginkgo.It("should fail in binding dynamic provisioned PV to PVC", func() { init() defer cleanup() var err error - By("Creating sc") + ginkgo.By("Creating sc") l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) framework.ExpectNoError(err) - By("Creating pv and pvc") + 
ginkgo.By("Creating pv and pvc") l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) framework.ExpectNoError(err) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) }) } else { - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { init() defer cleanup() var err error - By("Creating sc") + ginkgo.By("Creating sc") l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) framework.ExpectNoError(err) - By("Creating pv and pvc") + ginkgo.By("Creating pv and pvc") l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) framework.ExpectNoError(err) @@ -278,7 +277,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) - By("Creating pod") + ginkgo.By("Creating pod") pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) @@ -287,10 +286,10 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern }() framework.ExpectNoError(err) - By("Checking if persistent volume exists as expected volume mode") + ginkgo.By("Checking if persistent volume exists as expected volume mode") utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") - By("Checking if read/write to persistent volume works properly") + ginkgo.By("Checking if read/write to persistent volume works properly") utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") }) // TODO(mkimuram): Add more tests diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 53870860a22..69290a485da 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -24,7 +24,7 @@ package testsuites import ( "fmt" - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -140,7 +140,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) } - It("should be mountable", func() { + ginkgo.It("should be mountable", func() { skipPersistenceTest(driver) init() defer func() { @@ -171,7 +171,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) }) - It("should allow exec of files on the volume", func() { + ginkgo.It("should allow exec of files on the volume", func() { skipExecTest(driver) init() defer cleanup() @@ -229,10 +229,10 @@ func testScriptInPod( NodeName: config.ClientNodeName, }, } - By(fmt.Sprintf("Creating pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName}) - By(fmt.Sprintf("Deleting pod %s", pod.Name)) + ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) err := framework.DeletePodWithWait(f, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting pod") } diff --git a/test/e2e/storage/utils/local.go b/test/e2e/storage/utils/local.go index 6f9c4d1130f..93a0a9ff1e4 100644 --- a/test/e2e/storage/utils/local.go +++ b/test/e2e/storage/utils/local.go @@ -25,7 +25,7 @@ import ( "path/filepath" "strings" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" @@ -99,7 +99,7 @@ func (l *ltrMgr) getTestDir() string { func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource { hostDir := l.getTestDir() - By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir)) + ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir)) err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node) framework.ExpectNoError(err) return &LocalTestResource{ @@ -109,18 +109,18 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri } func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) { - By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path)) + ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path)) err := l.hostExec.IssueCommand(fmt.Sprintf("sudo umount %q", ltr.Path), ltr.Node) framework.ExpectNoError(err) - By("Removing the test directory") + ginkgo.By("Removing the test directory") err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node) framework.ExpectNoError(err) } // createAndSetupLoopDevice creates an empty file and associates a loop devie with it. func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) { - By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir)) + ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir)) mkdirCmd := fmt.Sprintf("mkdir -p %s", dir) count := size / 4096 // xfs requires at least 4096 blocks @@ -155,7 +155,7 @@ func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]stri // teardownLoopDevice tears down loop device by its associated storage directory. 
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) { loopDev := l.findLoopDevice(dir, node) - By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir)) + ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir)) losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev) err := l.hostExec.IssueCommand(losetupDeleteCmd, node) framework.ExpectNoError(err) @@ -164,7 +164,7 @@ func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) { func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) { l.teardownLoopDevice(ltr.loopDir, ltr.Node) - By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir)) + ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir)) removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir) err := l.hostExec.IssueCommand(removeCmd, ltr.Node) framework.ExpectNoError(err) @@ -204,7 +204,7 @@ func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string] } func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) { - By("Removing the test directory") + ginkgo.By("Removing the test directory") removeCmd := fmt.Sprintf("rm -r %s", ltr.Path) err := l.hostExec.IssueCommand(removeCmd, ltr.Node) framework.ExpectNoError(err) @@ -223,7 +223,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[str } func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) { - By("Removing the test directory") + ginkgo.By("Removing the test directory") hostDir := ltr.Path hostDirBackend := hostDir + "-backend" removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend) @@ -243,7 +243,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters } func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) { - By("Removing the test directory") + ginkgo.By("Removing the test directory") hostDir := ltr.Path removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir) err := l.hostExec.IssueCommand(removeCmd, ltr.Node) @@ -263,7 +263,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, paramet } func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) { - By("Removing the test directory") + ginkgo.By("Removing the test directory") hostDir := ltr.Path hostDirBackend := hostDir + "-backend" removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend) diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 5864ac10f2f..6d40cf976f1 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -25,8 +25,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -82,7 +82,7 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) { if err != nil { if err, ok := err.(uexec.CodeExitError); ok { actualExitCode := err.ExitStatus() - Expect(actualExitCode).To(Equal(exitCode), + gomega.Expect(actualExitCode).To(gomega.Equal(exitCode), "%q should fail with exit code %d, but failed with exit code %d and error message %q", bashExec, exitCode, actualExitCode, err) } else { @@ -91,7 +91,7 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) { bashExec, exitCode, err) } } - Expect(err).To(HaveOccurred(), "%q should fail with exit code %d, but exit without error", bashExec, exitCode) + gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exit without error", bashExec, exitCode) } // KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits @@ -138,7 +138,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) e2essh.LogResult(sshResult) - Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) + gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) if kOp == KStop { if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { @@ -155,7 +155,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { break } } - Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet") + gomega.Expect(isPidChanged).To(gomega.BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet") e2elog.Logf("Noticed that kubelet PID is changed. 
Waiting for 30 Seconds for Kubelet to come back") time.Sleep(30 * time.Second) } @@ -182,23 +182,23 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP)) e2essh.LogResult(sshResult) - Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID") - Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty") + gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID") + gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be Empty") return sshResult.Stdout } // TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) { - By("Writing to the volume.") + ginkgo.By("Writing to the volume.") file := "/mnt/_SUCCESS" out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file)) e2elog.Logf(out) framework.ExpectNoError(err) - By("Restarting kubelet") + ginkgo.By("Restarting kubelet") KubeletCommand(KRestart, c, clientPod) - By("Testing that written file is accessible.") + ginkgo.By("Testing that written file is accessible.") out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file)) e2elog.Logf(out) framework.ExpectNoError(err) @@ -212,28 +212,28 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f framework.ExpectNoError(err) nodeIP = nodeIP + ":22" - By("Expecting the volume mount to be found.") + ginkgo.By("Expecting the volume mount to be found.") result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) if checkSubpath { - By("Expecting the volume subpath mount to be found.") + ginkgo.By("Expecting the volume subpath mount to be found.") result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) } // This command is to make sure kubelet is started after test finishes no matter it fails or not. 
defer func() { KubeletCommand(KStart, c, clientPod) }() - By("Stopping the kubelet.") + ginkgo.By("Stopping the kubelet.") KubeletCommand(KStop, c, clientPod) - By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) + ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) if forceDelete { err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0)) } else { @@ -241,7 +241,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f } framework.ExpectNoError(err) - By("Starting the kubelet and waiting for pod to delete.") + ginkgo.By("Starting the kubelet and waiting for pod to delete.") KubeletCommand(KStart, c, clientPod) err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout) if err != nil { @@ -254,19 +254,19 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f time.Sleep(30 * time.Second) } - By("Expecting the volume mount not to be found.") + ginkgo.By("Expecting the volume mount not to be found.") result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") + gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName) if checkSubpath { - By("Expecting the volume subpath mount not to be found.") + ginkgo.By("Expecting the volume subpath mount not to be found.") result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).") + gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).") e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName) } } @@ -394,7 +394,7 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) - By("locating the provisioner pod") + ginkgo.By("locating the provisioner pod") pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) @@ -411,7 +411,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, } roleBindingClient := client.RbacV1().RoleBindings(namespace) for _, saName := range saNames { - By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName)) + ginkgo.By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName)) binding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "psp-" + saName, diff --git a/test/e2e/storage/volume_expand.go b/test/e2e/storage/volume_expand.go index 02d7dc25d78..6042ce2c60d 100644 --- a/test/e2e/storage/volume_expand.go +++ b/test/e2e/storage/volume_expand.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" @@ -51,7 +51,7 @@ var _ = utils.SIGDescribe("Volume expand", func() { ) f := framework.NewDefaultFramework("volume-expand") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("aws", "gce") c = f.ClientSet ns = f.Namespace.Name @@ -82,39 +82,39 @@ var _ = utils.SIGDescribe("Volume expand", func() { return tPVC, sc, nil } - AfterEach(func() { + ginkgo.AfterEach(func() { framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace)) framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(storageClassVar.Name, nil)) }) - It("should not allow expansion of pvcs without AllowVolumeExpansion property", func() { + ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func() { pvc, storageClassVar, err = setupFunc(false /* allowExpansion */, false /*BlockVolume*/) framework.ExpectNoError(err, "Error creating non-expandable PVC") - Expect(storageClassVar.AllowVolumeExpansion).To(BeNil()) + gomega.Expect(storageClassVar.AllowVolumeExpansion).To(gomega.BeNil()) pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) - By("Expanding non-expandable pvc") + ginkgo.By("Expanding non-expandable pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, c) - Expect(err).To(HaveOccurred(), "While updating non-expandable PVC") + gomega.Expect(err).To(gomega.HaveOccurred(), "While updating non-expandable PVC") }) - It("Verify if editing PVC allows resize", func() { + ginkgo.It("Verify if editing PVC allows resize", func() { pvc, storageClassVar, err = setupFunc(true /* allowExpansion */, false /*BlockVolume*/) framework.ExpectNoError(err, "Error creating non-expandable PVC") - By("Waiting for pvc to be in bound phase") + ginkgo.By("Waiting for pvc to be in bound phase") pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) - By("Creating a pod with dynamically provisioned volume") + ginkgo.By("Creating a pod with dynamically provisioned volume") pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "") framework.ExpectNoError(err, "While creating pods for resizing") defer func() { @@ -122,34 +122,34 @@ var _ = utils.SIGDescribe("Volume expand", func() { framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test") }() - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, c) framework.ExpectNoError(err, "While updating pvc for more size") - Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvc.Name) } - By("Waiting for cloudprovider resize to finish") + ginkgo.By("Waiting for cloudprovider resize to finish") err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc 
resize to finish") - By("Checking for conditions on pvc") + ginkgo.By("Checking for conditions on pvc") pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While fetching pvc after controller resize") inProgressConditions := pvc.Status.Conditions - Expect(len(inProgressConditions)).To(Equal(1), "pvc must have file system resize pending condition") - Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition") + gomega.Expect(len(inProgressConditions)).To(gomega.Equal(1), "pvc must have file system resize pending condition") + gomega.Expect(inProgressConditions[0].Type).To(gomega.Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition") - By("Deleting the previously created pod") + ginkgo.By("Deleting the previously created pod") err = framework.DeletePodWithWait(f, c, pod) framework.ExpectNoError(err, "while deleting pod for resizing") - By("Creating a new pod with same volume") + ginkgo.By("Creating a new pod with same volume") pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "") framework.ExpectNoError(err, "while recreating pod for resizing") defer func() { @@ -157,44 +157,44 @@ var _ = utils.SIGDescribe("Volume expand", func() { framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test") }() - By("Waiting for file system resize to finish") + ginkgo.By("Waiting for file system resize to finish") pvc, err = waitForFSResize(pvc, c) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") }) - It("should allow expansion of block volumes", func() { + ginkgo.It("should allow expansion of block volumes", func() { pvc, storageClassVar, err = setupFunc(true /*allowExpansion*/, true /*blockVolume*/) - By("Waiting for pvc to be in bound phase") + ginkgo.By("Waiting for pvc to be in bound phase") pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) - Expect(len(pvs)).To(Equal(1)) + gomega.Expect(len(pvs)).To(gomega.Equal(1)) - By("Expanding current pvc") + ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") pvc, err = expandPVCSize(pvc, newSize, c) framework.ExpectNoError(err, "While updating pvc for more size") - Expect(pvc).NotTo(BeNil()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvc.Name) } - By("Waiting for cloudprovider resize to finish") + ginkgo.By("Waiting for cloudprovider resize to finish") err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") - By("Waiting for file system resize to finish") + ginkgo.By("Waiting for file system resize to finish") pvc, err = waitForFSResize(pvc, c) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := pvc.Status.Conditions - Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions") }) }) diff --git 
a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go index 3d17d357523..97c82b25907 100644 --- a/test/e2e/storage/volume_limits.go +++ b/test/e2e/storage/volume_limits.go @@ -17,7 +17,7 @@ limitations under the License. package storage import ( - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" @@ -30,13 +30,13 @@ var _ = utils.SIGDescribe("Volume limits", func() { c clientset.Interface ) f := framework.NewDefaultFramework("volume-limits-on-node") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("aws", "gce", "gke") c = f.ClientSet framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) }) - It("should verify that all nodes have volume limits", func() { + ginkgo.It("should verify that all nodes have volume limits", func() { nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if len(nodeList.Items) == 0 { framework.Failf("Unable to find ready and schedulable Node") diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 3858e3d1e9b..a077a5aeba9 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/prometheus/common/model" v1 "k8s.io/api/core/v1" @@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ) f := framework.NewDefaultFramework("pv") - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name var err error @@ -73,7 +73,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) if err != nil { e2elog.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err) @@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } }) - It("should create prometheus metrics for volume provisioning and attach/detach", func() { + ginkgo.It("should create prometheus metrics for volume provisioning and attach/detach", func() { var err error if !metricsGrabber.HasRegisteredMaster() { @@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err) - Expect(pvc).ToNot(Equal(nil)) + gomega.Expect(pvc).ToNot(gomega.Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} @@ -123,8 +123,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber) - Expect(len(updatedStorageMetrics.latencyMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics") - Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics") + gomega.Expect(len(updatedStorageMetrics.latencyMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics") + gomega.Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics") volumeOperations := []string{"volume_provision", "volume_detach", "volume_attach"} @@ -133,7 +133,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } }) - It("should 
create prometheus metrics for volume provisioning errors [Slow]", func() { + ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func() { var err error if !metricsGrabber.HasRegisteredMaster() { @@ -146,7 +146,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { storageOpMetrics := getControllerStorageMetrics(controllerMetrics) - By("Creating an invalid storageclass") + ginkgo.By("Creating an invalid storageclass") defaultClass, err := c.StorageV1().StorageClasses().Get(defaultScName, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting default storageclass: %v", err) @@ -165,35 +165,35 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pvc.Spec.StorageClassName = &invalidSc.Name pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name) - Expect(pvc).ToNot(Equal(nil)) + gomega.Expect(pvc).ToNot(gomega.Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} - By("Creating a pod and expecting it to fail") + ginkgo.By("Creating a pod and expecting it to fail") pod := framework.MakePod(ns, nil, claims, false, "") pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) - By("Checking failure metrics") + ginkgo.By("Checking failure metrics") updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager() framework.ExpectNoError(err, "failed to get controller manager metrics") updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics) - Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics") + gomega.Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics") verifyMetricCount(storageOpMetrics, updatedStorageMetrics, "volume_provision", true) }) - It("should create volume metrics with the correct PVC ref", func() { + ginkgo.It("should create volume metrics with the correct PVC ref", func() { var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err) - Expect(pvc).ToNot(Equal(nil)) + gomega.Expect(pvc).ToNot(gomega.Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} pod := framework.MakePod(ns, nil, claims, false, "") @@ -239,18 +239,18 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { for _, key := range volumeStatKeys { kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key) found := findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics) - Expect(found).To(BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName) + gomega.Expect(found).To(gomega.BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName) } e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) }) - It("should create metrics for total time taken in volume operations in P/V Controller", func() { + ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() { 
var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err) - Expect(pvc).ToNot(Equal(nil)) + gomega.Expect(pvc).ToNot(gomega.Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} pod := framework.MakePod(ns, nil, claims, false, "") @@ -271,17 +271,17 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { metricKey := "volume_operation_total_seconds_count" dimensions := []string{"operation_name", "plugin_name"} valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...) - Expect(valid).To(BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey) + gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) }) - It("should create volume metrics in Volume Manager", func() { + ginkgo.It("should create volume metrics in Volume Manager", func() { var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err) - Expect(pvc).ToNot(Equal(nil)) + gomega.Expect(pvc).ToNot(gomega.Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} pod := framework.MakePod(ns, nil, claims, false, "") @@ -301,17 +301,17 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { totalVolumesKey := "volume_manager_total_volumes" dimensions := []string{"state", "plugin_name"} valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...) - Expect(valid).To(BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey) + gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) }) - It("should create metrics for total number of volumes in A/D Controller", func() { + ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() { var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) framework.ExpectNoError(err) - Expect(pvc).ToNot(Equal(nil)) + gomega.Expect(pvc).ToNot(gomega.Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} pod := framework.MakePod(ns, nil, claims, false, "") @@ -339,7 +339,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // Forced detach metric should be present forceDetachKey := "attachdetach_controller_forced_detaches" _, ok := updatedControllerMetrics[forceDetachKey] - Expect(ok).To(BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey) + gomega.Expect(ok).To(gomega.BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey) // Wait and validate totalVolumesKey := "attachdetach_controller_total_volumes" @@ -357,7 +357,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } for pluginName, numVolumes := range updatedStates[stateName] { oldNumVolumes := oldStates[stateName][pluginName] - Expect(numVolumes).To(BeNumerically(">=", oldNumVolumes), + gomega.Expect(numVolumes).To(gomega.BeNumerically(">=", oldNumVolumes), "Wrong number of volumes in state %q, plugin %q: wanted >=%d, got %d", stateName, pluginName, oldNumVolumes, numVolumes) } @@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { }) // Test for pv controller metrics, concretely: bound/unbound pv/pvc count. 
- Describe("PVController", func() { + ginkgo.Describe("PVController", func() { const ( classKey = "storage_class" namespaceKey = "namespace" @@ -414,7 +414,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // should be 4, and the elements should be bound pv count, unbound pv count, bound // pvc count, unbound pvc count in turn. validator := func(metricValues []map[string]int64) { - Expect(len(metricValues)).To(Equal(4), + gomega.Expect(len(metricValues)).To(gomega.Equal(4), "Wrong metric size: %d", len(metricValues)) controllerMetrics, err := metricsGrabber.GrabFromControllerManager() @@ -430,13 +430,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // test suit are equal to expectValues. actualValues := calculateRelativeValues(originMetricValues[i], getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension)) - Expect(actualValues).To(Equal(expectValues), + gomega.Expect(actualValues).To(gomega.Equal(expectValues), "Wrong pv controller metric %s(%s): wanted %v, got %v", metric.name, metric.dimension, expectValues, actualValues) } } - BeforeEach(func() { + ginkgo.BeforeEach(func() { if !metricsGrabber.HasRegisteredMaster() { framework.Skipf("Environment does not support getting controller-manager metrics - skipping") } @@ -453,7 +453,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { if err := framework.DeletePersistentVolume(c, pv.Name); err != nil { framework.Failf("Error deleting pv: %v", err) } @@ -465,11 +465,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { originMetricValues = nil }) - It("should create none metrics for pvc controller before creating any PV or PVC", func() { + ginkgo.It("should create none metrics for pvc controller before creating any PV or PVC", func() { validator([]map[string]int64{nil, nil, nil, nil}) }) - It("should create unbound pv count metrics for pvc controller after creating pv only", + ginkgo.It("should create unbound pv count metrics for pvc controller after creating pv only", func() { var err error pv, err = framework.CreatePV(c, pv) @@ -478,7 +478,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { validator([]map[string]int64{nil, {className: 1}, nil, nil}) }) - It("should create unbound pvc count metrics for pvc controller after creating pvc only", + ginkgo.It("should create unbound pvc count metrics for pvc controller after creating pvc only", func() { var err error pvc, err = framework.CreatePVC(c, ns, pvc) @@ -487,7 +487,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { validator([]map[string]int64{nil, nil, nil, {ns: 1}}) }) - It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc", + ginkgo.It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc", func() { var err error pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) @@ -578,10 +578,10 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN newLatencyCount, ok := newMetrics.latencyMetrics[metricName] if !expectFailure { - Expect(ok).To(BeTrue(), "Error getting updated latency metrics for %s", metricName) + gomega.Expect(ok).To(gomega.BeTrue(), "Error getting updated latency metrics for %s", metricName) } newStatusCounts, ok := newMetrics.statusMetrics[metricName] - Expect(ok).To(BeTrue(), "Error getting updated status metrics for %s", metricName) + gomega.Expect(ok).To(gomega.BeTrue(), "Error getting 
updated status metrics for %s", metricName) newStatusCount := int64(0) if expectFailure { @@ -594,9 +594,9 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN // even if the test is run serially. We really just verify if new count // is greater than old count if !expectFailure { - Expect(newLatencyCount).To(BeNumerically(">", oldLatencyCount), "New latency count %d should be more than old count %d for action %s", newLatencyCount, oldLatencyCount, metricName) + gomega.Expect(newLatencyCount).To(gomega.BeNumerically(">", oldLatencyCount), "New latency count %d should be more than old count %d for action %s", newLatencyCount, oldLatencyCount, metricName) } - Expect(newStatusCount).To(BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName) + gomega.Expect(newStatusCount).To(gomega.BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName) } func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) *storageControllerMetrics { @@ -659,7 +659,7 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string } } } - Expect(errCount).To(Equal(0), "Found invalid samples") + gomega.Expect(errCount).To(gomega.Equal(0), "Found invalid samples") return found } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 6b65055951d..a07297ad09f 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -21,8 +21,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" @@ -68,7 +68,7 @@ func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZo // with key LabelZoneFailureDomain in PV's node affinity contains zone // matchZones is used to indicate if zones should match perfectly func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) { - By("checking PV's zone label and node affinity terms match expected zone") + ginkgo.By("checking PV's zone label and node affinity terms match expected zone") if pv == nil { framework.Failf("nil pv passed") } @@ -222,7 +222,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop topoZone = getRandomClusterZone(c) addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone) } - By(action) + ginkgo.By(action) var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { claim := newClaim(test, ns, suffix) @@ -253,13 +253,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { var c clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) - Describe("DynamicProvisioner [Slow]", func() { - It("should provision storage with different parameters", func() { + ginkgo.Describe("DynamicProvisioner [Slow]", func() { + ginkgo.It("should provision storage with different parameters", func() { // This test checks that dynamic provisioning can provision a volume // that can be used to persist data among pods. 
@@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkGCEPD(volume, "pd-ssd") framework.ExpectNoError(err, "checkGCEPD pd-ssd") @@ -294,7 +294,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkGCEPD(volume, "pd-standard") framework.ExpectNoError(err, "checkGCEPD pd-standard") @@ -313,7 +313,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "gp2", false) framework.ExpectNoError(err, "checkAWSEBS gp2") @@ -331,7 +331,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "4Gi", // 4 GiB is minimum for io1 PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "io1", false) framework.ExpectNoError(err, "checkAWSEBS io1") @@ -348,7 +348,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "500Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "sc1", false) framework.ExpectNoError(err, "checkAWSEBS sc1") @@ -365,7 +365,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "500Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "st1", false) framework.ExpectNoError(err, "checkAWSEBS st1") @@ -382,7 +382,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "1Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "gp2", true) framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted") @@ -454,7 +454,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // Remember the last supported test for subsequent test of beta API betaTest = &test - By("Testing " + test.Name) + ginkgo.By("Testing " + test.Name) suffix := fmt.Sprintf("%d", i) test.Client = c test.Class = newStorageClass(test, ns, suffix) @@ -465,7 +465,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // Run the last test with storage.k8s.io/v1beta1 on pvc if betaTest != nil { 
- By("Testing " + betaTest.Name + " with beta volume provisioning") + ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning") class := newBetaStorageClass(*betaTest, "beta") // we need to create the class manually, testDynamicProvisioning does not accept beta class class, err := c.StorageV1beta1().StorageClasses().Create(class) @@ -480,7 +480,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } }) - It("should provision storage with non-default reclaim policy Retain", func() { + ginkgo.It("should provision storage with non-default reclaim policy Retain", func() { framework.SkipUnlessProviderIs("gce", "gke") test := testsuites.StorageClassTest{ @@ -495,7 +495,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "1Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) - Expect(volume).NotTo(BeNil(), "get bound PV") + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkGCEPD(volume, "pd-standard") framework.ExpectNoError(err, "checkGCEPD") @@ -508,22 +508,22 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { test.Claim.Spec.StorageClassName = &test.Class.Name pv := test.TestDynamicProvisioning() - By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) + ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) - By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name)) + ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name)) framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName)) - By(fmt.Sprintf("deleting the PV %q", pv.Name)) + ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name)) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) }) - It("should not provision a volume in an unmanaged GCE zone.", func() { + ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() { framework.SkipUnlessProviderIs("gce", "gke") var suffix string = "unmananged" - By("Discovering an unmanaged zone") + ginkgo.By("Discovering an unmanaged zone") allZones := sets.NewString() // all zones in the project managedZones := sets.NewString() // subset of allZones @@ -550,7 +550,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.Skipf("No unmanaged zones found.") } - By("Creating a StorageClass for the unmanaged zone") + ginkgo.By("Creating a StorageClass for the unmanaged zone") test := testsuites.StorageClassTest{ Name: "unmanaged_zone", Provisioner: "kubernetes.io/gce-pd", @@ -562,7 +562,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) defer deleteStorageClass(c, sc.Name) - By("Creating a claim and expecting it to timeout") + ginkgo.By("Creating a claim and expecting it to timeout") pvc := newClaim(test, ns, suffix) pvc.Spec.StorageClassName = &sc.Name pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) @@ -573,11 +573,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 
2*time.Second, framework.ClaimProvisionShortTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) e2elog.Logf(err.Error()) }) - It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { + ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { // This case tests for the regressions of a bug fixed by PR #21268 // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV // not being deleted. @@ -587,7 +587,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { const raceAttempts int = 100 var residualPVs []*v1.PersistentVolume - By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts)) + ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts)) test := testsuites.StorageClassTest{ Name: "deletion race", Provisioner: "", // Use a native one based on current cloud provider @@ -609,7 +609,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) } - By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) + ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) framework.ExpectNoError(err) // Cleanup the test resources before breaking @@ -626,18 +626,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { e2elog.Logf("0 PersistentVolumes remain.") }) - It("deletion should be idempotent", func() { + ginkgo.It("deletion should be idempotent", func() { // This test ensures that deletion of a volume is idempotent. // It creates a PV with Retain policy, deletes underlying AWS / GCE // volume and changes the reclaim policy to Delete. // PV controller should delete the PV even though the underlying volume // is already deleted. 
framework.SkipUnlessProviderIs("gce", "gke", "aws") - By("creating PD") + ginkgo.By("creating PD") diskName, err := framework.CreatePDWithRetry() framework.ExpectNoError(err) - By("creating PV") + ginkgo.By("creating PV") pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "volume-idempotent-delete-", @@ -680,29 +680,29 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { pv, err = c.CoreV1().PersistentVolumes().Create(pv) framework.ExpectNoError(err) - By("waiting for the PV to get Released") + ginkgo.By("waiting for the PV to get Released") err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout) framework.ExpectNoError(err) - By("deleting the PD") + ginkgo.By("deleting the PD") err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource) framework.ExpectNoError(err) - By("changing the PV reclaim policy") + ginkgo.By("changing the PV reclaim policy") pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete pv, err = c.CoreV1().PersistentVolumes().Update(pv) framework.ExpectNoError(err) - By("waiting for the PV to get deleted") + ginkgo.By("waiting for the PV to get deleted") err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout) framework.ExpectNoError(err) }) }) - Describe("DynamicProvisioner External", func() { - It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { + ginkgo.Describe("DynamicProvisioner External", func() { + ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { // external dynamic provisioner pods need additional permissions provided by the // persistent-volume-provisioner clusterrole and a leader-locking role serviceAccountName := "default" @@ -736,11 +736,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization") - By("creating an external dynamic provisioner pod") + ginkgo.By("creating an external dynamic provisioner pod") pod := utils.StartExternalProvisioner(c, ns, externalPluginName) defer framework.DeletePodOrFail(c, ns, pod.Name) - By("creating a StorageClass") + ginkgo.By("creating a StorageClass") test := testsuites.StorageClassTest{ Client: c, Name: "external provisioner test", @@ -752,16 +752,16 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { test.Claim = newClaim(test, ns, "external") test.Claim.Spec.StorageClassName = &test.Class.Name - By("creating a claim with a external provisioning annotation") + ginkgo.By("creating a claim with a external provisioning annotation") test.TestDynamicProvisioning() }) }) - Describe("DynamicProvisioner Default", func() { - It("should create and delete default persistent volumes [Slow]", func() { + ginkgo.Describe("DynamicProvisioner Default", func() { + ginkgo.It("should create and delete default persistent volumes [Slow]", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") - By("creating a claim with no annotation") + ginkgo.By("creating a claim with no annotation") test := testsuites.StorageClassTest{ Client: c, Name: "default", @@ -774,7 +774,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) // Modifying the default storage 
class can be disruptive to other tests that depend on it - It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() { + ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") scName, scErr := framework.GetDefaultStorageClassName(c) if scErr != nil { @@ -785,12 +785,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "2Gi", } - By("setting the is-default StorageClass annotation to false") + ginkgo.By("setting the is-default StorageClass annotation to false") verifyDefaultStorageClass(c, scName, true) defer updateDefaultStorageClass(c, scName, "true") updateDefaultStorageClass(c, scName, "false") - By("creating a claim with default storageclass and expecting it to timeout") + ginkgo.By("creating a claim with default storageclass and expecting it to timeout") claim := newClaim(test, ns, "default") claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) framework.ExpectNoError(err) @@ -800,15 +800,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) e2elog.Logf(err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) + gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending)) }) // Modifying the default storage class can be disruptive to other tests that depend on it - It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() { + ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") scName, scErr := framework.GetDefaultStorageClassName(c) if scErr != nil { @@ -819,12 +819,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "2Gi", } - By("removing the is-default StorageClass annotation") + ginkgo.By("removing the is-default StorageClass annotation") verifyDefaultStorageClass(c, scName, true) defer updateDefaultStorageClass(c, scName, "true") updateDefaultStorageClass(c, scName, "") - By("creating a claim with default storageclass and expecting it to timeout") + ginkgo.By("creating a claim with default storageclass and expecting it to timeout") claim := newClaim(test, ns, "default") claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) framework.ExpectNoError(err) @@ -834,21 +834,21 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) e2elog.Logf(err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) + gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending)) }) }) framework.KubeDescribe("GlusterDynamicProvisioner", func() { - It("should create and delete persistent volumes [fast]", func() { + 
ginkgo.It("should create and delete persistent volumes [fast]", func() { framework.SkipIfProviderIs("gke") - By("creating a Gluster DP server Pod") + ginkgo.By("creating a Gluster DP server Pod") pod := startGlusterDpServerPod(c, ns) serverUrl := "http://" + pod.Status.PodIP + ":8081" - By("creating a StorageClass") + ginkgo.By("creating a StorageClass") test := testsuites.StorageClassTest{ Client: c, Name: "Gluster Dynamic provisioner test", @@ -860,7 +860,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { suffix := fmt.Sprintf("glusterdptest") test.Class = newStorageClass(test, ns, suffix) - By("creating a claim object with a suffix for gluster dynamic provisioner") + ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") test.Claim = newClaim(test, ns, suffix) test.Claim.Spec.StorageClassName = &test.Class.Name @@ -868,8 +868,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) }) - Describe("Invalid AWS KMS key", func() { - It("should report an error and create no PV", func() { + ginkgo.Describe("Invalid AWS KMS key", func() { + ginkgo.It("should report an error and create no PV", func() { framework.SkipUnlessProviderIs("aws") test := testsuites.StorageClassTest{ Name: "AWS EBS with invalid KMS key", @@ -878,7 +878,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"}, } - By("creating a StorageClass") + ginkgo.By("creating a StorageClass") suffix := fmt.Sprintf("invalid-aws") class := newStorageClass(test, ns, suffix) class, err := c.StorageV1().StorageClasses().Create(class) @@ -888,7 +888,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil)) }() - By("creating a claim object with a suffix for gluster dynamic provisioner") + ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") claim := newClaim(test, ns, suffix) claim.Spec.StorageClassName = &class.Name claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) @@ -932,14 +932,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) }) }) - Describe("DynamicProvisioner delayed binding [Slow]", func() { - It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() { + ginkgo.Describe("DynamicProvisioner delayed binding [Slow]", func() { + ginkgo.It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() { testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/) testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/) }) }) - Describe("DynamicProvisioner allowedTopologies", func() { - It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() { + ginkgo.Describe("DynamicProvisioner allowedTopologies", func() { + ginkgo.It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() { tests := []testsuites.StorageClassTest{ { Name: "AllowedTopologies EBS storage class test", @@ -961,7 +961,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders) continue } - By("creating a claim with class with allowedTopologies set") + ginkgo.By("creating a claim with 
class with allowedTopologies set") suffix := "topology" test.Client = c test.Class = newStorageClass(test, ns, suffix) @@ -974,8 +974,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } }) }) - Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() { - It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() { + ginkgo.Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() { + ginkgo.It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() { testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/) testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/) }) @@ -985,7 +985,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(Equal(expectedDefault)) + gomega.Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(gomega.Equal(expectedDefault)) } func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) { @@ -1181,7 +1181,7 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod { framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) - By("locating the provisioner pod") + ginkgo.By("locating the provisioner pod") pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) return pod @@ -1231,8 +1231,8 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten func getRandomClusterZone(c clientset.Interface) string { zones, err := framework.GetClusterZones(c) - Expect(err).ToNot(HaveOccurred()) - Expect(len(zones)).ToNot(Equal(0)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(len(zones)).ToNot(gomega.Equal(0)) zonesList := zones.UnsortedList() return zonesList[rand.Intn(zones.Len())] diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index d77e11c222a..49224c31e3f 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -18,7 +18,7 @@ limitations under the License. package storage import ( - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -36,13 +36,13 @@ var _ = utils.SIGDescribe("Volumes", func() { var cs clientset.Interface var namespace *v1.Namespace - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet namespace = f.Namespace }) - Describe("ConfigMap", func() { - It("should be mountable", func() { + ginkgo.Describe("ConfigMap", func() { + ginkgo.It("should be mountable", func() { config := volume.TestConfig{ Namespace: namespace.Name, Prefix: "configmap", From 629ec7e113f08c579ec2bb501e78f100090c6f78 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 13:58:09 +0800 Subject: [PATCH 092/194] replace test error checking with more readable way --- test/e2e/storage/persistent_volumes-local.go | 8 ++++---- test/e2e/storage/testsuites/volumemode.go | 4 ++-- test/e2e/storage/volume_metrics.go | 2 +- test/e2e/storage/volume_provisioning.go | 6 +++--- .../vsphere/vsphere_volume_datastore.go | 5 ++--- .../storage/vsphere/vsphere_volume_fstype.go | 2 +- .../vsphere/vsphere_volume_vsan_policy.go | 18 +++++++++--------- .../storage/vsphere/vsphere_zone_support.go | 4 ++-- 8 files changed, 24 insertions(+), 25 deletions(-) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 5b06a95e54e..ec59ea81d51 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -313,9 +313,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By("Creating local PVC and PV") createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode) pod, err := createLocalPod(config, testVol, nil) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) cleanupLocalPVCsPVs(config, []*localTestVolume{testVol}) }) @@ -332,7 +332,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { framework.ExpectNoError(err) err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) cleanupLocalVolumes(config, []*localTestVolume{testVol}) }) @@ -932,7 +932,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod } return false, nil }) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) } } diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index f13b295916e..03fcab0d1e5 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -193,7 +193,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern defer func() { framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) }() - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) }) } else { ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { @@ -251,7 +251,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern framework.ExpectNoError(err) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, 
framework.ClaimProvisionTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) }) } else { ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index a077a5aeba9..d4be2056022 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index a07297ad09f..7838662f241 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -573,7 +573,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) e2elog.Logf(err.Error()) }) @@ -800,7 +800,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) e2elog.Logf(err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -834,7 +834,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) e2elog.Logf(err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index 5e1d74f227b..e40776767d8 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -22,7 +22,6 @@ import ( "time" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -69,7 +68,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", scParameters[Datastore] = InvalidDatastore scParameters[DiskFormat] = ThinDisk err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found` if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -90,7 +89,7 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace 
string, ginkgo.By("Expect claim to fail provisioning volume") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 74982c083ab..158aa5ee30b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -126,7 +126,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa pvclaims = append(pvclaims, pvclaim) // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index ab587cb51db..75b6c178aae 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -159,7 +159,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "Invalid value for " + Policy_DiskStripes + "." if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -188,7 +188,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "." if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -204,7 +204,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[Datastore] = VmfsDatastore e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. 
" + "The policy parameters will work only with VSAN Datastore." if !strings.Contains(err.Error(), errorMsg) { @@ -236,7 +236,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -249,7 +249,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -317,7 +317,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) @@ -337,7 +337,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN pvclaims = append(pvclaims, pvclaim) ginkgo.By("Expect claim to fail provisioning volume") _, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index 03f0e0052ff..693c863d3c4 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -132,7 +132,7 @@ var _ = utils.SIGDescribe("Zone Support", func() { ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD)) zones = append(zones, zoneD) err := verifyPVCCreationFails(client, namespace, nil, zones) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -357,7 
+357,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) e2elog.Logf("Failure message : %+q", eventList.Items[0].Message) From 081ec69386257e557c0fd3ab0ea0cad3a0f9253d Mon Sep 17 00:00:00 2001 From: Thomas Hartland Date: Wed, 8 May 2019 10:54:33 +0200 Subject: [PATCH 093/194] Abort node initialization if cloud taint was already removed If node events are received at a faster rate than they can be processed then initialization for some nodes will be delayed. Once they are eventually processed their cloud taint is removed, but there may already be several update events for those nodes with the cloud taint still on them already in the event queue. To avoid re-initializing those nodes, the cloud taint is checked for again after requesting the current state of the node. If the cloud taint is no longer on the node then nil is returned from the RetryOnConflict, as an error does not need to be logged. The logging for a successful initialization is also moved inside the RetryOnConflict so that the early nil return does not cause the aborted initialization to be logged as a success. --- pkg/controller/cloud/node_controller.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index b592ef74666..182e9bcd05d 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -256,6 +256,13 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) { return err } + cloudTaint := getCloudTaint(curNode.Spec.Taints) + if cloudTaint == nil { + // Node object received from event had the cloud taint but was outdated, + // the node has actually already been initialized. + return nil + } + if curNode.Spec.ProviderID == "" { providerID, err := cloudprovider.GetInstanceProviderID(context.TODO(), cnc.cloud, types.NodeName(curNode.Name)) if err == nil { @@ -312,14 +319,14 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) { // After adding, call UpdateNodeAddress to set the CloudProvider provided IPAddresses // So that users do not see any significant delay in IP addresses being filled into the node cnc.updateNodeAddress(curNode, instances) + + klog.Infof("Successfully initialized node %s with cloud provider", node.Name) return nil }) if err != nil { utilruntime.HandleError(err) return } - - klog.Infof("Successfully initialized node %s with cloud provider", node.Name) } func getCloudTaint(taints []v1.Taint) *v1.Taint { From 8f6619bc219e545801505f89f1ae85a1fa0f5ef9 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Fri, 10 May 2019 09:37:54 +0200 Subject: [PATCH 094/194] apiextensions: always sort structural schema violations, not only in condition --- .../pkg/apiserver/schema/validation.go | 7 +++++++ .../nonstructuralschema/nonstructuralschema_controller.go | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go index 1e59ee18303..c6af544ebbc 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go @@ -18,6 +18,7 @@ package schema import ( "reflect" + "sort" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -62,6 +63,12 @@ func ValidateStructural(s *Structural, fldPath *field.Path) field.ErrorList { allErrs = append(allErrs, validateStructuralInvariants(s, rootLevel, fldPath)...) allErrs = append(allErrs, validateStructuralCompleteness(s, fldPath)...) + // sort error messages. Otherwise, the errors slice will change every time due to + // maps in the types and randomized iteration. + sort.Slice(allErrs, func(i, j int) bool { + return allErrs[i].Error() < allErrs[j].Error() + }) + return allErrs } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go index 1986cedd2cc..e12bd20bfd1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go @@ -18,7 +18,6 @@ package nonstructuralschema import ( "fmt" - "sort" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -115,12 +114,6 @@ func calculateCondition(in *apiextensions.CustomResourceDefinition) *apiextensio return nil } - // sort error messages. Otherwise, the condition message will change every sync due to - // randomized map iteration. - sort.Slice(allErrs, func(i, j int) bool { - return allErrs[i].Error() < allErrs[j].Error() - }) - cond.Status = apiextensions.ConditionTrue cond.Reason = "Violations" cond.Message = allErrs.ToAggregate().Error() From 9c3af43c845b025850cf937b5139b9b60afe8607 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Tue, 7 May 2019 18:48:56 +0200 Subject: [PATCH 095/194] apiextensions: add structural schema visitor --- .../pkg/apiserver/schema/visitor.go | 106 ++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go new file mode 100644 index 00000000000..1f4267ddee5 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// Visitor recursively walks through a structural schema. +type Visitor struct { + // Structural is called on each Structural node in the schema, before recursing into + // the subtrees. It is allowed to mutate s. Return true if something has been changed. + // +optional + Structural func(s *Structural) bool + // NestedValueValidation is called on each NestedValueValidation node in the schema, + // before recursing into subtrees. It is allowed to mutate vv. Return true if something + // has been changed. + // +optional + NestedValueValidation func(vv *NestedValueValidation) bool +} + +// Visit recursively walks through the structural schema and calls the given callbacks +// at each node of those types. +func (m *Visitor) Visit(s *Structural) { + m.visitStructural(s) +} + +func (m *Visitor) visitStructural(s *Structural) bool { + ret := false + if m.Structural != nil { + ret = m.Structural(s) + } + + if s.Items != nil { + m.visitStructural(s.Items) + } + for k, v := range s.Properties { + if changed := m.visitStructural(&v); changed { + ret = true + s.Properties[k] = v + } + } + if s.Generic.AdditionalProperties != nil && s.Generic.AdditionalProperties.Structural != nil { + m.visitStructural(s.Generic.AdditionalProperties.Structural) + } + if s.ValueValidation != nil { + for i := range s.ValueValidation.AllOf { + m.visitNestedValueValidation(&s.ValueValidation.AllOf[i]) + } + for i := range s.ValueValidation.AnyOf { + m.visitNestedValueValidation(&s.ValueValidation.AnyOf[i]) + } + for i := range s.ValueValidation.OneOf { + m.visitNestedValueValidation(&s.ValueValidation.OneOf[i]) + } + if s.ValueValidation.Not != nil { + m.visitNestedValueValidation(s.ValueValidation.Not) + } + } + + return ret +} + +func (m *Visitor) visitNestedValueValidation(vv *NestedValueValidation) bool { + ret := false + if m.NestedValueValidation != nil { + ret = m.NestedValueValidation(vv) + } + + if vv.Items != nil { + m.visitNestedValueValidation(vv.Items) + } + for k, v := range vv.Properties { + if changed := m.visitNestedValueValidation(&v); changed { + ret = true + vv.Properties[k] = v + } + } + if vv.ForbiddenGenerics.AdditionalProperties != nil && vv.ForbiddenGenerics.AdditionalProperties.Structural != nil { + m.visitStructural(vv.ForbiddenGenerics.AdditionalProperties.Structural) + } + for i := range vv.ValueValidation.AllOf { + m.visitNestedValueValidation(&vv.ValueValidation.AllOf[i]) + } + for i := range vv.ValueValidation.AnyOf { + m.visitNestedValueValidation(&vv.ValueValidation.AnyOf[i]) + } + for i := range vv.ValueValidation.OneOf { + m.visitNestedValueValidation(&vv.ValueValidation.OneOf[i]) + } + if vv.ValueValidation.Not != nil { + m.visitNestedValueValidation(vv.ValueValidation.Not) + } + + return ret +} From d74a9a9da658e1cb9ae23ae0622089fe09d65564 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Thu, 9 May 2019 13:23:52 +0200 Subject: [PATCH 096/194] apiextensions: disallow metadata specs other than name and generateName --- .../pkg/apiserver/schema/validation.go | 46 ++++++-- .../pkg/apiserver/schema/validation_test.go | 4 +- .../test/integration/validation_test.go | 108 ++++++++++++++++++ 3 files changed, 146 insertions(+), 12 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go index 1e59ee18303..7f476241d1d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go @@ -55,6 +55,7 @@ const ( // - ... zero or more // // * every specified field or array in s is also specified outside of value validation. +// * metadata at the root can only restrict the name and generateName, and not be specified at all in nested contexts. // * additionalProperties at the root is not allowed. func ValidateStructural(s *Structural, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -99,7 +100,7 @@ func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) } } - allErrs = append(allErrs, validateValueValidation(s.ValueValidation, skipAnyOf, skipFirstAllOfAnyOf, fldPath)...) + allErrs = append(allErrs, validateValueValidation(s.ValueValidation, skipAnyOf, skipFirstAllOfAnyOf, lvl, fldPath)...) if s.XEmbeddedResource && s.Type != "object" { if len(s.Type) == 0 { @@ -122,6 +123,26 @@ func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), s.Type, "must be object at the root")) } + // restrict metadata schemas to name and generateName only + if metadata, found := s.Properties["metadata"]; found && lvl == rootLevel { + // metadata is a shallow copy. We can mutate it. + _, foundName := metadata.Properties["name"] + _, foundGenerateName := metadata.Properties["generateName"] + if foundName && foundGenerateName && len(metadata.Properties) == 2 { + metadata.Properties = nil + } else if (foundName || foundGenerateName) && len(metadata.Properties) == 1 { + metadata.Properties = nil + } + metadata.Type = "" + if metadata.ValueValidation == nil { + metadata.ValueValidation = &ValueValidation{} + } + if !reflect.DeepEqual(metadata, Structural{ValueValidation: &ValueValidation{}}) { + // TODO: this is actually a field.Invalid error, but we cannot do JSON serialization of metadata here to get a proper message + allErrs = append(allErrs, field.Forbidden(fldPath.Child("properties").Key("metadata"), "must not specify anything other than name and generateName, but metadata is implicitly specified")) + } + } + if s.XEmbeddedResource && !s.XPreserveUnknownFields && s.Properties == nil { allErrs = append(allErrs, field.Required(fldPath.Child("properties"), "must not be empty if x-kubernetes-embedded-resource is true without x-kubernetes-preserve-unknown-fields")) } @@ -164,7 +185,7 @@ func validateExtensions(x *Extensions, fldPath *field.Path) field.ErrorList { } // validateValueValidation checks the value validation in a structural schema. 
-func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf bool, fldPath *field.Path) field.ErrorList { +func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf bool, lvl level, fldPath *field.Path) field.ErrorList { if v == nil { return nil } @@ -173,7 +194,7 @@ func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf if !skipAnyOf { for i := range v.AnyOf { - allErrs = append(allErrs, validateNestedValueValidation(&v.AnyOf[i], false, false, fldPath.Child("anyOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidation(&v.AnyOf[i], false, false, lvl, fldPath.Child("anyOf").Index(i))...) } } @@ -182,31 +203,31 @@ func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf if skipFirstAllOfAnyOf && i == 0 { skipAnyOf = true } - allErrs = append(allErrs, validateNestedValueValidation(&v.AllOf[i], skipAnyOf, false, fldPath.Child("allOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidation(&v.AllOf[i], skipAnyOf, false, lvl, fldPath.Child("allOf").Index(i))...) } for i := range v.OneOf { - allErrs = append(allErrs, validateNestedValueValidation(&v.OneOf[i], false, false, fldPath.Child("oneOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidation(&v.OneOf[i], false, false, lvl, fldPath.Child("oneOf").Index(i))...) } - allErrs = append(allErrs, validateNestedValueValidation(v.Not, false, false, fldPath.Child("not"))...) + allErrs = append(allErrs, validateNestedValueValidation(v.Not, false, false, lvl, fldPath.Child("not"))...) return allErrs } // validateNestedValueValidation checks the nested value validation under a logic junctor in a structural schema. -func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllOfAnyOf bool, fldPath *field.Path) field.ErrorList { +func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllOfAnyOf bool, lvl level, fldPath *field.Path) field.ErrorList { if v == nil { return nil } allErrs := field.ErrorList{} - allErrs = append(allErrs, validateValueValidation(&v.ValueValidation, skipAnyOf, skipAllOfAnyOf, fldPath)...) - allErrs = append(allErrs, validateNestedValueValidation(v.Items, false, false, fldPath.Child("items"))...) + allErrs = append(allErrs, validateValueValidation(&v.ValueValidation, skipAnyOf, skipAllOfAnyOf, lvl, fldPath)...) + allErrs = append(allErrs, validateNestedValueValidation(v.Items, false, false, lvl, fldPath.Child("items"))...) for k, fld := range v.Properties { - allErrs = append(allErrs, validateNestedValueValidation(&fld, false, false, fldPath.Child("properties").Key(k))...) + allErrs = append(allErrs, validateNestedValueValidation(&fld, false, false, fieldLevel, fldPath.Child("properties").Key(k))...) 
} if len(v.ForbiddenGenerics.Type) > 0 { @@ -238,5 +259,10 @@ func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllO allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-int-or-string"), "must be false to be structural")) } + // forbid reasoning about metadata because it can lead to metadata restriction we don't want + if _, found := v.Properties["metadata"]; found { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("properties").Key("metadata"), "must not be specified in a nested context")) + } + return allErrs } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go index 619040771a1..3067f672f9d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation_test.go @@ -49,7 +49,7 @@ func TestValidateNestedValueValidationComplete(t *testing.T) { i := rand.Intn(x.NumField()) fuzzer.Fuzz(x.Field(i).Addr().Interface()) - errs := validateNestedValueValidation(vv, false, false, nil) + errs := validateNestedValueValidation(vv, false, false, fieldLevel, nil) if len(errs) == 0 && !reflect.DeepEqual(vv.ForbiddenGenerics, Generic{}) { t.Errorf("expected ForbiddenGenerics validation errors for: %#v", vv) } @@ -63,7 +63,7 @@ func TestValidateNestedValueValidationComplete(t *testing.T) { i := rand.Intn(x.NumField()) fuzzer.Fuzz(x.Field(i).Addr().Interface()) - errs := validateNestedValueValidation(vv, false, false, nil) + errs := validateNestedValueValidation(vv, false, false, fieldLevel, nil) if len(errs) == 0 && !reflect.DeepEqual(vv.ForbiddenExtensions, Extensions{}) { t.Errorf("expected ForbiddenExtensions validation errors for: %#v", vv) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index 59b0f22870f..54a30fc50f6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -1222,6 +1222,114 @@ not: "spec.version[v1].schema.openAPIV3Schema.properties[d]: Required value: because it is defined in spec.version[v1].schema.openAPIV3Schema.not.properties[d]", }, }, + { + desc: "metadata with non-properties", + globalSchema: ` +type: object +properties: + metadata: + minimum: 42.0 +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties[metadata]: Forbidden: must not specify anything other than name and generateName, but metadata is implicitly specified", + "spec.validation.openAPIV3Schema.properties[metadata].type: Required value: must not be empty for specified object fields", + }, + }, + { + desc: "metadata with other properties", + globalSchema: ` +type: object +properties: + metadata: + properties: + name: + pattern: "^[a-z]+$" + labels: + type: object + maxLength: 4 +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.properties[metadata]: Forbidden: must not specify anything other than name and generateName, but metadata is implicitly specified", + "spec.validation.openAPIV3Schema.properties[metadata].type: Required value: must not be empty for specified object fields", + "spec.validation.openAPIV3Schema.properties[metadata].properties[name].type: Required value: must not be empty for specified object fields", + }, + }, + { + 
desc: "metadata with name property", + globalSchema: ` +type: object +properties: + metadata: + type: object + properties: + name: + type: string + pattern: "^[a-z]+$" +`, + expectedViolations: []string{}, + }, + { + desc: "metadata with generateName property", + globalSchema: ` +type: object +properties: + metadata: + type: object + properties: + generateName: + type: string + pattern: "^[a-z]+$" +`, + expectedViolations: []string{}, + }, + { + desc: "metadata with name and generateName property", + globalSchema: ` +type: object +properties: + metadata: + type: object + properties: + name: + type: string + pattern: "^[a-z]+$" + generateName: + type: string + pattern: "^[a-z]+$" +`, + expectedViolations: []string{}, + }, + { + desc: "metadata under junctors", + globalSchema: ` +type: object +properties: + metadata: + type: object + properties: + name: + type: string + pattern: "^[a-z]+$" +allOf: +- properties: + metadata: {} +anyOf: +- properties: + metadata: {} +oneOf: +- properties: + metadata: {} +not: + properties: + metadata: {} +`, + expectedViolations: []string{ + "spec.validation.openAPIV3Schema.anyOf[0].properties[metadata]: Forbidden: must not be specified in a nested context", + "spec.validation.openAPIV3Schema.allOf[0].properties[metadata]: Forbidden: must not be specified in a nested context", + "spec.validation.openAPIV3Schema.oneOf[0].properties[metadata]: Forbidden: must not be specified in a nested context", + "spec.validation.openAPIV3Schema.not.properties[metadata]: Forbidden: must not be specified in a nested context", + }, + }, } for i := range tests { From 56abfb5b012eb6f978086b7da8ca136bc437f8ee Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 10 May 2019 08:15:22 -0400 Subject: [PATCH 097/194] Update beorn7/perks to Fix off-by-one error for small sample sizes Change-Id: I444b345743251a13252321df0f385503a4271234 --- go.mod | 2 +- go.sum | 4 ++-- staging/src/k8s.io/apiextensions-apiserver/go.mod | 2 +- staging/src/k8s.io/apiextensions-apiserver/go.sum | 4 ++-- staging/src/k8s.io/apiserver/go.mod | 2 +- staging/src/k8s.io/apiserver/go.sum | 4 ++-- staging/src/k8s.io/component-base/go.mod | 2 +- staging/src/k8s.io/component-base/go.sum | 4 ++-- staging/src/k8s.io/kube-aggregator/go.mod | 2 +- staging/src/k8s.io/kube-aggregator/go.sum | 4 ++-- staging/src/k8s.io/kube-controller-manager/go.mod | 2 +- staging/src/k8s.io/kube-controller-manager/go.sum | 2 +- staging/src/k8s.io/kube-proxy/go.mod | 2 +- staging/src/k8s.io/kube-proxy/go.sum | 2 +- staging/src/k8s.io/kube-scheduler/go.mod | 2 +- staging/src/k8s.io/kube-scheduler/go.sum | 2 +- staging/src/k8s.io/legacy-cloud-providers/go.mod | 2 +- staging/src/k8s.io/legacy-cloud-providers/go.sum | 4 ++-- staging/src/k8s.io/sample-apiserver/go.mod | 2 +- staging/src/k8s.io/sample-apiserver/go.sum | 4 ++-- vendor/github.com/beorn7/perks/quantile/stream.go | 2 +- vendor/modules.txt | 2 +- 22 files changed, 29 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 6345697a4cc..f7e2599dd65 100644 --- a/go.mod +++ b/go.mod @@ -218,7 +218,7 @@ replace ( github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.16.26 github.com/bazelbuild/bazel-gazelle => github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e github.com/bazelbuild/buildtools => github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a 
github.com/blang/semver => github.com/blang/semver v3.5.0+incompatible github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1 github.com/cespare/prettybench => github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c diff --git a/go.sum b/go.sum index bfd9b4bd1ab..23afc8f834d 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e h1:k7E/Rd github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e/go.mod h1:uHBSeeATKpVazAACZBDPL/Nk/UhQDDsJWDlqYJo8/Us= github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e h1:VuTBHPJNCQ88Okm9ld5SyLCvU50soWJYQYjQFdcDxew= github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index c68a1e9f1ae..8c30b0a10ed 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -38,7 +38,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 6986c805256..508c521d1ac 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -12,8 +12,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 
h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index d5a67b43168..892c1db7052 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -70,7 +70,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 11df5623494..fcd347f636a 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -10,8 +10,8 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVk github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index 9cb7c9ef522..cb4544f0a18 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -17,7 +17,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index da5396b53f5..fc152e69439 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -1,5 +1,5 @@ -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible 
h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index ad6dd4eecdc..7432f760afa 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -27,7 +27,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index 3268152d6bc..d26f159490b 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -10,8 +10,8 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVk github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index a1e513a119d..7254db761ae 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -10,7 +10,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 86939f2a039..5c4f0e19c06 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -1,4 +1,4 @@ -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index 6bd8199e210..2bbe571516e 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -10,7 +10,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 86939f2a039..5c4f0e19c06 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -1,4 +1,4 @@ -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index 0c1d1700915..ffcfe6f63e4 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -10,7 +10,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 86939f2a039..5c4f0e19c06 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -1,4 +1,4 @@ -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod index 01b5b649b00..f94355e0e9e 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.mod +++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod @@ -34,7 +34,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum index 264a67b9e19..20a8ceafa69 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.sum +++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum @@ -8,8 +8,8 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20181220005116-f8e99590 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20181220005116-f8e995905100/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/aws/aws-sdk-go v1.16.26 h1:GWkl3rkRO/JGRTWoLLIqwf7AWC4/W/1hMOUZqmX0js4= github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda h1:NyywMz59neOoVRFDz+ccfKWxn784fiHMDnZSy6T+JXY= diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index e77f5676fd0..c63ade6ac40 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -17,7 +17,7 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index f044dad8976..1dcd5fdc2b4 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -10,8 +10,8 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVk github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= 
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index 587b1fc5ba8..f4cabd66956 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 { if l == 0 { return 0 } - i := int(float64(l) * q) + i := int(math.Ceil(float64(l) * q)) if i > 0 { i -= 1 } diff --git a/vendor/modules.txt b/vendor/modules.txt index 4623350d8f9..12ec4d50cc1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -127,7 +127,7 @@ github.com/bazelbuild/buildtools/file github.com/bazelbuild/buildtools/lang github.com/bazelbuild/buildtools/tables github.com/bazelbuild/buildtools/wspace -# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 => github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 +# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.0+incompatible => github.com/blang/semver v3.5.0+incompatible github.com/blang/semver From 61731dd1d19afb4a1483b72eafb7f0550393f0f8 Mon Sep 17 00:00:00 2001 From: loic le dru Date: Fri, 10 May 2019 14:18:55 +0200 Subject: [PATCH 098/194] e2e-test logf instead framework.logf --- test/e2e/cloud/BUILD | 1 + test/e2e/cloud/nodes.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/e2e/cloud/BUILD b/test/e2e/cloud/BUILD index 9bc67093987..37f42118220 100644 --- a/test/e2e/cloud/BUILD +++ b/test/e2e/cloud/BUILD @@ -13,6 +13,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", ], diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index 8d097082ea4..ad60585fba0 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -46,7 +47,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { nodeToDelete := nodeDeleteCandidates.Items[0] origNodes := framework.GetReadyNodesIncludingTaintedOrDie(c) - framework.Logf("Original number of ready nodes: %d", len(origNodes.Items)) + e2elog.Logf("Original number of ready nodes: %d", len(origNodes.Items)) err := framework.DeleteNodeOnCloudProvider(&nodeToDelete) if err != nil { From 96b04bfeacd09cbdf8e31537c51d1fa0455e3a06 Mon Sep 17 00:00:00 2001 From: "W. 
Trevor King" Date: Thu, 9 May 2019 21:35:02 -0700 Subject: [PATCH 099/194] test/e2e/upgrades/apps/job: List Pods in failure message Currently, this test can fail with the not-very-helpful [1,2]: fail [k8s.io/kubernetes/test/e2e/upgrades/apps/job.go:58]: Expected : false to be true Since this test is the only CheckForAllJobPodsRunning consumer, and has been since CheckForAllJobPodsRunning landed in 116eda0909 (Implements an upgrade test for Job, 2017-02-22, #41271), this commit refactors the function to EnsureJobPodsRunning, dropping the opaque boolean, and constructing a useful error summarizing the divergence from the expected parallelism and the status of listed Pods. Thanks to Maciej Szulik for the fixups [3] :). [1]: https://storage.googleapis.com/origin-ci-test/logs/release-openshift-origin-installer-e2e-aws-upgrade/1434/build-log.txt [2]: https://bugzilla.redhat.com/show_bug.cgi?id=1708454#c0 [3]: https://github.com/wking/kubernetes/pull/1 --- test/e2e/framework/job/wait.go | 18 +++++++++++++----- test/e2e/upgrades/apps/job.go | 3 +-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index d74509e6cb7..8df3b60cd6c 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -17,6 +17,8 @@ limitations under the License. package job import ( + "fmt" + "strings" "time" batchv1 "k8s.io/api/batch/v1" @@ -99,22 +101,28 @@ func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Dura }) } -// CheckForAllJobPodsRunning uses c to check in the Job named jobName in ns is running. If the returned error is not -// nil the returned bool is true if the Job is running. -func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) { +// EnsureAllJobPodsRunning uses c to check in the Job named jobName in ns +// is running, returning an error if the expected parallelism is not +// satisfied. 
+func EnsureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName})) options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { - return false, err + return err } + podsSummary := make([]string, 0, parallelism) count := int32(0) for _, p := range pods.Items { if p.Status.Phase == v1.PodRunning { count++ } + podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message)) } - return count == parallelism, nil + if count != parallelism { + return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", ")) + } + return nil } // WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns diff --git a/test/e2e/upgrades/apps/job.go b/test/e2e/upgrades/apps/job.go index 15eacd96356..cd836e75c05 100644 --- a/test/e2e/upgrades/apps/job.go +++ b/test/e2e/upgrades/apps/job.go @@ -55,9 +55,8 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) { func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done ginkgo.By("Ensuring active pods == parallelism") - running, err := jobutil.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2) + err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(running).To(gomega.BeTrue()) } // Teardown cleans up any remaining resources. From 2b69699f6765610b700ef942990cb3e8336c0c07 Mon Sep 17 00:00:00 2001 From: Marek Counts Date: Wed, 1 May 2019 22:22:06 -0400 Subject: [PATCH 100/194] updated phase runner to enable custom arg validation currently sub phases cannot have custom arg validation and container commands can have args. 
This removes phase container commands from taking args and enables custom args on the leaf phases --- cmd/kubeadm/app/cmd/phases/join/BUILD | 1 + .../app/cmd/phases/join/controlplanejoin.go | 26 ++-- .../cmd/phases/join/controlplaneprepare.go | 10 +- cmd/kubeadm/app/cmd/phases/workflow/phase.go | 9 +- cmd/kubeadm/app/cmd/phases/workflow/runner.go | 6 + .../app/cmd/phases/workflow/runner_test.go | 128 ++++++++++++++++++ 6 files changed, 165 insertions(+), 15 deletions(-) diff --git a/cmd/kubeadm/app/cmd/phases/join/BUILD b/cmd/kubeadm/app/cmd/phases/join/BUILD index 7113ef4c528..946ee35d435 100644 --- a/cmd/kubeadm/app/cmd/phases/join/BUILD +++ b/cmd/kubeadm/app/cmd/phases/join/BUILD @@ -37,6 +37,7 @@ go_library( "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//vendor/github.com/lithammer/dedent:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go b/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go index b423698f87a..8071f0596c2 100644 --- a/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go +++ b/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go @@ -20,6 +20,8 @@ import ( "fmt" "github.com/pkg/errors" + "github.com/spf13/cobra" + "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -58,6 +60,7 @@ func NewControlPlaneJoinPhase() workflow.Phase { Short: "Join a machine as a control plane instance", InheritFlags: getControlPlaneJoinPhaseFlags("all"), RunAllSiblings: true, + ArgsValidator: cobra.NoArgs, }, newEtcdLocalSubphase(), newUpdateStatusSubphase(), @@ -68,10 +71,11 @@ func NewControlPlaneJoinPhase() workflow.Phase { func newEtcdLocalSubphase() workflow.Phase { return workflow.Phase{ - Name: "etcd", - Short: "Add a new local etcd member", - Run: runEtcdPhase, - InheritFlags: getControlPlaneJoinPhaseFlags("etcd"), + Name: "etcd", + Short: "Add a new local etcd member", + Run: runEtcdPhase, + InheritFlags: getControlPlaneJoinPhaseFlags("etcd"), + ArgsValidator: cobra.NoArgs, } } @@ -83,17 +87,19 @@ func newUpdateStatusSubphase() workflow.Phase { kubeadmconstants.ClusterStatusConfigMapKey, kubeadmconstants.KubeadmConfigConfigMap, ), - Run: runUpdateStatusPhase, - InheritFlags: getControlPlaneJoinPhaseFlags("update-status"), + Run: runUpdateStatusPhase, + InheritFlags: getControlPlaneJoinPhaseFlags("update-status"), + ArgsValidator: cobra.NoArgs, } } func newMarkControlPlaneSubphase() workflow.Phase { return workflow.Phase{ - Name: "mark-control-plane", - Short: "Mark a node as a control-plane", - Run: runMarkControlPlanePhase, - InheritFlags: getControlPlaneJoinPhaseFlags("mark-control-plane"), + Name: "mark-control-plane", + Short: "Mark a node as a control-plane", + Run: runMarkControlPlanePhase, + InheritFlags: getControlPlaneJoinPhaseFlags("mark-control-plane"), + ArgsValidator: cobra.NoArgs, } } diff --git a/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go b/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go index 58e7eebf49a..4ece87efe6d 100644 --- a/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go +++ b/cmd/kubeadm/app/cmd/phases/join/controlplaneprepare.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/pkg/errors" + "github.com/spf13/cobra" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" @@ 
-157,10 +158,11 @@ func newControlPlanePrepareKubeconfigSubphase() workflow.Phase { func newControlPlanePrepareControlPlaneSubphase() workflow.Phase { return workflow.Phase{ - Name: "control-plane", - Short: "Generate the manifests for the new control plane components", - Run: runControlPlanePrepareControlPlaneSubphase, //NB. eventually in future we would like to break down this in sub phases for each component - InheritFlags: getControlPlanePreparePhaseFlags("control-plane"), + Name: "control-plane", + Short: "Generate the manifests for the new control plane components", + Run: runControlPlanePrepareControlPlaneSubphase, //NB. eventually in future we would like to break down this in sub phases for each component + InheritFlags: getControlPlanePreparePhaseFlags("control-plane"), + ArgsValidator: cobra.NoArgs, } } diff --git a/cmd/kubeadm/app/cmd/phases/workflow/phase.go b/cmd/kubeadm/app/cmd/phases/workflow/phase.go index 42ac7f337e7..554635ac56c 100644 --- a/cmd/kubeadm/app/cmd/phases/workflow/phase.go +++ b/cmd/kubeadm/app/cmd/phases/workflow/phase.go @@ -16,7 +16,10 @@ limitations under the License. package workflow -import "github.com/spf13/pflag" +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) // Phase provides an implementation of a workflow phase that allows // creation of new phases by simply instantiating a variable of this type. @@ -71,6 +74,10 @@ type Phase struct { // Nb. if two or phases have the same local flags, please consider using local flags in the parent command // or additional flags defined in the phase runner. LocalFlags *pflag.FlagSet + + // ArgsValidator defines the positional arg function to be used for validating args for this phase + // If not set a phase will adopt the args of the top level command. + ArgsValidator cobra.PositionalArgs } // AppendPhase adds the given phase to the nested, ordered sequence of phases. 
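For illustration only (none of this is in the patch; the phase name and validator choices are hypothetical), a leaf phase built on the new ArgsValidator field could be wired up with either a stock cobra validator or a hand-written cobra.PositionalArgs function:

package example

import (
	"github.com/pkg/errors"
	"github.com/spf13/cobra"

	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
)

// newExampleLeafPhase sketches a leaf phase that accepts at most one positional
// argument. Run, InheritFlags and the other fields are omitted for brevity.
func newExampleLeafPhase() workflow.Phase {
	return workflow.Phase{
		Name:          "example",
		Short:         "Hypothetical leaf phase illustrating ArgsValidator",
		ArgsValidator: cobra.MaximumNArgs(1),
	}
}

// nonEmptyArgs is a custom cobra.PositionalArgs function, analogous to the
// customArgs helper in the runner test below: it rejects empty arguments.
func nonEmptyArgs(cmd *cobra.Command, args []string) error {
	for _, a := range args {
		if a == "" {
			return errors.New("positional arguments must not be empty")
		}
	}
	return nil
}

Container phases keep cobra.NoArgs (as in the controlplanejoin.go changes above), while leaf phases that leave ArgsValidator unset inherit the parent command's Args, per the runner.go change that follows.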
diff --git a/cmd/kubeadm/app/cmd/phases/workflow/runner.go b/cmd/kubeadm/app/cmd/phases/workflow/runner.go index 5e9dc9a562d..d98dd873ee0 100644 --- a/cmd/kubeadm/app/cmd/phases/workflow/runner.go +++ b/cmd/kubeadm/app/cmd/phases/workflow/runner.go @@ -372,6 +372,12 @@ func (e *Runner) BindToCommand(cmd *cobra.Command) { // if this phase has children (not a leaf) it doesn't accept any args if len(p.Phases) > 0 { phaseCmd.Args = cobra.NoArgs + } else { + if p.ArgsValidator == nil { + phaseCmd.Args = cmd.Args + } else { + phaseCmd.Args = p.ArgsValidator + } } // adds the command to parent diff --git a/cmd/kubeadm/app/cmd/phases/workflow/runner_test.go b/cmd/kubeadm/app/cmd/phases/workflow/runner_test.go index 2e9dcf1c732..ef546fdd367 100644 --- a/cmd/kubeadm/app/cmd/phases/workflow/runner_test.go +++ b/cmd/kubeadm/app/cmd/phases/workflow/runner_test.go @@ -297,6 +297,134 @@ func phaseBuilder5(name string, flags *pflag.FlagSet) Phase { } } +type argTest struct { + args cobra.PositionalArgs + pass []string + fail []string +} + +func phaseBuilder6(name string, args cobra.PositionalArgs, phases ...Phase) Phase { + return Phase{ + Name: name, + Short: fmt.Sprintf("long description for %s ...", name), + Phases: phases, + ArgsValidator: args, + } +} + +// customArgs is a custom cobra.PositionArgs function +func customArgs(cmd *cobra.Command, args []string) error { + for _, a := range args { + if a != "qux" { + return fmt.Errorf("arg %s does not equal qux", a) + } + } + return nil +} + +func TestBindToCommandArgRequirements(t *testing.T) { + + // because cobra.ExactArgs(1) == cobra.ExactArgs(3), it is needed + // to run test argument sets that both pass and fail to ensure the correct function was set. + var usecases = []struct { + name string + runner Runner + testCases map[string]argTest + cmd *cobra.Command + }{ + { + name: "leaf command, no defined args, follow parent", + runner: Runner{ + Phases: []Phase{phaseBuilder("foo")}, + }, + testCases: map[string]argTest{ + "phase foo": { + pass: []string{"one", "two", "three"}, + fail: []string{"one", "two"}, + args: cobra.ExactArgs(3), + }, + }, + cmd: &cobra.Command{ + Use: "init", + Args: cobra.ExactArgs(3), + }, + }, + { + name: "container cmd expect none, custom arg check for leaf", + runner: Runner{ + Phases: []Phase{phaseBuilder6("foo", cobra.NoArgs, + phaseBuilder6("bar", cobra.ExactArgs(1)), + phaseBuilder6("baz", customArgs), + )}, + }, + testCases: map[string]argTest{ + "phase foo": { + pass: []string{}, + fail: []string{"one"}, + args: cobra.NoArgs, + }, + "phase foo bar": { + pass: []string{"one"}, + fail: []string{"one", "two"}, + args: cobra.ExactArgs(1), + }, + "phase foo baz": { + pass: []string{"qux"}, + fail: []string{"one"}, + args: customArgs, + }, + }, + cmd: &cobra.Command{ + Use: "init", + Args: cobra.NoArgs, + }, + }, + } + + for _, rt := range usecases { + t.Run(rt.name, func(t *testing.T) { + + rt.runner.BindToCommand(rt.cmd) + + // Checks that cmd gets a new phase subcommand + phaseCmd := getCmd(rt.cmd, "phase") + if phaseCmd == nil { + t.Error("cmd didn't have phase subcommand\n") + return + } + + for c, args := range rt.testCases { + + cCmd := getCmd(rt.cmd, c) + if cCmd == nil { + t.Errorf("cmd didn't have %s subcommand\n", c) + continue + } + + // Ensure it is the expected function + if reflect.ValueOf(cCmd.Args).Pointer() != reflect.ValueOf(args.args).Pointer() { + t.Error("The function poiners where not equal.") + } + + // Test passing argument set + err := cCmd.Args(cCmd, args.pass) + + if err != nil { + 
t.Errorf("command %s should validate the args: %v\n %v", cCmd.Name(), args.pass, err) + } + + // Test failing argument set + err = cCmd.Args(cCmd, args.fail) + + if err == nil { + t.Errorf("command %s should fail to validate the args: %v\n %v", cCmd.Name(), args.pass, err) + } + } + + }) + } +} + func TestBindToCommand(t *testing.T) { var dummy string From d503cba37553dececfb066cf684024d3e3aab3c4 Mon Sep 17 00:00:00 2001 From: David Eads Date: Fri, 10 May 2019 10:11:24 -0400 Subject: [PATCH 101/194] improve e2e namespace dumping on failure --- test/e2e/framework/util.go | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 3a4c00de5eb..def1fee5fb4 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -538,6 +538,23 @@ func logPodStates(pods []v1.Pod) { Logf("") // Final empty line helps for readability. } +// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows +// why pods crashed and since it is in the API, it's fast to retrieve. +func logPodTerminationMessages(pods []v1.Pod) { + for _, pod := range pods { + for _, status := range pod.Status.InitContainerStatuses { + if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { + Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) + } + } + for _, status := range pod.Status.ContainerStatuses { + if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { + Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) + } + } + } +} + // errorBadPodsStates create error message of basic info of bad pods for debugging. func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string { errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout) @@ -2422,6 +2439,8 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { return c.CoreV1().Events(ns).List(opts) }, namespace) + dumpAllPodInfoForNamespace(c, namespace) + // If cluster is large, then the following logs are basically useless, because: // 1. it takes tens of minutes or hours to grab all of them // 2. 
there are so many of them that working with them are mostly impossible @@ -2429,7 +2448,6 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { maxNodesForDump := TestContext.MaxNodesToGather if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { - dumpAllPodInfo(c) dumpAllNodeInfo(c) } else { Logf("skipping dumping cluster info - cluster too large") @@ -2452,12 +2470,13 @@ func (o byFirstTimestamp) Less(i, j int) bool { return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp) } -func dumpAllPodInfo(c clientset.Interface) { - pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{}) +func dumpAllPodInfoForNamespace(c clientset.Interface, namespace string) { + pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch pod debug info: %v", err) } logPodStates(pods.Items) + logPodTerminationMessages(pods.Items) } func dumpAllNodeInfo(c clientset.Interface) { From 5d6c25854eb97a8c3ab40e7dfbae7c22743d75c9 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Tue, 7 May 2019 15:20:39 +0200 Subject: [PATCH 102/194] apiextensions: add structural schema -> go-openapi schema conversion --- .../pkg/apiserver/schema/goopenapi.go | 154 ++++++++++++++++++ .../pkg/apiserver/schema/goopenapi_test.go | 116 +++++++++++++ .../pkg/apiserver/validation/validation.go | 1 + 3 files changed, 271 insertions(+) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi.go new file mode 100644 index 00000000000..59b4c999085 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi.go @@ -0,0 +1,154 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "github.com/go-openapi/spec" +) + +// ToGoOpenAPI converts a structural schema to go-openapi schema. It is faithful and roundtrippable +// with the exception of `nullable:true` for empty type (`type:""`). +// +// WARNING: Do not use the returned schema to perform CRD validation until this restriction is solved. +// +// Nullable:true is mapped to `type:[,"null"]` +// if the structural type is non-empty, and nullable is dropped if the structural type is empty. 
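// Illustration (not part of this file): how the mapping described above plays out
// for the Generic fields defined in this package.
//
//	in := &Structural{Generic: Generic{Type: "string", Nullable: true}}
//	out := in.ToGoOpenAPI()
//	// out.Type is spec.StringOrArray{"string", "null"}
//
//	in = &Structural{Generic: Generic{Nullable: true}} // empty type
//	out = in.ToGoOpenAPI()
//	// out.Type stays empty and the nullable flag is dropped, as warned above.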
+func (s *Structural) ToGoOpenAPI() *spec.Schema { + if s == nil { + return nil + } + + ret := &spec.Schema{} + + if s.Items != nil { + ret.Items = &spec.SchemaOrArray{Schema: s.Items.ToGoOpenAPI()} + } + if s.Properties != nil { + ret.Properties = make(map[string]spec.Schema, len(s.Properties)) + for k, v := range s.Properties { + ret.Properties[k] = *v.ToGoOpenAPI() + } + } + s.Generic.toGoOpenAPI(ret) + s.Extensions.toGoOpenAPI(ret) + s.ValueValidation.toGoOpenAPI(ret) + + return ret +} + +func (g *Generic) toGoOpenAPI(ret *spec.Schema) { + if g == nil { + return + } + + if len(g.Type) != 0 { + ret.Type = spec.StringOrArray{g.Type} + if g.Nullable { + // go-openapi does not support nullable, but multiple type values. + // Only when type is already non-empty, adding null to the types is correct though. + // If you add null as only type, you enforce null, in contrast to nullable being + // ineffective if no type is provided in a schema. + ret.Type = append(ret.Type, "null") + } + } + if g.AdditionalProperties != nil { + ret.AdditionalProperties = &spec.SchemaOrBool{ + Allows: g.AdditionalProperties.Bool, + Schema: g.AdditionalProperties.Structural.ToGoOpenAPI(), + } + } + ret.Description = g.Description + ret.Title = g.Title + ret.Default = g.Default.Object +} + +func (x *Extensions) toGoOpenAPI(ret *spec.Schema) { + if x == nil { + return + } + + if x.XPreserveUnknownFields { + ret.VendorExtensible.AddExtension("x-kubernetes-preserve-unknown-fields", true) + } + if x.XEmbeddedResource { + ret.VendorExtensible.AddExtension("x-kubernetes-embedded-resource", true) + } + if x.XIntOrString { + ret.VendorExtensible.AddExtension("x-kubernetes-int-or-string", true) + } +} + +func (v *ValueValidation) toGoOpenAPI(ret *spec.Schema) { + if v == nil { + return + } + + ret.Format = v.Format + ret.Maximum = v.Maximum + ret.ExclusiveMaximum = v.ExclusiveMaximum + ret.Minimum = v.Minimum + ret.ExclusiveMinimum = v.ExclusiveMinimum + ret.MaxLength = v.MaxLength + ret.MinLength = v.MinLength + ret.Pattern = v.Pattern + ret.MaxItems = v.MaxItems + ret.MinItems = v.MinItems + ret.UniqueItems = v.UniqueItems + ret.MultipleOf = v.MultipleOf + if v.Enum != nil { + ret.Enum = make([]interface{}, 0, len(v.Enum)) + for i := range v.Enum { + ret.Enum = append(ret.Enum, v.Enum[i].Object) + } + } + ret.MaxProperties = v.MaxProperties + ret.MinProperties = v.MinProperties + ret.Required = v.Required + for i := range v.AllOf { + ret.AllOf = append(ret.AllOf, *v.AllOf[i].toGoOpenAPI()) + } + for i := range v.AnyOf { + ret.AnyOf = append(ret.AnyOf, *v.AnyOf[i].toGoOpenAPI()) + } + for i := range v.OneOf { + ret.OneOf = append(ret.OneOf, *v.OneOf[i].toGoOpenAPI()) + } + ret.Not = v.Not.toGoOpenAPI() +} + +func (vv *NestedValueValidation) toGoOpenAPI() *spec.Schema { + if vv == nil { + return nil + } + + ret := &spec.Schema{} + + vv.ValueValidation.toGoOpenAPI(ret) + if vv.Items != nil { + ret.Items = &spec.SchemaOrArray{Schema: vv.Items.toGoOpenAPI()} + } + if vv.Properties != nil { + ret.Properties = make(map[string]spec.Schema, len(vv.Properties)) + for k, v := range vv.Properties { + ret.Properties[k] = *v.toGoOpenAPI() + } + } + vv.ForbiddenGenerics.toGoOpenAPI(ret) // normally empty. 
Exception: int-or-string + vv.ForbiddenExtensions.toGoOpenAPI(ret) // shouldn't do anything + + return ret +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi_test.go new file mode 100644 index 00000000000..9309c18d77c --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/goopenapi_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "math/rand" + "reflect" + "regexp" + "testing" + "time" + + fuzz "github.com/google/gofuzz" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" +) + +var nullTypeRE = regexp.MustCompile(`"type":\["([^"]*)","null"]`) + +func TestStructuralRoundtrip(t *testing.T) { + f := fuzz.New() + seed := time.Now().UnixNano() + t.Logf("seed = %v", seed) + //seed = int64(1549012506261785182) + f.RandSource(rand.New(rand.NewSource(seed))) + f.Funcs( + func(s *JSON, c fuzz.Continue) { + switch c.Intn(6) { + case 0: + s.Object = float64(42.0) + case 1: + s.Object = map[string]interface{}{"foo": "bar"} + case 2: + s.Object = "" + case 3: + s.Object = []string{} + case 4: + s.Object = map[string]interface{}{} + case 5: + s.Object = nil + } + }, + func(g *Generic, c fuzz.Continue) { + c.FuzzNoCustom(g) + + // TODO: make nullable in case of empty type survive go-openapi JSON -> API schema roundtrip + // go-openapi does not support nullable. Adding it to a type slice produces OpenAPI v3 + // incompatible JSON which we cannot unmarshal (without string-replace magic to transform + // null types back into nullable). If type is empty, nullable:true is not preserved + // at all. 
+ if len(g.Type) == 0 { + g.Nullable = false + } + }, + ) + f.MaxDepth(3) + f.NilChance(0.5) + + for i := 0; i < 10000; i++ { + orig := &Structural{} + f.Fuzz(orig) + + // normalize Structural.ValueValidation to zero values if it was nil before + normalizer := Visitor{ + Structural: func(s *Structural) bool { + if s.ValueValidation == nil { + s.ValueValidation = &ValueValidation{} + return true + } + return false + }, + } + normalizer.Visit(orig) + + goOpenAPI := orig.ToGoOpenAPI() + bs, err := json.Marshal(goOpenAPI) + if err != nil { + t.Fatal(err) + } + str := nullTypeRE.ReplaceAllString(string(bs), `"type":"$1","nullable":true`) // unfold nullable type:[,"null"] -> type:,nullable:true + v1beta1Schema := &apiextensionsv1beta1.JSONSchemaProps{} + err = json.Unmarshal([]byte(str), v1beta1Schema) + if err != nil { + t.Fatal(err) + } + internalSchema := &apiextensions.JSONSchemaProps{} + err = apiextensionsv1beta1.Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(v1beta1Schema, internalSchema, nil) + if err != nil { + t.Fatal(err) + } + s, err := NewStructural(internalSchema) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(orig, s) { + t.Fatalf("original and result differ: %v", diff.ObjectDiff(orig, s)) + } + } +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go index 6557d88317d..00a8ac93c1b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go @@ -29,6 +29,7 @@ func NewSchemaValidator(customResourceValidation *apiextensions.CustomResourceVa // Convert CRD schema to openapi schema openapiSchema := &spec.Schema{} if customResourceValidation != nil { + // WARNING: do not replace this with Structural.ToGoOpenAPI until it supports nullable. if err := ConvertJSONSchemaProps(customResourceValidation.OpenAPIV3Schema, openapiSchema); err != nil { return nil, nil, err } From 379577768908adbd76ac91ef385d555f4bad4b19 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 9 May 2019 17:51:41 +0200 Subject: [PATCH 103/194] apiextensions: add fuzzer test that NewStructural is complete --- .../pkg/apiserver/schema/convert_test.go | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert_test.go new file mode 100644 index 00000000000..59b487bc46c --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schema + +import ( + "math/rand" + "reflect" + "testing" + "time" + + fuzz "github.com/google/gofuzz" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" +) + +func TestStructuralRoundtripOrError(t *testing.T) { + f := fuzz.New() + seed := time.Now().UnixNano() + t.Logf("seed = %v", seed) + //seed = int64(1549012506261785182) + f.RandSource(rand.New(rand.NewSource(seed))) + f.Funcs( + func(s *apiextensions.JSON, c fuzz.Continue) { + *s = apiextensions.JSON(map[string]interface{}{"foo": float64(42.2)}) + }, + func(s *apiextensions.JSONSchemaPropsOrArray, c fuzz.Continue) { + c.FuzzNoCustom(s) + if s.Schema != nil { + s.JSONSchemas = nil + } else if s.JSONSchemas == nil { + s.Schema = &apiextensions.JSONSchemaProps{} + } + }, + func(s *apiextensions.JSONSchemaPropsOrBool, c fuzz.Continue) { + c.FuzzNoCustom(s) + if s.Schema != nil { + s.Allows = false + } + }, + func(s **string, c fuzz.Continue) { + c.FuzzNoCustom(s) + if *s != nil && **s == "" { + *s = nil + } + }, + ) + + f.MaxDepth(2) + f.NilChance(0.5) + + for i := 0; i < 10000; i++ { + // fuzz a random field in JSONSchemaProps + origSchema := &apiextensions.JSONSchemaProps{} + x := reflect.ValueOf(origSchema).Elem() + n := rand.Intn(x.NumField()) + if name := x.Type().Field(n).Name; name == "Example" || name == "ExternalDocs" { + // we drop these intentionally + continue + } + f.Fuzz(x.Field(n).Addr().Interface()) + if origSchema.Nullable { + // non-empty type for nullable. nullable:true with empty type does not roundtrip because + // go-openapi does not allow to encode that (we use type slices otherwise). + origSchema.Type = "string" + } + + // it roundtrips or NewStructural errors out. We should never drop anything + orig, err := NewStructural(origSchema) + if err != nil { + continue + } + + // roundtrip through go-openapi, JSON, v1beta1 JSONSchemaProp, internal JSONSchemaProp + goOpenAPI := orig.ToGoOpenAPI() + bs, err := json.Marshal(goOpenAPI) + if err != nil { + t.Fatal(err) + } + str := nullTypeRE.ReplaceAllString(string(bs), `"type":"$1","nullable":true`) // unfold nullable type:[,"null"] -> type:,nullable:true + v1beta1Schema := &apiextensionsv1beta1.JSONSchemaProps{} + err = json.Unmarshal([]byte(str), v1beta1Schema) + if err != nil { + t.Fatal(err) + } + internalSchema := &apiextensions.JSONSchemaProps{} + err = apiextensionsv1beta1.Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(v1beta1Schema, internalSchema, nil) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(origSchema, internalSchema) { + t.Fatalf("original and result differ: %v", diff.ObjectDiff(origSchema, internalSchema)) + } + } +} From 19e29548f6b8886adc21cf59ae667f9f77183b1f Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Thu, 9 May 2019 17:07:39 +0200 Subject: [PATCH 104/194] Update generated files --- .../pkg/apiserver/schema/BUILD | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD index 2a8cee01698..34ecd395b82 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD @@ -5,8 +5,10 @@ go_library( srcs = [ "complete.go", "convert.go", + "goopenapi.go", "structural.go", "validation.go", + "visitor.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema", @@ -16,6 +18,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/github.com/go-openapi/spec:go_default_library", ], ) @@ -35,9 +38,17 @@ filegroup( go_test( name = "go_default_test", - srcs = ["validation_test.go"], + srcs = [ + "convert_test.go", + "goopenapi_test.go", + "validation_test.go", + ], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", ], From 98de316436503f88204bb8e3eb49e685973d7cbe Mon Sep 17 00:00:00 2001 From: Abdullah Gharaibeh Date: Fri, 10 May 2019 09:05:59 -0400 Subject: [PATCH 105/194] Implement the permit extension point in scheduler. 
--- pkg/scheduler/framework/v1alpha1/BUILD | 2 + pkg/scheduler/framework/v1alpha1/framework.go | 86 +++++++ pkg/scheduler/framework/v1alpha1/interface.go | 46 +++- .../framework/v1alpha1/waiting_pods_map.go | 109 ++++++++ pkg/scheduler/scheduler.go | 19 ++ test/integration/scheduler/framework_test.go | 242 +++++++++++++++++- 6 files changed, 496 insertions(+), 8 deletions(-) create mode 100644 pkg/scheduler/framework/v1alpha1/waiting_pods_map.go diff --git a/pkg/scheduler/framework/v1alpha1/BUILD b/pkg/scheduler/framework/v1alpha1/BUILD index a258da876f9..98cdc3565f3 100644 --- a/pkg/scheduler/framework/v1alpha1/BUILD +++ b/pkg/scheduler/framework/v1alpha1/BUILD @@ -7,6 +7,7 @@ go_library( "framework.go", "interface.go", "registry.go", + "waiting_pods_map.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1", visibility = ["//visibility:public"], @@ -14,6 +15,7 @@ go_library( "//pkg/scheduler/internal/cache:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/framework/v1alpha1/framework.go b/pkg/scheduler/framework/v1alpha1/framework.go index d4be81c9c27..752c98cd631 100644 --- a/pkg/scheduler/framework/v1alpha1/framework.go +++ b/pkg/scheduler/framework/v1alpha1/framework.go @@ -18,9 +18,11 @@ package v1alpha1 import ( "fmt" + "time" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) @@ -30,12 +32,19 @@ import ( type framework struct { registry Registry nodeInfoSnapshot *cache.NodeInfoSnapshot + waitingPods *waitingPodsMap plugins map[string]Plugin // a map of initialized plugins. Plugin name:plugin instance. reservePlugins []ReservePlugin prebindPlugins []PrebindPlugin unreservePlugins []UnreservePlugin + permitPlugins []PermitPlugin } +const ( + // Specifies the maximum timeout a permit plugin can return. + maxTimeout time.Duration = 15 * time.Minute +) + var _ = Framework(&framework{}) // NewFramework initializes plugins given the configuration and the registry. @@ -44,6 +53,7 @@ func NewFramework(r Registry, _ *runtime.Unknown) (Framework, error) { registry: r, nodeInfoSnapshot: cache.NewNodeInfoSnapshot(), plugins: make(map[string]Plugin), + waitingPods: newWaitingPodsMap(), } // TODO: The framework needs to read the scheduler config and initialize only @@ -68,6 +78,9 @@ func NewFramework(r Registry, _ *runtime.Unknown) (Framework, error) { if up, ok := p.(UnreservePlugin); ok { f.unreservePlugins = append(f.unreservePlugins, up) } + if pr, ok := p.(PermitPlugin); ok { + f.permitPlugins = append(f.permitPlugins, pr) + } } return f, nil } @@ -117,6 +130,69 @@ func (f *framework) RunUnreservePlugins( } } +// RunPermitPlugins runs the set of configured permit plugins. If any of these +// plugins returns a status other than "Success" or "Wait", it does not continue +// running the remaining plugins and returns an error. Otherwise, if any of the +// plugins returns "Wait", then this function will block for the timeout period +// returned by the plugin, if the time expires, then it will return an error. +// Note that if multiple plugins asked to wait, then we wait for the minimum +// timeout duration. 
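// Worked example of the minimum-timeout rule above (illustration only): if one permit
// plugin returns (Wait, 30*time.Second) and another returns (Wait, 5*time.Second), and
// no plugin rejects the pod, the pod waits at most 5 seconds for an Allow/Reject signal
// before being rejected as unschedulable; maxTimeout (15 minutes) bounds any larger
// duration a plugin might ask for.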
+func (f *framework) RunPermitPlugins( + pc *PluginContext, pod *v1.Pod, nodeName string) *Status { + timeout := maxTimeout + statusCode := Success + for _, pl := range f.permitPlugins { + status, d := pl.Permit(pc, pod, nodeName) + if !status.IsSuccess() { + if status.Code() == Unschedulable { + msg := fmt.Sprintf("rejected by %v at permit: %v", pl.Name(), status.Message()) + klog.V(4).Infof(msg) + return NewStatus(status.Code(), msg) + } + if status.Code() == Wait { + // Use the minimum timeout duration. + if timeout > d { + timeout = d + } + statusCode = Wait + } else { + msg := fmt.Sprintf("error while running %v permit plugin for pod %v: %v", pl.Name(), pod.Name, status.Message()) + klog.Error(msg) + return NewStatus(Error, msg) + } + } + } + + // We now wait for the minimum duration if at least one plugin asked to + // wait (and no plugin rejected the pod) + if statusCode == Wait { + w := newWaitingPod(pod) + f.waitingPods.add(w) + defer f.waitingPods.remove(pod.UID) + timer := time.NewTimer(timeout) + klog.V(4).Infof("waiting for %v for pod %v at permit", timeout, pod.Name) + select { + case <-timer.C: + msg := fmt.Sprintf("pod %v rejected due to timeout after waiting %v at permit", pod.Name, timeout) + klog.V(4).Infof(msg) + return NewStatus(Unschedulable, msg) + case s := <-w.s: + if !s.IsSuccess() { + if s.Code() == Unschedulable { + msg := fmt.Sprintf("rejected while waiting at permit: %v", s.Message()) + klog.V(4).Infof(msg) + return NewStatus(s.Code(), msg) + } + msg := fmt.Sprintf("error received while waiting at permit for pod %v: %v", pod.Name, s.Message()) + klog.Error(msg) + return NewStatus(Error, msg) + } + } + } + + return nil +} + // NodeInfoSnapshot returns the latest NodeInfo snapshot. The snapshot // is taken at the beginning of a scheduling cycle and remains unchanged until a // pod finishes "Reserve". There is no guarantee that the information remains @@ -124,3 +200,13 @@ func (f *framework) RunUnreservePlugins( func (f *framework) NodeInfoSnapshot() *cache.NodeInfoSnapshot { return f.nodeInfoSnapshot } + +// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map. +func (f *framework) IterateOverWaitingPods(callback func(WaitingPod)) { + f.waitingPods.iterate(callback) +} + +// GetWaitingPod returns a reference to a WaitingPod given its UID. +func (f *framework) GetWaitingPod(uid types.UID) WaitingPod { + return f.waitingPods.get(uid) +} diff --git a/pkg/scheduler/framework/v1alpha1/interface.go b/pkg/scheduler/framework/v1alpha1/interface.go index a3764adf616..e8ce959b512 100644 --- a/pkg/scheduler/framework/v1alpha1/interface.go +++ b/pkg/scheduler/framework/v1alpha1/interface.go @@ -20,8 +20,10 @@ package v1alpha1 import ( "errors" + "time" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) @@ -38,6 +40,8 @@ const ( // Unschedulable is used when a plugin finds a pod unschedulable. // The accompanying status message should explain why the pod is unschedulable. Unschedulable Code = 2 + // Wait is used when a permit plugin finds a pod scheduling should wait. + Wait Code = 3 ) // Status indicates the result of running a plugin. It consists of a code and a @@ -86,6 +90,18 @@ func NewStatus(code Code, msg string) *Status { } } +// WaitingPod represents a pod currently waiting in the permit phase. +type WaitingPod interface { + // GetPod returns a reference to the waiting pod. + GetPod() *v1.Pod + // Allow the waiting pod to be scheduled. 
Returns true if the allow signal was + // successfully delivered, false otherwise. + Allow() bool + // Reject declares the waiting pod unschedulable. Returns true if the allow signal + // was successfully delivered, false otherwise. + Reject(msg string) bool +} + // Plugin is the parent type for all the scheduling framework plugins. type Plugin interface { Name() string @@ -105,7 +121,7 @@ type ReservePlugin interface { } // PrebindPlugin is an interface that must be implemented by "prebind" plugins. -// These plugins are called before a pod being scheduled +// These plugins are called before a pod being scheduled. type PrebindPlugin interface { Plugin // Prebind is called before binding a pod. All prebind plugins must return @@ -124,6 +140,19 @@ type UnreservePlugin interface { Unreserve(pc *PluginContext, p *v1.Pod, nodeName string) } +// PermitPlugin is an interface that must be implemented by "permit" plugins. +// These plugins are called before a pod is bound to a node. +type PermitPlugin interface { + Plugin + // Permit is called before binding a pod (and before prebind plugins). Permit + // plugins are used to prevent or delay the binding of a Pod. A permit plugin + // must return success or wait with timeout duration, or the pod will be rejected. + // The pod will also be rejected if the wait timeout or the pod is rejected while + // waiting. Note that if the plugin returns "wait", the framework will wait only + // after running the remaining plugins given that no other plugin rejects the pod. + Permit(pc *PluginContext, p *v1.Pod, nodeName string) (*Status, time.Duration) +} + // Framework manages the set of plugins in use by the scheduling framework. // Configured plugins are called at specified points in a scheduling context. type Framework interface { @@ -142,6 +171,15 @@ type Framework interface { // RunUnreservePlugins runs the set of configured unreserve plugins. RunUnreservePlugins(pc *PluginContext, pod *v1.Pod, nodeName string) + + // RunPermitPlugins runs the set of configured permit plugins. If any of these + // plugins returns a status other than "Success" or "Wait", it does not continue + // running the remaining plugins and returns an error. Otherwise, if any of the + // plugins returns "Wait", then this function will block for the timeout period + // returned by the plugin, if the time expires, then it will return an error. + // Note that if multiple plugins asked to wait, then we wait for the minimum + // timeout duration. + RunPermitPlugins(pc *PluginContext, pod *v1.Pod, nodeName string) *Status } // FrameworkHandle provides data and some tools that plugins can use. It is @@ -153,4 +191,10 @@ type FrameworkHandle interface { // a pod finishes "Reserve" point. There is no guarantee that the information // remains unchanged in the binding phase of scheduling. NodeInfoSnapshot() *internalcache.NodeInfoSnapshot + + // IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map. + IterateOverWaitingPods(callback func(WaitingPod)) + + // GetWaitingPod returns a waiting pod given its UID. + GetWaitingPod(uid types.UID) WaitingPod } diff --git a/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go b/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go new file mode 100644 index 00000000000..842eff5e538 --- /dev/null +++ b/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +// waitingPodsMap a thread-safe map used to maintain pods waiting in the permit phase. +type waitingPodsMap struct { + pods map[types.UID]WaitingPod + mu sync.RWMutex +} + +// newWaitingPodsMap returns a new waitingPodsMap. +func newWaitingPodsMap() *waitingPodsMap { + return &waitingPodsMap{ + pods: make(map[types.UID]WaitingPod), + } +} + +// add a new WaitingPod to the map. +func (m *waitingPodsMap) add(wp WaitingPod) { + m.mu.Lock() + defer m.mu.Unlock() + m.pods[wp.GetPod().UID] = wp +} + +// remove a WaitingPod from the map. +func (m *waitingPodsMap) remove(uid types.UID) { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.pods, uid) +} + +// get a WaitingPod from the map. +func (m *waitingPodsMap) get(uid types.UID) WaitingPod { + m.mu.RLock() + defer m.mu.RUnlock() + return m.pods[uid] + +} + +// iterate acquires a read lock and iterates over the WaitingPods map. +func (m *waitingPodsMap) iterate(callback func(WaitingPod)) { + m.mu.RLock() + defer m.mu.RUnlock() + for _, v := range m.pods { + callback(v) + } +} + +// waitingPod represents a pod waiting in the permit phase. +type waitingPod struct { + pod *v1.Pod + s chan *Status +} + +// newWaitingPod returns a new waitingPod instance. +func newWaitingPod(pod *v1.Pod) *waitingPod { + return &waitingPod{ + pod: pod, + s: make(chan *Status), + } +} + +// GetPod returns a reference to the waiting pod. +func (w *waitingPod) GetPod() *v1.Pod { + return w.pod +} + +// Allow the waiting pod to be scheduled. Returns true if the allow signal was +// successfully delivered, false otherwise. +func (w *waitingPod) Allow() bool { + select { + case w.s <- NewStatus(Success, ""): + return true + default: + return false + } +} + +// Reject declares the waiting pod unschedulable. Returns true if the allow signal +// was successfully delivered, false otherwise. +func (w *waitingPod) Reject(msg string) bool { + select { + case w.s <- NewStatus(Unschedulable, msg): + return true + default: + return false + } +} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 770866cb2ef..cf411188a13 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -533,6 +533,25 @@ func (sched *Scheduler) scheduleOne() { } } + // Run "permit" plugins. 
+ permitStatus := fwk.RunPermitPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) + if !permitStatus.IsSuccess() { + var reason string + if permitStatus.Code() == framework.Unschedulable { + reason = v1.PodReasonUnschedulable + } else { + metrics.PodScheduleErrors.Inc() + reason = SchedulerError + } + if forgetErr := sched.Cache().ForgetPod(assumedPod); forgetErr != nil { + klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) + } + sched.recordSchedulingFailure(assumedPod, permitStatus.AsError(), reason, permitStatus.Message()) + // trigger un-reserve plugins to clean up state associated with the reserved Pod + fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) + return + } + // Run "prebind" plugins. prebindStatus := fwk.RunPrebindPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost) if !prebindStatus.IsSuccess() { diff --git a/test/integration/scheduler/framework_test.go b/test/integration/scheduler/framework_test.go index 4b55e36a3b1..81e07cb4ac6 100644 --- a/test/integration/scheduler/framework_test.go +++ b/test/integration/scheduler/framework_test.go @@ -30,12 +30,18 @@ import ( // TesterPlugin is common ancestor for a test plugin that allows injection of // failures and some other test functionalities. type TesterPlugin struct { - numReserveCalled int - numPrebindCalled int - numUnreserveCalled int - failReserve bool - failPrebind bool - rejectPrebind bool + numReserveCalled int + numPrebindCalled int + numUnreserveCalled int + failReserve bool + failPrebind bool + rejectPrebind bool + numPermitCalled int + failPermit bool + rejectPermit bool + timeoutPermit bool + waitAndRejectPermit bool + waitAndAllowPermit bool } type ReservePlugin struct { @@ -50,15 +56,22 @@ type UnreservePlugin struct { TesterPlugin } +type PermitPlugin struct { + TesterPlugin + fh framework.FrameworkHandle +} + const ( reservePluginName = "reserve-plugin" prebindPluginName = "prebind-plugin" unreservePluginName = "unreserve-plugin" + permitPluginName = "permit-plugin" ) var _ = framework.ReservePlugin(&ReservePlugin{}) var _ = framework.PrebindPlugin(&PrebindPlugin{}) var _ = framework.UnreservePlugin(&UnreservePlugin{}) +var _ = framework.PermitPlugin(&PermitPlugin{}) // Name returns name of the plugin. func (rp *ReservePlugin) Name() string { @@ -134,6 +147,55 @@ func NewUnreservePlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framew return unresPlugin, nil } +var perPlugin = &PermitPlugin{} + +// Name returns name of the plugin. +func (pp *PermitPlugin) Name() string { + return permitPluginName +} + +// Permit implements the permit test plugin. +func (pp *PermitPlugin) Permit(pc *framework.PluginContext, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) { + pp.numPermitCalled++ + if pp.failPermit { + return framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name)), 0 + } + if pp.rejectPermit { + return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("reject pod %v", pod.Name)), 0 + } + if pp.timeoutPermit { + return framework.NewStatus(framework.Wait, ""), 3 * time.Second + } + if pp.waitAndRejectPermit || pp.waitAndAllowPermit { + if pod.Name == "waiting-pod" { + return framework.NewStatus(framework.Wait, ""), 30 * time.Second + } + // This is the signalling pod, wait until the waiting-pod is actually waiting and then either reject or allow it. 
+ wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) { + w := false + pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true }) + return w, nil + }) + if pp.waitAndRejectPermit { + pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { + wp.Reject(fmt.Sprintf("reject pod %v", wp.GetPod().Name)) + }) + return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("reject pod %v", pod.Name)), 0 + } + if pp.waitAndAllowPermit { + pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { wp.Allow() }) + return nil, 0 + } + } + return nil, 0 +} + +// NewPermitPlugin is the factory for permit plugin. +func NewPermitPlugin(_ *runtime.Unknown, fh framework.FrameworkHandle) (framework.Plugin, error) { + perPlugin.fh = fh + return perPlugin, nil +} + // TestReservePlugin tests invocation of reserve plugins. func TestReservePlugin(t *testing.T) { // Create a plugin registry for testing. Register only a reserve plugin. @@ -181,7 +243,7 @@ func TestReservePlugin(t *testing.T) { // TestPrebindPlugin tests invocation of prebind plugins. func TestPrebindPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a reserve plugin. + // Create a plugin registry for testing. Register only a prebind plugin. registry := framework.Registry{prebindPluginName: NewPrebindPlugin} // Create the master and the scheduler with the test plugin set. @@ -336,3 +398,169 @@ func TestUnreservePlugin(t *testing.T) { cleanupPods(cs, t, []*v1.Pod{pod}) } } + +// TestPermitPlugin tests invocation of permit plugins. +func TestPermitPlugin(t *testing.T) { + // Create a plugin registry for testing. Register only a permit plugin. + registry := framework.Registry{permitPluginName: NewPermitPlugin} + + // Create the master and the scheduler with the test plugin set. + context := initTestSchedulerWithOptions(t, + initTestMaster(t, "permit-plugin", nil), + false, nil, registry, false, time.Second) + defer cleanupTest(t, context) + + cs := context.clientSet + // Add a few nodes. + _, err := createNodes(cs, "test-node", nil, 2) + if err != nil { + t.Fatalf("Cannot create nodes: %v", err) + } + + tests := []struct { + fail bool + reject bool + timeout bool + }{ + { + fail: false, + reject: false, + timeout: false, + }, + { + fail: true, + reject: false, + timeout: false, + }, + { + fail: false, + reject: true, + timeout: false, + }, + { + fail: true, + reject: true, + timeout: false, + }, + { + fail: false, + reject: false, + timeout: true, + }, + { + fail: false, + reject: false, + timeout: true, + }, + } + + for i, test := range tests { + perPlugin.failPermit = test.fail + perPlugin.rejectPermit = test.reject + perPlugin.timeoutPermit = test.timeout + perPlugin.waitAndRejectPermit = false + perPlugin.waitAndAllowPermit = false + // Create a best effort pod. + pod, err := createPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating a test pod: %v", err) + } + if test.fail { + if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(cs, pod.Namespace, pod.Name)); err != nil { + t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) + } + } else { + if test.reject || test.timeout { + if err = waitForPodUnschedulable(cs, pod); err != nil { + t.Errorf("test #%v: Didn't expect the pod to be scheduled. 
error: %v", i, err) + } + } else { + if err = waitForPodToSchedule(cs, pod); err != nil { + t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) + } + } + } + + if perPlugin.numPermitCalled == 0 { + t.Errorf("Expected the permit plugin to be called.") + } + + cleanupPods(cs, t, []*v1.Pod{pod}) + } +} + +// TestCoSchedulingWithPermitPlugin tests invocation of permit plugins. +func TestCoSchedulingWithPermitPlugin(t *testing.T) { + // Create a plugin registry for testing. Register only a permit plugin. + registry := framework.Registry{permitPluginName: NewPermitPlugin} + + // Create the master and the scheduler with the test plugin set. + context := initTestSchedulerWithOptions(t, + initTestMaster(t, "permit-plugin", nil), + false, nil, registry, false, time.Second) + defer cleanupTest(t, context) + + cs := context.clientSet + // Add a few nodes. + _, err := createNodes(cs, "test-node", nil, 2) + if err != nil { + t.Fatalf("Cannot create nodes: %v", err) + } + + tests := []struct { + waitReject bool + waitAllow bool + }{ + { + waitReject: true, + waitAllow: false, + }, + { + waitReject: false, + waitAllow: true, + }, + } + + for i, test := range tests { + perPlugin.failPermit = false + perPlugin.rejectPermit = false + perPlugin.timeoutPermit = false + perPlugin.waitAndRejectPermit = test.waitReject + perPlugin.waitAndAllowPermit = test.waitAllow + + // Create two pods. + waitingPod, err := createPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "waiting-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating the waiting pod: %v", err) + } + signallingPod, err := createPausePod(cs, + initPausePod(cs, &pausePodConfig{Name: "signalling-pod", Namespace: context.ns.Name})) + if err != nil { + t.Errorf("Error while creating the signalling pod: %v", err) + } + + if test.waitReject { + if err = waitForPodUnschedulable(cs, waitingPod); err != nil { + t.Errorf("test #%v: Didn't expect the waiting pod to be scheduled. error: %v", i, err) + } + if err = waitForPodUnschedulable(cs, signallingPod); err != nil { + t.Errorf("test #%v: Didn't expect the signalling pod to be scheduled. error: %v", i, err) + } + } else { + if err = waitForPodToSchedule(cs, waitingPod); err != nil { + t.Errorf("test #%v: Expected the waiting pod to be scheduled. error: %v", i, err) + } + if err = waitForPodToSchedule(cs, signallingPod); err != nil { + t.Errorf("test #%v: Expected the signalling pod to be scheduled. error: %v", i, err) + } + } + + if perPlugin.numPermitCalled == 0 { + t.Errorf("Expected the permit plugin to be called.") + } + + cleanupPods(cs, t, []*v1.Pod{waitingPod, signallingPod}) + } +} From d9e4933da3e3a8b443dbc529a5b07813aec049fe Mon Sep 17 00:00:00 2001 From: "Christopher M. Luciano" Date: Wed, 1 May 2019 14:36:27 -0400 Subject: [PATCH 106/194] ingress: migrate extensions.Ingress to networking.Ingress This is a find/replace within my editor. I made the import networkingv1beta1 so that it will be easier to replace for the future v1 migration. Signed-off-by: Christopher M. 
Luciano --- pkg/kubectl/describe/versioned/describe.go | 12 ++-- test/e2e/framework/ingress/BUILD | 2 +- test/e2e/framework/ingress/ingress_utils.go | 70 ++++++++++----------- test/e2e/manifest/BUILD | 4 +- test/e2e/manifest/manifest.go | 10 +-- test/e2e/manifest/manifest_test.go | 4 +- test/e2e/network/ingress.go | 4 +- test/e2e/network/scale/BUILD | 2 +- test/e2e/network/scale/ingress.go | 42 ++++++------- 9 files changed, 75 insertions(+), 75 deletions(-) diff --git a/pkg/kubectl/describe/versioned/describe.go b/pkg/kubectl/describe/versioned/describe.go index f9dc8101c49..1c67f5406f2 100644 --- a/pkg/kubectl/describe/versioned/describe.go +++ b/pkg/kubectl/describe/versioned/describe.go @@ -2291,7 +2291,7 @@ type IngressDescriber struct { } func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := i.ExtensionsV1beta1().Ingresses(namespace) + c := i.NetworkingV1beta1().Ingresses(namespace) ing, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2299,7 +2299,7 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings de return i.describeIngress(ing, describerSettings) } -func (i *IngressDescriber) describeBackend(ns string, backend *extensionsv1beta1.IngressBackend) string { +func (i *IngressDescriber) describeBackend(ns string, backend *networkingv1beta1.IngressBackend) string { endpoints, _ := i.CoreV1().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) service, _ := i.CoreV1().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) spName := "" @@ -2319,7 +2319,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensionsv1beta1 return formatEndpoints(endpoints, sets.NewString(spName)) } -func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) { +func (i *IngressDescriber) describeIngress(ing *networkingv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%v\n", ing.Name) @@ -2330,7 +2330,7 @@ func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, descr if def == nil { // Ingresses that don't specify a default backend inherit the // default backend in the kube-system namespace. - def = &extensionsv1beta1.IngressBackend{ + def = &networkingv1beta1.IngressBackend{ ServiceName: "default-http-backend", ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, } @@ -2372,7 +2372,7 @@ func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, descr }) } -func describeIngressTLS(w PrefixWriter, ingTLS []extensionsv1beta1.IngressTLS) { +func describeIngressTLS(w PrefixWriter, ingTLS []networkingv1beta1.IngressTLS) { w.Write(LEVEL_0, "TLS:\n") for _, t := range ingTLS { if t.SecretName == "" { @@ -4494,7 +4494,7 @@ func extractCSRStatus(csr *certificatesv1beta1.CertificateSigningRequest) (strin } // backendStringer behaves just like a string interface and converts the given backend to a string. 
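// Context for the rename below (sketch; describe.go's import block is not part of this hunk):
// the alias the commit message refers to is expected to read
//
//	networkingv1beta1 "k8s.io/api/networking/v1beta1"
//
// matching the ingress_utils.go import further down, so a later move to networking/v1
// should only require changing the alias target.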
-func backendStringer(backend *extensionsv1beta1.IngressBackend) string { +func backendStringer(backend *networkingv1beta1.IngressBackend) string { if backend == nil { return "" } diff --git a/test/e2e/framework/ingress/BUILD b/test/e2e/framework/ingress/BUILD index 4df49ca9f3e..6b5b8238b69 100644 --- a/test/e2e/framework/ingress/BUILD +++ b/test/e2e/framework/ingress/BUILD @@ -8,7 +8,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/networking/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index ec8cba5f9d2..c7ec2195b23 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -39,7 +39,7 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -186,8 +186,8 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath), func() { var pathToFail string - jig.Update(func(ing *extensions.Ingress) { - newRules := []extensions.IngressRule{} + jig.Update(func(ing *networkingv1beta1.Ingress) { + newRules := []networkingv1beta1.IngressRule{} for _, rule := range ing.Spec.Rules { if rule.Host != updateURLMapHost { newRules = append(newRules, rule) @@ -195,11 +195,11 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri } existingPath := rule.IngressRuleValue.HTTP.Paths[0] pathToFail = existingPath.Path - newRules = append(newRules, extensions.IngressRule{ + newRules = append(newRules, networkingv1beta1.IngressRule{ Host: updateURLMapHost, - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ { Path: updateURLMapPath, Backend: existingPath.Backend, @@ -223,14 +223,14 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri tests = append(tests, ConformanceTests{ fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost), func() { - jig.Update(func(ing *extensions.Ingress) { - newRules := []extensions.IngressRule{} + jig.Update(func(ing *networkingv1beta1.Ingress) { + newRules := []networkingv1beta1.IngressRule{} for _, rule := range ing.Spec.Rules { if rule.Host != tlsHost { newRules = append(newRules, rule) continue } - newRules = append(newRules, extensions.IngressRule{ + newRules = append(newRules, networkingv1beta1.IngressRule{ Host: updatedTLSHost, IngressRuleValue: rule.IngressRuleValue, }) @@ -368,7 +368,7 @@ type TestJig struct { RootCAs map[string][]byte Address string - Ingress *extensions.Ingress + Ingress *networkingv1beta1.Ingress // class is the value of the annotation keyed under // 
`kubernetes.io/ingress.class`. It's added to all ingresses created by // this jig. @@ -436,9 +436,9 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri } // runCreate runs the required command to create the given ingress. -func (j *TestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress, error) { +func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(ing) } // Use kubemci to create a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -450,9 +450,9 @@ func (j *TestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress, error } // runUpdate runs the required command to update the given ingress. -func (j *TestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress, error) { +func (j *TestJig) runUpdate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Update(ing) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(ing) } // Use kubemci to update a multicluster ingress. // kubemci does not have an update command. We use "create --force" to update an existing ingress. @@ -465,11 +465,11 @@ func (j *TestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress, error } // Update retrieves the ingress, performs the passed function, and then updates it. -func (j *TestJig) Update(update func(ing *extensions.Ingress)) { +func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) { var err error ns, name := j.Ingress.Namespace, j.Ingress.Name for i := 0; i < 3; i++ { - j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) if err != nil { framework.Failf("failed to get ingress %s/%s: %v", ns, name, err) } @@ -493,8 +493,8 @@ func (j *TestJig) AddHTTPS(secretName string, hosts ...string) { _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to also use secret %v for TLS termination", j.Ingress.Name, secretName) - j.Update(func(ing *extensions.Ingress) { - ing.Spec.TLS = append(ing.Spec.TLS, extensions.IngressTLS{Hosts: hosts, SecretName: secretName}) + j.Update(func(ing *networkingv1beta1.Ingress) { + ing.Spec.TLS = append(ing.Spec.TLS, networkingv1beta1.IngressTLS{Hosts: hosts, SecretName: secretName}) }) j.RootCAs[secretName] = cert } @@ -504,8 +504,8 @@ func (j *TestJig) SetHTTPS(secretName string, hosts ...string) { _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName) - j.Update(func(ing *extensions.Ingress) { - ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}} + j.Update(func(ing *networkingv1beta1.Ingress) { + ing.Spec.TLS = []networkingv1beta1.IngressTLS{{Hosts: hosts, SecretName: secretName}} }) j.RootCAs = map[string][]byte{secretName: cert} } @@ -513,14 +513,14 @@ func (j *TestJig) SetHTTPS(secretName string, hosts ...string) { // RemoveHTTPS updates the ingress to not use this secret for TLS. 
// Note: Does not delete the secret. func (j *TestJig) RemoveHTTPS(secretName string) { - newTLS := []extensions.IngressTLS{} + newTLS := []networkingv1beta1.IngressTLS{} for _, ingressTLS := range j.Ingress.Spec.TLS { if secretName != ingressTLS.SecretName { newTLS = append(newTLS, ingressTLS) } } j.Logger.Infof("Updating ingress %v to not use secret %v for TLS termination", j.Ingress.Name, secretName) - j.Update(func(ing *extensions.Ingress) { + j.Update(func(ing *networkingv1beta1.Ingress) { ing.Spec.TLS = newTLS }) delete(j.RootCAs, secretName) @@ -551,16 +551,16 @@ func (j *TestJig) TryDeleteIngress() { j.tryDeleteGivenIngress(j.Ingress) } -func (j *TestJig) tryDeleteGivenIngress(ing *extensions.Ingress) { +func (j *TestJig) tryDeleteGivenIngress(ing *networkingv1beta1.Ingress) { if err := j.runDelete(ing); err != nil { j.Logger.Infof("Error while deleting the ingress %v/%v with class %s: %v", ing.Namespace, ing.Name, j.Class, err) } } // runDelete runs the required command to delete the given ingress. -func (j *TestJig) runDelete(ing *extensions.Ingress) error { +func (j *TestJig) runDelete(ing *networkingv1beta1.Ingress) error { if j.Class != MulticlusterIngressClassValue { - return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil) } // Use kubemci to delete a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -600,7 +600,7 @@ func getIngressAddress(client clientset.Interface, ns, name, class string) ([]st if class == MulticlusterIngressClassValue { return getIngressAddressFromKubemci(name) } - ing, err := client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + ing, err := client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -635,7 +635,7 @@ func (j *TestJig) WaitForIngressAddress(c clientset.Interface, ns, ingName strin return address, err } -func (j *TestJig) pollIngressWithCert(ing *extensions.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error { +func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error { // Check that all rules respond to a simple GET. knownHostsSet := sets.NewString(knownHosts...) for _, rules := range ing.Spec.Rules { @@ -695,7 +695,7 @@ func (j *TestJig) WaitForIngressToStable() { // http or https). If waitForNodePort is true, the NodePort of the Service // is verified before verifying the Ingress. NodePort is currently a // requirement for cloudprovider Ingress. -func (j *TestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress, waitForNodePort bool, timeout time.Duration) error { +func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1beta1.Ingress, waitForNodePort bool, timeout time.Duration) error { // Wait for the loadbalancer IP. 
address, err := j.WaitForIngressAddress(j.Client, ing.Namespace, ing.Name, timeout) if err != nil { @@ -864,15 +864,15 @@ func (cont *NginxIngressController) Init() { framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP) } -func generateBacksideHTTPSIngressSpec(ns string) *extensions.Ingress { - return &extensions.Ingress{ +func generateBacksideHTTPSIngressSpec(ns string) *networkingv1beta1.Ingress { + return &networkingv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "echoheaders-https", Namespace: ns, }, - Spec: extensions.IngressSpec{ + Spec: networkingv1beta1.IngressSpec{ // Note kubemci requires a default backend. - Backend: &extensions.IngressBackend{ + Backend: &networkingv1beta1.IngressBackend{ ServiceName: "echoheaders-https", ServicePort: intstr.IntOrString{ Type: intstr.Int, @@ -939,7 +939,7 @@ func generateBacksideHTTPSDeploymentSpec() *apps.Deployment { } // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured. -func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *extensions.Ingress, error) { +func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) { deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec()) if err != nil { return nil, nil, nil, err @@ -963,7 +963,7 @@ func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace st } // DeleteTestResource deletes given deployment, service and ingress. -func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *extensions.Ingress) []error { +func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *networkingv1beta1.Ingress) []error { var errs []error if ing != nil { if err := j.runDelete(ing); err != nil { diff --git a/test/e2e/manifest/BUILD b/test/e2e/manifest/BUILD index 93433648e45..4ee823ecd62 100644 --- a/test/e2e/manifest/BUILD +++ b/test/e2e/manifest/BUILD @@ -13,7 +13,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/networking/v1beta1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -41,5 +41,5 @@ go_test( name = "go_default_test", srcs = ["manifest_test.go"], embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/api/extensions/v1beta1:go_default_library"], + deps = ["//staging/src/k8s.io/api/networking/v1beta1:go_default_library"], ) diff --git a/test/e2e/manifest/manifest.go b/test/e2e/manifest/manifest.go index 0b0da3930fa..6e66e1aed92 100644 --- a/test/e2e/manifest/manifest.go +++ b/test/e2e/manifest/manifest.go @@ -22,7 +22,7 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -87,8 +87,8 @@ func SvcFromManifest(fileName string) (*v1.Service, error) { } // IngressFromManifest reads a .json/yaml file 
and returns the ingress in it. -func IngressFromManifest(fileName string) (*extensions.Ingress, error) { - var ing extensions.Ingress +func IngressFromManifest(fileName string) (*networkingv1beta1.Ingress, error) { + var ing networkingv1beta1.Ingress data, err := testfiles.Read(fileName) if err != nil { return nil, err @@ -106,8 +106,8 @@ func IngressFromManifest(fileName string) (*extensions.Ingress, error) { // IngressToManifest generates a yaml file in the given path with the given ingress. // Assumes that a directory exists at the given path. -func IngressToManifest(ing *extensions.Ingress, path string) error { - serialized, err := marshalToYaml(ing, extensions.SchemeGroupVersion) +func IngressToManifest(ing *networkingv1beta1.Ingress, path string) error { + serialized, err := marshalToYaml(ing, networkingv1beta1.SchemeGroupVersion) if err != nil { return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err) } diff --git a/test/e2e/manifest/manifest_test.go b/test/e2e/manifest/manifest_test.go index 91ac882d5ba..6eeac5382e6 100644 --- a/test/e2e/manifest/manifest_test.go +++ b/test/e2e/manifest/manifest_test.go @@ -22,11 +22,11 @@ import ( "path/filepath" "testing" - extensions "k8s.io/api/extensions/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" ) func TestIngressToManifest(t *testing.T) { - ing := &extensions.Ingress{} + ing := &networkingv1beta1.Ingress{} // Create a temp dir. tmpDir, err := ioutil.TempDir("", "kubemci") if err != nil { diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index ea11aa7024b..7279ac0cec1 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -170,7 +170,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { - ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations if annotations == nil || annotations[instanceGroupAnnotation] == "" { @@ -193,7 +193,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { scKey := ingress.StatusPrefix + "/ssl-cert" beKey := ingress.StatusPrefix + "/backends" wait.Poll(2*time.Second, time.Minute, func() (bool, error) { - ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" || diff --git a/test/e2e/network/scale/BUILD b/test/e2e/network/scale/BUILD index 5bfd0cee49c..ac3073fff90 100644 --- a/test/e2e/network/scale/BUILD +++ b/test/e2e/network/scale/BUILD @@ -8,7 +8,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/networking/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 
4b68bfe0391..6f4c342272c 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -24,7 +24,7 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" clientset "k8s.io/client-go/kubernetes" @@ -74,7 +74,7 @@ type IngressScaleFramework struct { ScaleTestDeploy *apps.Deployment ScaleTestSvcs []*v1.Service - ScaleTestIngs []*extensions.Ingress + ScaleTestIngs []*networkingv1beta1.Ingress // BatchCreateLatencies stores all ingress creation latencies, in different // batches. @@ -121,7 +121,7 @@ func (f *IngressScaleFramework) PrepareScaleTest() error { } f.ScaleTestSvcs = []*v1.Service{} - f.ScaleTestIngs = []*extensions.Ingress{} + f.ScaleTestIngs = []*networkingv1beta1.Ingress{} return nil } @@ -133,7 +133,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { f.Logger.Infof("Cleaning up ingresses...") for _, ing := range f.ScaleTestIngs { if ing != nil { - if err := f.Clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil { + if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil { errs = append(errs, fmt.Errorf("Error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) } } @@ -190,7 +190,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { numIngsToCreate := numIngsNeeded - numIngsCreated ingWg.Add(numIngsToCreate) svcQueue := make(chan *v1.Service, numIngsToCreate) - ingQueue := make(chan *extensions.Ingress, numIngsToCreate) + ingQueue := make(chan *networkingv1beta1.Ingress, numIngsToCreate) errQueue := make(chan error, numIngsToCreate) latencyQueue := make(chan time.Duration, numIngsToCreate) start := time.Now() @@ -270,14 +270,14 @@ func (f *IngressScaleFramework) RunScaleTest() []error { f.StepCreateLatencies = append(f.StepCreateLatencies, elapsed) f.Logger.Infof("Updating ingress and wait for change to take effect") - ingToUpdate, err := f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Get(ingCreated.Name, metav1.GetOptions{}) + ingToUpdate, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Get(ingCreated.Name, metav1.GetOptions{}) if err != nil { errs = append(errs, err) return } addTestPathToIngress(ingToUpdate) start = time.Now() - ingToUpdate, err = f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingToUpdate) + ingToUpdate, err = f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Update(ingToUpdate) if err != nil { errs = append(errs, err) return @@ -357,45 +357,45 @@ func (f *IngressScaleFramework) GetFormattedLatencies() string { return res } -func addTestPathToIngress(ing *extensions.Ingress) { +func addTestPathToIngress(ing *networkingv1beta1.Ingress) { ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append( ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths, - extensions.HTTPIngressPath{ + networkingv1beta1.HTTPIngressPath{ Path: "/test", Backend: ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend, }) } -func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *extensions.Ingress, error) { +func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *networkingv1beta1.Ingress, error) { svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(generateScaleTestServiceSpec(suffix)) if err != nil { return 
nil, nil, err } - ingCreated, err := f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Create(generateScaleTestIngressSpec(suffix, enableTLS)) + ingCreated, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Create(generateScaleTestIngressSpec(suffix, enableTLS)) if err != nil { return nil, nil, err } return svcCreated, ingCreated, nil } -func generateScaleTestIngressSpec(suffix string, enableTLS bool) *extensions.Ingress { - ing := &extensions.Ingress{ +func generateScaleTestIngressSpec(suffix string, enableTLS bool) *networkingv1beta1.Ingress { + ing := &networkingv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", scaleTestIngressNamePrefix, suffix), }, - Spec: extensions.IngressSpec{ - TLS: []extensions.IngressTLS{ + Spec: networkingv1beta1.IngressSpec{ + TLS: []networkingv1beta1.IngressTLS{ {SecretName: scaleTestSecretName}, }, - Rules: []extensions.IngressRule{ + Rules: []networkingv1beta1.IngressRule{ { Host: scaleTestHostname, - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networkingv1beta1.IngressRuleValue{ + HTTP: &networkingv1beta1.HTTPIngressRuleValue{ + Paths: []networkingv1beta1.HTTPIngressPath{ { Path: "/scale", - Backend: extensions.IngressBackend{ + Backend: networkingv1beta1.IngressBackend{ ServiceName: fmt.Sprintf("%s-%s", scaleTestBackendName, suffix), ServicePort: intstr.IntOrString{ Type: intstr.Int, @@ -411,7 +411,7 @@ func generateScaleTestIngressSpec(suffix string, enableTLS bool) *extensions.Ing }, } if enableTLS { - ing.Spec.TLS = []extensions.IngressTLS{ + ing.Spec.TLS = []networkingv1beta1.IngressTLS{ {SecretName: scaleTestSecretName}, } } From 33a3e325f754d179b25558dee116fca1c67d353a Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 4 May 2019 16:55:49 -0400 Subject: [PATCH 107/194] API server should offer metav1 Table/Partial transforms Now that internal types are equivalent, allow the apiserver to serve metav1 and metav1beta1 depending on the client. Test that in the apiserver integration test and ensure we get the appropriate responses. Register the metav1 type in the appropriate external locations. 
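To make the negotiation concrete: with this change a client selects the Table (or PartialObjectMetadata) rendering purely through the Accept header, preferring the GA meta.k8s.io/v1 form and falling back to v1beta1 on older servers. The sketch below is illustrative only and not part of the patch; the API server URL and bearer token are hypothetical placeholders, and TLS verification is disabled purely for brevity.

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical placeholders -- not part of the patch above.
	const apiServer = "https://127.0.0.1:6443"
	const token = "REPLACE_WITH_BEARER_TOKEN"

	req, err := http.NewRequest("GET", apiServer+"/api/v1/namespaces/default/services", nil)
	if err != nil {
		panic(err)
	}
	// Ask for the GA Table first, then the beta Table, then plain JSON.
	// The server answers with the first representation it can serve.
	req.Header.Set("Accept",
		"application/json;as=Table;v=v1;g=meta.k8s.io, "+
			"application/json;as=Table;v=v1beta1;g=meta.k8s.io, "+
			"application/json")
	req.Header.Set("Authorization", "Bearer "+token)

	client := &http.Client{Transport: &http.Transport{
		// Illustration only; verify certificates in real code.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// On a server with this patch the body is a meta.k8s.io/v1 Table
	// (columnDefinitions + rows); older servers return the v1beta1 Table.
	fmt.Println(string(body))
}

Requesting a version the server does not recognize (for example v=v1alpha1) is rejected with 406 Not Acceptable, which is exactly what the integration tests below assert.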
--- pkg/kubectl/scheme/install.go | 3 +- pkg/printers/internalversion/printers.go | 12 +- .../test/integration/table_test.go | 4 +- .../k8s.io/apimachinery/pkg/api/meta/BUILD | 1 - .../k8s.io/apimachinery/pkg/api/meta/meta.go | 7 +- .../apimachinery/pkg/apis/meta/v1/register.go | 3 - .../apiserver/pkg/endpoints/apiserver_test.go | 165 +++++-- .../pkg/endpoints/handlers/response.go | 107 +++-- .../apiserver/pkg/endpoints/handlers/rest.go | 2 +- test/integration/apiserver/apiserver_test.go | 401 ++++++++++++++++-- 10 files changed, 594 insertions(+), 111 deletions(-) diff --git a/pkg/kubectl/scheme/install.go b/pkg/kubectl/scheme/install.go index fd25c730e67..76a0578dc19 100644 --- a/pkg/kubectl/scheme/install.go +++ b/pkg/kubectl/scheme/install.go @@ -57,7 +57,8 @@ import ( func init() { // Register external types for Scheme metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(metav1beta1.AddToScheme(Scheme)) + utilruntime.Must(metav1beta1.AddMetaToScheme(Scheme)) + utilruntime.Must(metav1.AddMetaToScheme(Scheme)) utilruntime.Must(scheme.AddToScheme(Scheme)) utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion)) diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index dbb81ca8d6f..cc50fb9547c 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -539,12 +539,12 @@ func translateTimestampUntil(timestamp metav1.Time) string { } var ( - podSuccessConditions = []metav1beta1.TableRowCondition{{Type: metav1beta1.RowCompleted, Status: metav1beta1.ConditionTrue, Reason: string(api.PodSucceeded), Message: "The pod has completed successfully."}} - podFailedConditions = []metav1beta1.TableRowCondition{{Type: metav1beta1.RowCompleted, Status: metav1beta1.ConditionTrue, Reason: string(api.PodFailed), Message: "The pod failed."}} + podSuccessConditions = []metav1.TableRowCondition{{Type: metav1.RowCompleted, Status: metav1.ConditionTrue, Reason: string(api.PodSucceeded), Message: "The pod has completed successfully."}} + podFailedConditions = []metav1.TableRowCondition{{Type: metav1.RowCompleted, Status: metav1.ConditionTrue, Reason: string(api.PodFailed), Message: "The pod failed."}} ) -func printPodList(podList *api.PodList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(podList.Items)) +func printPodList(podList *api.PodList, options printers.PrintOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(podList.Items)) for i := range podList.Items { r, err := printPod(&podList.Items[i], options) if err != nil { @@ -555,7 +555,7 @@ func printPodList(podList *api.PodList, options printers.PrintOptions) ([]metav1 return rows, nil } -func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1.TableRow, error) { restarts := 0 totalContainers := len(pod.Spec.Containers) readyContainers := 0 @@ -565,7 +565,7 @@ func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1beta1.TableR reason = pod.Status.Reason } - row := metav1beta1.TableRow{ + row := metav1.TableRow{ Object: runtime.RawExtension{Object: pod}, } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go index 14645504ac7..f2aa2c6a773 100644 --- 
a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go @@ -148,8 +148,10 @@ func TestTableGet(t *testing.T) { codecs := serializer.NewCodecFactory(scheme) parameterCodec := runtime.NewParameterCodec(scheme) metav1.AddToGroupVersion(scheme, gv) - scheme.AddKnownTypes(gv, &metav1beta1.Table{}, &metav1beta1.TableOptions{}) + scheme.AddKnownTypes(gv, &metav1beta1.TableOptions{}) + scheme.AddKnownTypes(gv, &metav1.TableOptions{}) scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, &metav1beta1.Table{}, &metav1beta1.TableOptions{}) + scheme.AddKnownTypes(metav1.SchemeGroupVersion, &metav1.Table{}, &metav1.TableOptions{}) crConfig := *config crConfig.GroupVersion = &gv diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD index 4ced3ce5663..a424f4839d5 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -42,7 +42,6 @@ go_library( importpath = "k8s.io/apimachinery/pkg/api/meta", deps = [ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go index b50337e13f4..086bce04b0a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -21,7 +21,6 @@ import ( "reflect" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -114,12 +113,12 @@ func Accessor(obj interface{}) (metav1.Object, error) { // AsPartialObjectMetadata takes the metav1 interface and returns a partial object. // TODO: consider making this solely a conversion action. -func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { +func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { switch t := m.(type) { case *metav1.ObjectMeta: - return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} + return &metav1.PartialObjectMetadata{ObjectMeta: *t} default: - return &metav1beta1.PartialObjectMetadata{ + return &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: m.GetName(), GenerateName: m.GetGenerateName(), diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 24fc134150f..368efe1efd9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -113,7 +113,4 @@ func AddMetaToScheme(scheme *runtime.Scheme) error { return scheme.AddConversionFuncs( Convert_Slice_string_To_v1_IncludeObjectPolicy, ) - - // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) 
} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index 61583fab34b..594576ba983 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -1749,14 +1749,14 @@ func TestGetPretty(t *testing.T) { pretty bool }{ {accept: runtime.ContentTypeJSON}, - {accept: runtime.ContentTypeJSON + ";pretty=0"}, + {accept: "application/json;pretty=0"}, {accept: runtime.ContentTypeJSON, userAgent: "kubectl"}, {accept: runtime.ContentTypeJSON, params: url.Values{"pretty": {"0"}}}, {pretty: true, accept: runtime.ContentTypeJSON, userAgent: "curl"}, {pretty: true, accept: runtime.ContentTypeJSON, userAgent: "Mozilla/5.0"}, {pretty: true, accept: runtime.ContentTypeJSON, userAgent: "Wget"}, - {pretty: true, accept: runtime.ContentTypeJSON + ";pretty=1"}, + {pretty: true, accept: "application/json;pretty=1"}, {pretty: true, accept: runtime.ContentTypeJSON, params: url.Values{"pretty": {"1"}}}, {pretty: true, accept: runtime.ContentTypeJSON, params: url.Values{"pretty": {"true"}}}, } @@ -1818,14 +1818,28 @@ func TestGetTable(t *testing.T) { if err != nil { t.Fatal(err) } - partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) - encodedBody, err := runtime.Encode(metainternalversion.Codecs.LegacyCodec(metav1beta1.SchemeGroupVersion), partial) - if err != nil { - t.Fatal(err) + var encodedV1Beta1Body []byte + { + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + encodedBody, err := runtime.Encode(metainternalversion.Codecs.LegacyCodec(metav1beta1.SchemeGroupVersion), partial) + if err != nil { + t.Fatal(err) + } + // the codec includes a trailing newline that is not present during decode + encodedV1Beta1Body = bytes.TrimSpace(encodedBody) + } + var encodedV1Body []byte + { + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + encodedBody, err := runtime.Encode(metainternalversion.Codecs.LegacyCodec(metav1.SchemeGroupVersion), partial) + if err != nil { + t.Fatal(err) + } + // the codec includes a trailing newline that is not present during decode + encodedV1Body = bytes.TrimSpace(encodedBody) } - // the codec includes a trailing newline that is not present during decode - encodedBody = bytes.TrimSpace(encodedBody) metaDoc := metav1.ObjectMeta{}.SwaggerDoc() @@ -1838,16 +1852,36 @@ func TestGetTable(t *testing.T) { item bool }{ { - accept: runtime.ContentTypeJSON + ";as=Table;v=v1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1alpha1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, { accept: runtime.ContentTypeProtobuf + ";as=Table;v=v1beta1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, + { + accept: runtime.ContentTypeProtobuf + ";as=Table;v=v1;g=meta.k8s.io", + statusCode: http.StatusNotAcceptable, + }, + { item: true, - accept: runtime.ContentTypeJSON + ";as=Table;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1;g=meta.k8s.io", + expected: &metav1.Table{ + TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1"}, + ListMeta: metav1.ListMeta{ResourceVersion: "10", SelfLink: "/blah"}, + ColumnDefinitions: []metav1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: 
"name", Description: metaDoc["name"]}, + {Name: "Created At", Type: "date", Description: metaDoc["creationTimestamp"]}, + }, + Rows: []metav1.TableRow{ + {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedV1Body}}, + }, + }, + }, + { + item: true, + accept: "application/json;as=Table;v=v1beta1;g=meta.k8s.io", expected: &metav1beta1.Table{ TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1beta1"}, ListMeta: metav1.ListMeta{ResourceVersion: "10", SelfLink: "/blah"}, @@ -1856,7 +1890,7 @@ func TestGetTable(t *testing.T) { {Name: "Created At", Type: "date", Description: metaDoc["creationTimestamp"]}, }, Rows: []metav1beta1.TableRow{ - {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedBody}}, + {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedV1Beta1Body}}, }, }, }, @@ -1864,7 +1898,7 @@ func TestGetTable(t *testing.T) { item: true, accept: strings.Join([]string{ runtime.ContentTypeProtobuf + ";as=Table;v=v1beta1;g=meta.k8s.io", - runtime.ContentTypeJSON + ";as=Table;v=v1beta1;g=meta.k8s.io", + "application/json;as=Table;v=v1beta1;g=meta.k8s.io", }, ","), expected: &metav1beta1.Table{ TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1beta1"}, @@ -1874,13 +1908,13 @@ func TestGetTable(t *testing.T) { {Name: "Created At", Type: "date", Description: metaDoc["creationTimestamp"]}, }, Rows: []metav1beta1.TableRow{ - {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedBody}}, + {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedV1Beta1Body}}, }, }, }, { item: true, - accept: runtime.ContentTypeJSON + ";as=Table;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1beta1;g=meta.k8s.io", params: url.Values{"includeObject": []string{"Metadata"}}, expected: &metav1beta1.Table{ TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1beta1"}, @@ -1890,12 +1924,12 @@ func TestGetTable(t *testing.T) { {Name: "Created At", Type: "date", Description: metaDoc["creationTimestamp"]}, }, Rows: []metav1beta1.TableRow{ - {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedBody}}, + {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedV1Beta1Body}}, }, }, }, { - accept: runtime.ContentTypeJSON + ";as=Table;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1beta1;g=meta.k8s.io", params: url.Values{"includeObject": []string{"Metadata"}}, expected: &metav1beta1.Table{ TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1beta1"}, @@ -1905,7 +1939,7 @@ func TestGetTable(t *testing.T) { {Name: "Created At", Type: "date", Description: metaDoc["creationTimestamp"]}, }, Rows: []metav1beta1.TableRow{ - {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedBody}}, + {Cells: []interface{}{"foo1", now.Time.UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedV1Beta1Body}}, }, }, }, @@ -1996,6 +2030,13 @@ func TestWatchTable(t *testing.T) { // the codec includes a trailing newline that is not present during decode encodedBody = bytes.TrimSpace(encodedBody) + encodedBodyV1, err := runtime.Encode(metainternalversion.Codecs.LegacyCodec(metav1.SchemeGroupVersion), partial) + 
if err != nil { + t.Fatal(err) + } + // the codec includes a trailing newline that is not present during decode + encodedBodyV1 = bytes.TrimSpace(encodedBodyV1) + metaDoc := metav1.ObjectMeta{}.SwaggerDoc() s := metainternalversion.Codecs.SupportedMediaTypes()[0].Serializer @@ -2011,11 +2052,11 @@ func TestWatchTable(t *testing.T) { item bool }{ { - accept: runtime.ContentTypeJSON + ";as=Table;v=v1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1alpha1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, { - accept: runtime.ContentTypeJSON + ";as=Table;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1beta1;g=meta.k8s.io", send: func(w *watch.FakeWatcher) { w.Add(&obj) }, @@ -2039,7 +2080,7 @@ func TestWatchTable(t *testing.T) { }, }, { - accept: runtime.ContentTypeJSON + ";as=Table;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=Table;v=v1beta1;g=meta.k8s.io", send: func(w *watch.FakeWatcher) { w.Add(&obj) w.Modify(&obj) @@ -2075,6 +2116,43 @@ func TestWatchTable(t *testing.T) { }, }, }, + { + accept: "application/json;as=Table;v=v1;g=meta.k8s.io", + send: func(w *watch.FakeWatcher) { + w.Add(&obj) + w.Modify(&obj) + }, + expected: []*metav1.WatchEvent{ + { + Type: "ADDED", + Object: runtime.RawExtension{ + Raw: []byte(strings.TrimSpace(runtime.EncodeOrDie(s, &metav1.Table{ + TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1"}, + ListMeta: metav1.ListMeta{ResourceVersion: "10", SelfLink: "/blah"}, + ColumnDefinitions: []metav1beta1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: metaDoc["name"]}, + {Name: "Created At", Type: "date", Description: metaDoc["creationTimestamp"]}, + }, + Rows: []metav1.TableRow{ + {Cells: []interface{}{"foo1", time.Unix(1, 0).UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedBodyV1}}, + }, + }))), + }, + }, + { + Type: "MODIFIED", + Object: runtime.RawExtension{ + Raw: []byte(strings.TrimSpace(runtime.EncodeOrDie(s, &metav1.Table{ + TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "meta.k8s.io/v1"}, + ListMeta: metav1.ListMeta{ResourceVersion: "10", SelfLink: "/blah"}, + Rows: []metav1.TableRow{ + {Cells: []interface{}{"foo1", time.Unix(1, 0).UTC().Format(time.RFC3339)}, Object: runtime.RawExtension{Raw: encodedBodyV1}}, + }, + }))), + }, + }, + }, + }, } for i, test := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { @@ -2122,6 +2200,7 @@ func TestWatchTable(t *testing.T) { if err != nil { t.Fatal(err) } + defer resp.Body.Close() if test.statusCode != 0 { if resp.StatusCode != test.statusCode { t.Fatalf("%d: unexpected response: %#v", i, resp) @@ -2228,46 +2307,72 @@ func TestGetPartialObjectMetadata(t *testing.T) { statusCode int }{ { - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1;g=meta.k8s.io", + accept: "application/json;as=PartialObjectMetadata;v=v1alpha1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, { - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1;g=meta.k8s.io, application/json", + accept: "application/json;as=PartialObjectMetadata;v=v1alpha1;g=meta.k8s.io, application/json", expectKind: schema.GroupVersionKind{Kind: "Simple", Group: testGroupVersion.Group, Version: testGroupVersion.Version}, }, { - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io, application/json", + accept: "application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io, application/json", expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: 
"meta.k8s.io", Version: "v1beta1"}, }, { list: true, - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, + + // verify preferred version overrides supported version + { + accept: "application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io, application/json;as=PartialObjectMetadata;v=v1;g=meta.k8s.io, application/json", + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1beta1"}, + }, + { + accept: "application/json;as=PartialObjectMetadata;v=v1;g=meta.k8s.io, application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io, application/json", + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1"}, + }, + { + accept: "application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io, application/json;as=PartialObjectMetadata;v=v1;g=meta.k8s.io", + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1beta1"}, + }, + { + accept: "application/json;as=PartialObjectMetadata;v=v1;g=meta.k8s.io, application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io", + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1"}, + }, + { list: true, - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1;g=meta.k8s.io, application/json", + accept: "application/json;as=PartialObjectMetadata;v=v1alpha1;g=meta.k8s.io, application/json", expectKind: schema.GroupVersionKind{Kind: "SimpleList", Group: testGroupVersion.Group, Version: testGroupVersion.Version}, }, { list: true, - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io, application/json", + accept: "application/json;as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io, application/json", expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadataList", Group: "meta.k8s.io", Version: "v1beta1"}, }, { - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, { - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io", expected: &metav1beta1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "ns1", CreationTimestamp: now, UID: types.UID("abcdef0123")}, }, expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1beta1"}, }, + { + accept: "application/json;as=PartialObjectMetadata;v=v1;g=meta.k8s.io", + expected: &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "ns1", CreationTimestamp: now, UID: types.UID("abcdef0123")}, + }, + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1"}, + }, { list: true, - accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io", + accept: "application/json;as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io", expected: &metav1beta1.PartialObjectMetadataList{ ListMeta: metav1.ListMeta{ ResourceVersion: "10", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go index 
214dbc1e7d2..0fe8a71c728 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -49,18 +49,18 @@ func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, case target == nil: return obj, nil - case target.Kind == "PartialObjectMetadata" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: - return asV1Beta1PartialObjectMetadata(obj) + case target.Kind == "PartialObjectMetadata": + return asPartialObjectMetadata(obj, target.GroupVersion()) - case target.Kind == "PartialObjectMetadataList" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: - return asV1Beta1PartialObjectMetadataList(obj) + case target.Kind == "PartialObjectMetadataList": + return asPartialObjectMetadataList(obj, target.GroupVersion()) - case target.Kind == "Table" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: + case target.Kind == "Table": options, ok := opts.(*metav1beta1.TableOptions) if !ok { return nil, fmt.Errorf("unexpected TableOptions, got %T", opts) } - return asV1Beta1Table(ctx, obj, options, scope) + return asTable(ctx, obj, options, scope, target.GroupVersion()) default: accepted, _ := negotiation.MediaTypesForSerializer(metainternalversion.Codecs) @@ -74,7 +74,7 @@ func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, func optionsForTransform(mediaType negotiation.MediaTypeOptions, req *http.Request) (interface{}, error) { switch target := mediaType.Convert; { case target == nil: - case target.Kind == "Table" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: + case target.Kind == "Table" && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): opts := &metav1beta1.TableOptions{} if err := metav1beta1.ParameterCodec.DecodeParameters(req.URL.Query(), metav1beta1.SchemeGroupVersion, opts); err != nil { return nil, err @@ -95,9 +95,8 @@ func optionsForTransform(mediaType negotiation.MediaTypeOptions, req *http.Reque func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.MediaTypeOptions, req *http.Request) (schema.GroupVersionKind, runtime.NegotiatedSerializer, bool) { switch target := mediaType.Convert; { case target == nil: - case target.Kind == "PartialObjectMetadata" && target.GroupVersion() == metav1beta1.SchemeGroupVersion, - target.Kind == "PartialObjectMetadataList" && target.GroupVersion() == metav1beta1.SchemeGroupVersion, - target.Kind == "Table" && target.GroupVersion() == metav1beta1.SchemeGroupVersion: + case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") && + (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): return *target, metainternalversion.Codecs, true } return scope.Kind, scope.Serializer, false @@ -142,31 +141,39 @@ func (e errNotAcceptable) Status() metav1.Status { } } -func asV1Beta1Table(ctx context.Context, result runtime.Object, opts *metav1beta1.TableOptions, scope *RequestScope) (runtime.Object, error) { - table, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) +func asTable(ctx context.Context, result runtime.Object, opts *metav1beta1.TableOptions, scope *RequestScope, groupVersion schema.GroupVersion) (runtime.Object, error) { + switch groupVersion { + case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion: + default: + return nil, newNotAcceptableError(fmt.Sprintf("no Table exists 
in group version %s", groupVersion)) + } + + obj, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) if err != nil { return nil, err } + table := (*metav1.Table)(obj) + for i := range table.Rows { item := &table.Rows[i] switch opts.IncludeObject { - case metav1beta1.IncludeObject: + case metav1.IncludeObject: item.Object.Object, err = scope.Convertor.ConvertToVersion(item.Object.Object, scope.Kind.GroupVersion()) if err != nil { return nil, err } // TODO: rely on defaulting for the value here? - case metav1beta1.IncludeMetadata, "": + case metav1.IncludeMetadata, "": m, err := meta.Accessor(item.Object.Object) if err != nil { return nil, err } // TODO: turn this into an internal type and do conversion in order to get object kind automatically set? partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata")) item.Object.Object = partial - case metav1beta1.IncludeNone: + case metav1.IncludeNone: item.Object.Object = nil default: err = errors.NewBadRequest(fmt.Sprintf("unrecognized includeObject value: %q", opts.IncludeObject)) @@ -177,42 +184,74 @@ func asV1Beta1Table(ctx context.Context, result runtime.Object, opts *metav1beta return table, nil } -func asV1Beta1PartialObjectMetadata(result runtime.Object) (runtime.Object, error) { +func asPartialObjectMetadata(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) { if meta.IsListType(result) { err := newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadata, but the requested object is a list (%T)", result)) return nil, err } + switch groupVersion { + case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion: + default: + return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) + } m, err := meta.Accessor(result) if err != nil { return nil, err } partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) + partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata")) return partial, nil } -func asV1Beta1PartialObjectMetadataList(result runtime.Object) (runtime.Object, error) { - if !meta.IsListType(result) { +func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) { + li, ok := result.(metav1.ListInterface) + if !ok { return nil, newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadataList, but the requested object is not a list (%T)", result)) } - list := &metav1beta1.PartialObjectMetadataList{} - if li, ok := result.(metav1.ListInterface); ok { + + gvk := groupVersion.WithKind("PartialObjectMetadata") + switch { + case groupVersion == metav1beta1.SchemeGroupVersion: + list := &metav1beta1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(gvk) + list.Items = append(list.Items, partial) + return nil + }) + if err != nil { + return nil, err + } list.SelfLink = li.GetSelfLink() list.ResourceVersion = li.GetResourceVersion() list.Continue = li.GetContinue() - } - err := meta.EachListItem(result, func(obj runtime.Object) error { - m, err := 
meta.Accessor(obj) + return list, nil + + case groupVersion == metav1.SchemeGroupVersion: + list := &metav1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(gvk) + list.Items = append(list.Items, partial) + return nil + }) if err != nil { - return err + return nil, err } - partial := meta.AsPartialObjectMetadata(m) - partial.GetObjectKind().SetGroupVersionKind(metav1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadata")) - list.Items = append(list.Items, partial) - return nil - }) - if err != nil { - return nil, err + list.SelfLink = li.GetSelfLink() + list.ResourceVersion = li.GetResourceVersion() + list.Continue = li.GetContinue() + return list, nil + + default: + return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) } - return list, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 6adbf84955d..4db0c067627 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -78,7 +78,7 @@ func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Reque func (scope *RequestScope) AllowsConversion(gvk schema.GroupVersionKind, mimeType, mimeSubType string) bool { // TODO: this is temporary, replace with an abstraction calculated at endpoint installation time - if gvk.GroupVersion() == metav1beta1.SchemeGroupVersion { + if gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion { switch gvk.Kind { case "Table": return scope.TableConvertor != nil && diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 22ae8ca317c..d62a725e69d 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -548,7 +548,7 @@ func TestAPICRDProtobuf(t *testing.T) { } } -func TestTransformOnWatch(t *testing.T) { +func TestTransform(t *testing.T) { tearDown, config, _, err := fixtures.StartDefaultServer(t) if err != nil { t.Fatal(err) @@ -592,23 +592,23 @@ func TestTransformOnWatch(t *testing.T) { testcases := []struct { name string accept string - includeObject metav1beta1.IncludeObjectPolicy + includeObject metav1.IncludeObjectPolicy object func(*testing.T) (metav1.Object, string, string) wantErr func(*testing.T, error) wantBody func(*testing.T, io.Reader) }{ { - name: "verify columns on cluster scoped resources", + name: "v1beta1 verify columns on cluster scoped resources", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "default", Namespace: ""}, "", "namespaces" }, wantBody: func(t *testing.T, w io.Reader) { - expectTableWatchEvents(t, 1, 3, metav1beta1.IncludeMetadata, json.NewDecoder(w)) + expectTableWatchEvents(t, 1, 3, metav1.IncludeMetadata, json.NewDecoder(w)) }, }, { - name: "verify columns on CRDs in json", + name: "v1beta1 verify columns on CRDs in json", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { cr, err := crclient.Create(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", 
"kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) @@ -621,11 +621,11 @@ func TestTransformOnWatch(t *testing.T) { return cr, crdGVR.Group, "foos" }, wantBody: func(t *testing.T, w io.Reader) { - expectTableWatchEvents(t, 2, 2, metav1beta1.IncludeMetadata, json.NewDecoder(w)) + expectTableWatchEvents(t, 2, 2, metav1.IncludeMetadata, json.NewDecoder(w)) }, }, { - name: "verify columns on CRDs in json;stream=watch", + name: "v1beta1 verify columns on CRDs in json;stream=watch", accept: "application/json;stream=watch;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { cr, err := crclient.Create(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) @@ -638,11 +638,11 @@ func TestTransformOnWatch(t *testing.T) { return cr, crdGVR.Group, "foos" }, wantBody: func(t *testing.T, w io.Reader) { - expectTableWatchEvents(t, 2, 2, metav1beta1.IncludeMetadata, json.NewDecoder(w)) + expectTableWatchEvents(t, 2, 2, metav1.IncludeMetadata, json.NewDecoder(w)) }, }, { - name: "verify columns on CRDs in yaml", + name: "v1beta1 verify columns on CRDs in yaml", accept: "application/yaml;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { cr, err := crclient.Create(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) @@ -665,7 +665,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify columns on services", + name: "v1beta1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { ns := "default" @@ -679,13 +679,13 @@ func TestTransformOnWatch(t *testing.T) { return svc, "", "services" }, wantBody: func(t *testing.T, w io.Reader) { - expectTableWatchEvents(t, 2, 7, metav1beta1.IncludeMetadata, json.NewDecoder(w)) + expectTableWatchEvents(t, 2, 7, metav1.IncludeMetadata, json.NewDecoder(w)) }, }, { - name: "verify columns on services with no object", + name: "v1beta1 verify columns on services with no object", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", - includeObject: metav1beta1.IncludeNone, + includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { ns := "default" obj, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) @@ -698,13 +698,13 @@ func TestTransformOnWatch(t *testing.T) { return obj, "", "services" }, wantBody: func(t *testing.T, w io.Reader) { - expectTableWatchEvents(t, 2, 7, metav1beta1.IncludeNone, json.NewDecoder(w)) + expectTableWatchEvents(t, 2, 7, metav1.IncludeNone, json.NewDecoder(w)) }, }, { - name: "verify columns on services with full object", + name: "v1beta1 verify columns on services with full object", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", - includeObject: metav1beta1.IncludeObject, + includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { ns := "default" obj, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) @@ -717,7 +717,7 @@ func 
TestTransformOnWatch(t *testing.T) { return obj, "", "services" }, wantBody: func(t *testing.T, w io.Reader) { - objects := expectTableWatchEvents(t, 2, 7, metav1beta1.IncludeObject, json.NewDecoder(w)) + objects := expectTableWatchEvents(t, 2, 7, metav1.IncludeObject, json.NewDecoder(w)) var svc v1.Service if err := json.Unmarshal(objects[1], &svc); err != nil { t.Fatal(err) @@ -728,7 +728,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify partial metadata object on config maps", + name: "v1beta1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { ns := "default" @@ -746,7 +746,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify partial metadata object on config maps in protobuf", + name: "v1beta1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { ns := "default" @@ -764,7 +764,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify error on unsupported mimetype protobuf for table conversion", + name: "v1beta1 verify error on unsupported mimetype protobuf for table conversion", accept: "application/vnd.kubernetes.protobuf;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" @@ -781,7 +781,7 @@ func TestTransformOnWatch(t *testing.T) { }, { name: "verify error on invalid mimetype - bad version", - accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", + accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1alpha1", object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" }, @@ -792,7 +792,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify error on invalid mimetype - bad group", + name: "v1beta1 verify error on invalid mimetype - bad group", accept: "application/json;as=PartialObjectMetadata;g=k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" @@ -804,7 +804,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify error on invalid mimetype - bad kind", + name: "v1beta1 verify error on invalid mimetype - bad kind", accept: "application/json;as=PartialObject;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" @@ -816,7 +816,7 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify error on invalid mimetype - missing kind", + name: "v1beta1 verify error on invalid mimetype - missing kind", accept: "application/json;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" @@ -828,9 +828,241 @@ func TestTransformOnWatch(t *testing.T) { }, }, { - name: "verify error on invalid transform parameter", + name: "v1beta1 verify error on invalid transform parameter", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", - includeObject: metav1beta1.IncludeObjectPolicy("unrecognized"), + includeObject: 
metav1.IncludeObjectPolicy("unrecognized"), + object: func(t *testing.T) (metav1.Object, string, string) { + return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" + }, + wantErr: func(t *testing.T, err error) { + if !apierrors.IsBadRequest(err) || !strings.Contains(err.Error(), `Invalid value: "unrecognized": must be 'Metadata', 'Object', 'None', or empty`) { + t.Fatal(err) + } + }, + }, + + { + name: "v1 verify columns on cluster scoped resources", + accept: "application/json;as=Table;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + return &metav1.ObjectMeta{Name: "default", Namespace: ""}, "", "namespaces" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectTableV1WatchEvents(t, 1, 3, metav1.IncludeMetadata, json.NewDecoder(w)) + }, + }, + { + name: "v1 verify columns on CRDs in json", + accept: "application/json;as=Table;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + cr, err := crclient.Create(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-4"}}}, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("unable to create cr: %v", err) + } + if _, err := crclient.Patch("test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + t.Fatalf("unable to patch cr: %v", err) + } + return cr, crdGVR.Group, "foos" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectTableV1WatchEvents(t, 2, 2, metav1.IncludeMetadata, json.NewDecoder(w)) + }, + }, + { + name: "v1 verify columns on CRDs in json;stream=watch", + accept: "application/json;stream=watch;as=Table;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + cr, err := crclient.Create(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-5"}}}, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("unable to create cr: %v", err) + } + if _, err := crclient.Patch("test-5", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + t.Fatalf("unable to patch cr: %v", err) + } + return cr, crdGVR.Group, "foos" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectTableV1WatchEvents(t, 2, 2, metav1.IncludeMetadata, json.NewDecoder(w)) + }, + }, + { + name: "v1 verify columns on CRDs in yaml", + accept: "application/yaml;as=Table;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + cr, err := crclient.Create(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-6"}}}, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("unable to create cr: %v", err) + } + if _, err := crclient.Patch("test-6", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + t.Fatalf("unable to patch cr: %v", err) + } + return cr, crdGVR.Group, "foos" + }, + wantErr: func(t *testing.T, err error) { + if !apierrors.IsNotAcceptable(err) { + t.Fatal(err) + } + // TODO: this should be a more specific error + if err.Error() != "only the following media types are accepted: application/json;stream=watch" { + t.Fatal(err) + } + }, + }, + { + name: "v1 verify columns on services", + accept: 
"application/json;as=Table;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + ns := "default" + svc, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-4"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + if err != nil { + t.Fatalf("unable to create service: %v", err) + } + if _, err := clientset.CoreV1().Services(ns).Patch(svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + t.Fatalf("unable to update service: %v", err) + } + return svc, "", "services" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectTableV1WatchEvents(t, 2, 7, metav1.IncludeMetadata, json.NewDecoder(w)) + }, + }, + { + name: "v1 verify columns on services with no object", + accept: "application/json;as=Table;g=meta.k8s.io;v=v1", + includeObject: metav1.IncludeNone, + object: func(t *testing.T) (metav1.Object, string, string) { + ns := "default" + obj, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + if err != nil { + t.Fatalf("unable to create object: %v", err) + } + if _, err := clientset.CoreV1().Services(ns).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + t.Fatalf("unable to update object: %v", err) + } + return obj, "", "services" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectTableV1WatchEvents(t, 2, 7, metav1.IncludeNone, json.NewDecoder(w)) + }, + }, + { + name: "v1 verify columns on services with full object", + accept: "application/json;as=Table;g=meta.k8s.io;v=v1", + includeObject: metav1.IncludeObject, + object: func(t *testing.T) (metav1.Object, string, string) { + ns := "default" + obj, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + if err != nil { + t.Fatalf("unable to create object: %v", err) + } + if _, err := clientset.CoreV1().Services(ns).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + t.Fatalf("unable to update object: %v", err) + } + return obj, "", "services" + }, + wantBody: func(t *testing.T, w io.Reader) { + objects := expectTableV1WatchEvents(t, 2, 7, metav1.IncludeObject, json.NewDecoder(w)) + var svc v1.Service + if err := json.Unmarshal(objects[1], &svc); err != nil { + t.Fatal(err) + } + if svc.Annotations["test"] != "1" || svc.Spec.Ports[0].Port != 1000 { + t.Fatalf("unexpected object: %#v", svc) + } + }, + }, + { + name: "v1 verify partial metadata object on config maps", + accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + ns := "default" + obj, err := clientset.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}) + if err != nil { + t.Fatalf("unable to create object: %v", err) + } + if _, err := clientset.CoreV1().ConfigMaps(ns).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + t.Fatalf("unable to update object: %v", err) + } + return obj, "", "configmaps" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectPartialObjectMetaV1Events(t, json.NewDecoder(w), "0", "1") + }, + }, + { + name: "v1 verify partial metadata object on config maps 
in protobuf", + accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + ns := "default" + obj, err := clientset.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}) + if err != nil { + t.Fatalf("unable to create object: %v", err) + } + if _, err := clientset.CoreV1().ConfigMaps(ns).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + t.Fatalf("unable to update object: %v", err) + } + return obj, "", "configmaps" + }, + wantBody: func(t *testing.T, w io.Reader) { + expectPartialObjectMetaV1EventsProtobuf(t, w, "0", "1") + }, + }, + { + name: "v1 verify error on unsupported mimetype protobuf for table conversion", + accept: "application/vnd.kubernetes.protobuf;as=Table;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" + }, + wantErr: func(t *testing.T, err error) { + if !apierrors.IsNotAcceptable(err) { + t.Fatal(err) + } + // TODO: this should be a more specific error + if err.Error() != "only the following media types are accepted: application/json, application/yaml, application/vnd.kubernetes.protobuf" { + t.Fatal(err) + } + }, + }, + { + name: "v1 verify error on invalid mimetype - bad group", + accept: "application/json;as=PartialObjectMetadata;g=k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" + }, + wantErr: func(t *testing.T, err error) { + if !apierrors.IsNotAcceptable(err) { + t.Fatal(err) + } + }, + }, + { + name: "v1 verify error on invalid mimetype - bad kind", + accept: "application/json;as=PartialObject;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" + }, + wantErr: func(t *testing.T, err error) { + if !apierrors.IsNotAcceptable(err) { + t.Fatal(err) + } + }, + }, + { + name: "v1 verify error on invalid mimetype - missing kind", + accept: "application/json;g=meta.k8s.io;v=v1", + object: func(t *testing.T) (metav1.Object, string, string) { + return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" + }, + wantErr: func(t *testing.T, err error) { + if !apierrors.IsNotAcceptable(err) { + t.Fatal(err) + } + }, + }, + { + name: "v1 verify error on invalid transform parameter", + accept: "application/json;as=Table;g=meta.k8s.io;v=v1", + includeObject: metav1.IncludeObjectPolicy("unrecognized"), object: func(t *testing.T) (metav1.Object, string, string) { return &metav1.ObjectMeta{Name: "kubernetes", Namespace: "default"}, "", "services" }, @@ -892,7 +1124,7 @@ func TestTransformOnWatch(t *testing.T) { } } -func expectTableWatchEvents(t *testing.T, count, columns int, policy metav1beta1.IncludeObjectPolicy, d *json.Decoder) [][]byte { +func expectTableWatchEvents(t *testing.T, count, columns int, policy metav1.IncludeObjectPolicy, d *json.Decoder) [][]byte { t.Helper() var objects [][]byte @@ -923,7 +1155,7 @@ func expectTableWatchEvents(t *testing.T, count, columns int, policy metav1beta1 t.Fatalf("Invalid row width: %#v", row.Cells) } switch policy { - case metav1beta1.IncludeMetadata: + case metav1.IncludeMetadata: var meta metav1beta1.PartialObjectMetadata if err := 
json.Unmarshal(row.Object.Raw, &meta); err != nil { t.Fatalf("expected partial object: %v", err) @@ -932,11 +1164,11 @@ func expectTableWatchEvents(t *testing.T, count, columns int, policy metav1beta1 if meta.TypeMeta != partialObj { t.Fatalf("expected partial object: %#v", meta) } - case metav1beta1.IncludeNone: + case metav1.IncludeNone: if len(row.Object.Raw) != 0 { t.Fatalf("Expected no object: %s", string(row.Object.Raw)) } - case metav1beta1.IncludeObject: + case metav1.IncludeObject: if len(row.Object.Raw) == 0 { t.Fatalf("Expected object: %s", string(row.Object.Raw)) } @@ -1000,3 +1232,112 @@ func expectPartialObjectMetaEventsProtobuf(t *testing.T, r io.Reader, values ... } } } + +func expectTableV1WatchEvents(t *testing.T, count, columns int, policy metav1.IncludeObjectPolicy, d *json.Decoder) [][]byte { + t.Helper() + + var objects [][]byte + + for i := 0; i < count; i++ { + var evt metav1.WatchEvent + if err := d.Decode(&evt); err != nil { + t.Fatal(err) + } + var table metav1.Table + if err := json.Unmarshal(evt.Object.Raw, &table); err != nil { + t.Fatal(err) + } + if i == 0 { + if len(table.ColumnDefinitions) != columns { + t.Fatalf("Got unexpected columns on first watch event: %d vs %#v", columns, table.ColumnDefinitions) + } + } else { + if len(table.ColumnDefinitions) != 0 { + t.Fatalf("Expected no columns on second watch event: %#v", table.ColumnDefinitions) + } + } + if len(table.Rows) != 1 { + t.Fatalf("Invalid rows: %#v", table.Rows) + } + row := table.Rows[0] + if len(row.Cells) != columns { + t.Fatalf("Invalid row width: %#v", row.Cells) + } + switch policy { + case metav1.IncludeMetadata: + var meta metav1.PartialObjectMetadata + if err := json.Unmarshal(row.Object.Raw, &meta); err != nil { + t.Fatalf("expected partial object: %v", err) + } + partialObj := metav1.TypeMeta{Kind: "PartialObjectMetadata", APIVersion: "meta.k8s.io/v1"} + if meta.TypeMeta != partialObj { + t.Fatalf("expected partial object: %#v", meta) + } + case metav1.IncludeNone: + if len(row.Object.Raw) != 0 { + t.Fatalf("Expected no object: %s", string(row.Object.Raw)) + } + case metav1.IncludeObject: + if len(row.Object.Raw) == 0 { + t.Fatalf("Expected object: %s", string(row.Object.Raw)) + } + objects = append(objects, row.Object.Raw) + } + } + return objects +} + +func expectPartialObjectMetaV1Events(t *testing.T, d *json.Decoder, values ...string) { + t.Helper() + + for i, value := range values { + var evt metav1.WatchEvent + if err := d.Decode(&evt); err != nil { + t.Fatal(err) + } + var meta metav1.PartialObjectMetadata + if err := json.Unmarshal(evt.Object.Raw, &meta); err != nil { + t.Fatal(err) + } + typeMeta := metav1.TypeMeta{Kind: "PartialObjectMetadata", APIVersion: "meta.k8s.io/v1"} + if meta.TypeMeta != typeMeta { + t.Fatalf("expected partial object: %#v", meta) + } + if meta.Annotations["test"] != value { + t.Fatalf("expected event %d to have value %q instead of %q", i+1, value, meta.Annotations["test"]) + } + } +} + +func expectPartialObjectMetaV1EventsProtobuf(t *testing.T, r io.Reader, values ...string) { + scheme := runtime.NewScheme() + metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + rs := protobuf.NewRawSerializer(scheme, scheme) + d := streaming.NewDecoder( + protobuf.LengthDelimitedFramer.NewFrameReader(ioutil.NopCloser(r)), + rs, + ) + ds := metainternalversion.Codecs.UniversalDeserializer() + + for i, value := range values { + var evt metav1.WatchEvent + if _, _, err := d.Decode(nil, &evt); err != nil { + t.Fatal(err) + } + obj, gvk, err := 
ds.Decode(evt.Object.Raw, nil, nil) + if err != nil { + t.Fatal(err) + } + meta, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + t.Fatalf("unexpected watch object %T", obj) + } + expected := &schema.GroupVersionKind{Kind: "PartialObjectMetadata", Version: "v1", Group: "meta.k8s.io"} + if !reflect.DeepEqual(expected, gvk) { + t.Fatalf("expected partial object: %#v", meta) + } + if meta.Annotations["test"] != value { + t.Fatalf("expected event %d to have value %q instead of %q", i+1, value, meta.Annotations["test"]) + } + } +} From d9d5968947b09d33074b4158c64435c28f48ba8e Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 10 May 2019 14:56:50 -0400 Subject: [PATCH 108/194] Prune matching replace directives in staging repos more effectively --- hack/update-vendor.sh | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/hack/update-vendor.sh b/hack/update-vendor.sh index 6f4ee965d0c..f351f6c9eb9 100755 --- a/hack/update-vendor.sh +++ b/hack/update-vendor.sh @@ -66,7 +66,9 @@ function ensure_require_replace_directives_for_all_dependencies() { go mod edit -json | jq -r ".Require // [] | sort | .[] | select(${require_filter})" > "${require_json}" go mod edit -json | jq -r ".Replace // [] | sort | .[] | select(${replace_filter})" > "${replace_json}" - # 1. Ensure require directives have a corresponding replace directive pinning a version + # 1a. Ensure replace directives have an explicit require directive + cat "${replace_json}" | jq -r '"-require \(.Old.Path)@\(.New.Version)"' | xargs -L 100 go mod edit -fmt + # 1b. Ensure require directives have a corresponding replace directive pinning a version cat "${require_json}" | jq -r '"-replace \(.Path)=\(.Path)@\(.Version)"' | xargs -L 100 go mod edit -fmt cat "${replace_json}" | jq -r '"-replace \(.Old.Path)=\(.New.Path)@\(.New.Version)"'| xargs -L 100 go mod edit -fmt @@ -238,7 +240,20 @@ kube::log::status "go.mod: tidying" for repo in $(tsort "${TMP_DIR}/tidy_deps.txt"); do pushd "${KUBE_ROOT}/staging/src/${repo}" >/dev/null 2>&1 echo "=== tidying ${repo}" >> "${LOG_FILE}" - go mod tidy >>"${LOG_FILE}" 2>&1 + + # prune replace directives that pin to the naturally selected version. + # do this before tidying, since tidy removes unused modules that + # don't provide any relevant packages, which forgets which version of the + # unused transitive dependency we had a require directive for, + # and prevents pruning the matching replace directive after tidying. 
+ go list -m -json all | + jq -r 'select(.Replace != null) | + select(.Path == .Replace.Path) | + select(.Version == .Replace.Version) | + "-dropreplace \(.Replace.Path)"' | + xargs -L 100 go mod edit -fmt + + go mod tidy -v >>"${LOG_FILE}" 2>&1 # disallow transitive dependencies on k8s.io/kubernetes loopback_deps="$(go list all 2>/dev/null | grep k8s.io/kubernetes/ || true)" From 2e34b22d28e5be2ca94f7355f70cbc549296cf04 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 10 May 2019 15:40:43 -0400 Subject: [PATCH 109/194] review staging go.mod files --- .gitattributes | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index b7a3a3981e1..022e4cde51e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -7,5 +7,4 @@ test/test_owners.csv merge=union **/generated.proto **/types_swagger_doc_generated.go linguist-generated=true api/openapi-spec/*.json linguist-generated=true -staging/**/go.mod linguist-generated=true staging/**/go.sum linguist-generated=true From eb82dddfdd504c2956ec438b739e01230067e90f Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 10 May 2019 15:41:34 -0400 Subject: [PATCH 110/194] generated files --- staging/src/k8s.io/api/go.sum | 2 ++ .../src/k8s.io/apiextensions-apiserver/go.sum | 21 +++++++++++++++++++ staging/src/k8s.io/apimachinery/go.sum | 2 ++ staging/src/k8s.io/apiserver/go.sum | 11 ++++++++++ staging/src/k8s.io/cli-runtime/go.sum | 9 ++++++++ staging/src/k8s.io/client-go/go.sum | 5 +++++ staging/src/k8s.io/cloud-provider/go.sum | 5 +++++ staging/src/k8s.io/cluster-bootstrap/go.sum | 2 ++ staging/src/k8s.io/code-generator/go.mod | 1 - staging/src/k8s.io/code-generator/go.sum | 4 +++- staging/src/k8s.io/component-base/go.sum | 3 +++ staging/src/k8s.io/csi-translation-lib/go.sum | 5 +++++ staging/src/k8s.io/kube-aggregator/go.sum | 13 ++++++++++++ .../src/k8s.io/kube-controller-manager/go.sum | 3 +++ staging/src/k8s.io/kube-proxy/go.sum | 3 +++ staging/src/k8s.io/kube-scheduler/go.sum | 3 +++ staging/src/k8s.io/kubelet/go.sum | 2 ++ .../src/k8s.io/legacy-cloud-providers/go.sum | 6 ++++++ staging/src/k8s.io/metrics/go.sum | 7 +++++++ staging/src/k8s.io/node-api/go.sum | 7 +++++++ staging/src/k8s.io/sample-apiserver/go.sum | 13 ++++++++++++ staging/src/k8s.io/sample-cli-plugin/go.sum | 9 ++++++++ staging/src/k8s.io/sample-controller/go.sum | 7 +++++++ 23 files changed, 141 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index 867b39b2fd6..97da973bff2 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -30,11 +30,13 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 6986c805256..185ce57cdd6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -42,26 +42,37 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2 h1:eYp14J1o8TTSCzndHBtsNuckikV1PfZOSnx4BcBeu0c= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.17.2 h1:azEQ8Fnx0jmtFF2fxsnmd6I0x6rsweUF63qqSO1NmKk= github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2 h1:tEXYu6Xc0pevpzzQx5ghrMN9F7IVpN/+u4iD3rkYE5o= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.17.2 h1:/ZK67ikFhQAMFFH/aPu2MaGH7QjP4wHBvHYOVIzDAw0= github.com/go-openapi/runtime v0.17.2/go.mod h1:QO936ZXeisByFmZEO1IS1Dqhtf4QV1sYYFtIq6Ld86Q= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= 
github.com/go-openapi/strfmt v0.17.0 h1:1isAxYf//QDTnVzbLAMrUK++0k1EjeLJU/gTOR0o3Mc= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0 h1:PVXYcP1GkTl+XIAJnyJxOmK6CSG5Q1UcvoCvNO++5Kg= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= @@ -162,12 +173,19 @@ go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df h1:shvkWr0NAZkg4nPuE3XrK go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 h1:Z2sc4+v0JHV6Mn4kX1f2a5nruNjmV+Th32sugE8zwz8= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -176,6 +194,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= 
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -184,8 +203,10 @@ golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFh golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6 h1:72GtwBPfq6av9X0Ru2HtAopsPW+d+vh1K1zaxanTdE8= diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index a090719bf51..fd89f50abad 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -42,12 +42,14 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 11df5623494..b919b2e90ba 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -41,12 +41,15 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= 
github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= @@ -142,8 +145,14 @@ go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df h1:shvkWr0NAZkg4nPuE3XrK go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 h1:Z2sc4+v0JHV6Mn4kX1f2a5nruNjmV+Th32sugE8zwz8= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -152,11 +161,13 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6 h1:72GtwBPfq6av9X0Ru2HtAopsPW+d+vh1K1zaxanTdE8= diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index 6f365e002f2..593d5ff209e 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -17,12 +17,15 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= @@ -74,6 +77,10 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a 
h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -82,11 +89,13 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum index c1477ffc674..1de81bd91bd 100644 --- a/staging/src/k8s.io/client-go/go.sum +++ b/staging/src/k8s.io/client-go/go.sum @@ -58,6 +58,9 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -66,11 +69,13 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= golang.org/x/time 
v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum index 9112c7aee36..fa7acb50662 100644 --- a/staging/src/k8s.io/cloud-provider/go.sum +++ b/staging/src/k8s.io/cloud-provider/go.sum @@ -47,6 +47,9 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -55,11 +58,13 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index 75ea41f0387..9d012777040 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -29,10 +29,12 @@ github.com/spf13/pflag v1.0.1 
h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod index 6d5e45a7518..37b7b16e75c 100644 --- a/staging/src/k8s.io/code-generator/go.mod +++ b/staging/src/k8s.io/code-generator/go.mod @@ -16,7 +16,6 @@ require ( replace ( golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 - golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/code-generator => ../code-generator ) diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum index c2d8a90dfe9..f40a1f4059f 100644 --- a/staging/src/k8s.io/code-generator/go.sum +++ b/staging/src/k8s.io/code-generator/go.sum @@ -4,16 +4,18 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5 github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFhxXGKWHMIRUI/T5x1GP90= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= 
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index da5396b53f5..37e1e1b8af9 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -44,11 +44,14 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index 30423642d1f..9999e119937 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -38,15 +38,20 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index 3268152d6bc..f00f52b84d6 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -41,12 +41,15 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= @@ -146,12 +149,19 @@ go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df h1:shvkWr0NAZkg4nPuE3XrK go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 h1:Z2sc4+v0JHV6Mn4kX1f2a5nruNjmV+Th32sugE8zwz8= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto 
v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -160,6 +170,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -168,8 +179,10 @@ golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFh golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6 h1:72GtwBPfq6av9X0Ru2HtAopsPW+d+vh1K1zaxanTdE8= diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 86939f2a039..8f1ee98efb4 
100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -36,10 +36,13 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 86939f2a039..8f1ee98efb4 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -36,10 +36,13 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 86939f2a039..8f1ee98efb4 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -36,10 +36,13 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 75ea41f0387..9d012777040 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -29,10 +29,12 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum index 264a67b9e19..3b380bffa31 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.sum +++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum @@ -84,6 +84,10 @@ github.com/vmware/govmomi v0.20.0 h1:+1IyhvoVb5JET2Wvgw9J3ZDv6CK4sxzUunpH8LhQqm4 github.com/vmware/govmomi v0.20.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -92,6 +96,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -99,6 +104,7 @@ golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/api v0.0.0-20181220000619-583d854617af h1:iQMS7JKv/0w/iiWf1M49Cg3dmOkBoBZT5KheqPDpaac= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index 456fb133e33..6883f1e3729 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -48,10 +48,14 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -60,6 +64,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -68,8 +73,10 @@ golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFh golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/node-api/go.sum b/staging/src/k8s.io/node-api/go.sum index d77e00deda4..0ad3b265c51 100644 --- a/staging/src/k8s.io/node-api/go.sum +++ b/staging/src/k8s.io/node-api/go.sum @@ -50,10 +50,14 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -62,6 +66,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -70,8 +75,10 @@ golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFh golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index f044dad8976..3d8f3f9a6e8 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -39,12 +39,15 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= 
github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= @@ -143,12 +146,19 @@ go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df h1:shvkWr0NAZkg4nPuE3XrK go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 h1:Z2sc4+v0JHV6Mn4kX1f2a5nruNjmV+Th32sugE8zwz8= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -157,6 +167,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -165,8 +176,10 @@ golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFh golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6 h1:72GtwBPfq6av9X0Ru2HtAopsPW+d+vh1K1zaxanTdE8= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index 6f365e002f2..593d5ff209e 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -17,12 +17,15 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= @@ -74,6 +77,10 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -82,11 +89,13 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index c461abf499e..75b80670838 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -51,10 +51,14 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -63,6 +67,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= @@ -71,8 +76,10 @@ golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFh golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From d35851724eb680c23cf624d6def291b3b82fb088 Mon Sep 17 00:00:00 2001 From: Akihito INOH Date: Sat, 11 May 2019 05:04:05 +0900 Subject: [PATCH 111/194] Use framework.ExpectNoError() for e2e/lifecycle/bootstrap The e2e test framework has ExpectNoError() for readable test code. This replaces Expect(err).NotTo(HaveOccurred()) with it for e2e/lifecycle/bootstrap. 
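For reviewers, a minimal sketch of the rewrite this patch applies. The spec and helper below are hypothetical and are not taken from the files in the diff; only the framework.ExpectNoError call and the gomega form it replaces are real:

    package example

    import (
    	"github.com/onsi/ginkgo"

    	"k8s.io/kubernetes/test/e2e/framework"
    )

    // doSomething stands in for any step in a spec that returns an error.
    func doSomething() error { return nil }

    var _ = ginkgo.Describe("[Example] error assertions", func() {
    	ginkgo.It("fails the spec on unexpected errors", func() {
    		err := doSomething()
    		// Before this series: Expect(err).NotTo(HaveOccurred())
    		// After: a single call that fails the running spec and logs the error.
    		framework.ExpectNoError(err, "doSomething should succeed")
    	})
    })

The helper keeps the same failure semantics as the gomega assertion while reading as one call, and it removes the need for the dot-imported gomega package in these files.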
--- .../lifecycle/bootstrap/bootstrap_signer.go | 32 +++++++++---------- .../bootstrap/bootstrap_token_cleaner.go | 19 ++++++----- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index e3ef32e7fec..0734a18817d 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -41,7 +41,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { if len(secretNeedClean) > 0 { By("delete the bootstrap token secret") err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) secretNeedClean = "" } }) @@ -52,22 +52,22 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { It("should sign the new added bootstrap tokens", func() { By("create a new bootstrap token secret") tokenId, err := GenerateTokenId() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) secret := newTokenSecret(tokenId, "tokenSecret") _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("wait for the bootstrap token secret be signed") err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func() { By("create a new bootstrap token secret") tokenId, err := GenerateTokenId() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) secret := newTokenSecret(tokenId, "tokenSecret") secret, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId @@ -76,49 +76,49 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId) cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenId] Expect(ok).Should(Equal(true)) By("update the cluster-info ConfigMap") originalData := cfgMap.Data[bootstrapapi.KubeConfigKey] updatedKubeConfig, err := randBytes(20) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = updatedKubeConfig _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) defer func() { By("update back the cluster-info ConfigMap") cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }() By("wait for signed bootstrap token updated") err = WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c, tokenId, signedToken) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when 
bootstrap token is deleted", func() { By("create a new bootstrap token secret") tokenId, err := GenerateTokenId() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) secret := newTokenSecret(tokenId, "tokenSecret") _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("wait for the bootstrap secret be signed") err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("delete the bootstrap token secret") err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(bootstrapapi.BootstrapTokenSecretPrefix+tokenId, &metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("wait for the bootstrap token removed from cluster-info ConfigMap") err = WaitForSignedClusterInfoByBootstrapTokenToDisappear(c, tokenId) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) }) diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index d5adba2ca1b..4dadf928b83 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -20,7 +20,6 @@ import ( "time" . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -45,41 +44,41 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { By("delete the bootstrap token secret") err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{}) secretNeedClean = "" - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) } }) It("should delete the token secret when the secret expired", func() { By("create a new expired bootstrap token secret") tokenId, err := GenerateTokenId() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) tokenSecret, err := GenerateTokenSecret() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) secret := newTokenSecret(tokenId, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(-time.Hour)) _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("wait for the bootstrap token secret be deleted") err = WaitForBootstrapTokenSecretToDisappear(c, tokenId) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) It("should not delete the token secret when the secret is not expired", func() { By("create a new expired bootstrap token secret") tokenId, err := GenerateTokenId() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) tokenSecret, err := GenerateTokenSecret() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) secret := newTokenSecret(tokenId, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(time.Hour)) _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("wait for the bootstrap token secret not be deleted") err = WaitForBootstrapTokenSecretNotDisappear(c, tokenId, 20*time.Second) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) }) }) From 4751f403198cc3c5763c2d2fb9f1302e5a3c3115 Mon Sep 17 00:00:00 2001 From: Haiyan Meng Date: Fri, 10 May 2019 13:46:35 -0700 Subject: [PATCH 112/194] Remove the `host` label from 
the kubelet http traffic metrics Signed-off-by: Haiyan Meng --- pkg/kubelet/server/metrics/metrics.go | 6 +++--- pkg/kubelet/server/server.go | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/kubelet/server/metrics/metrics.go b/pkg/kubelet/server/metrics/metrics.go index 48ad2d145d0..1694ac9bc5c 100644 --- a/pkg/kubelet/server/metrics/metrics.go +++ b/pkg/kubelet/server/metrics/metrics.go @@ -38,7 +38,7 @@ var ( // server_type aims to differentiate the readonly server and the readwrite server. // long_running marks whether the request is long-running or not. // Currently, long-running requests include exec/attach/portforward/debug. - []string{"method", "path", "host", "server_type", "long_running"}, + []string{"method", "path", "server_type", "long_running"}, ) // HTTPRequestsDuration tracks the duration in seconds to serve http requests. HTTPRequestsDuration = prometheus.NewHistogramVec( @@ -49,7 +49,7 @@ var ( // Use DefBuckets for now, will customize the buckets if necessary. Buckets: prometheus.DefBuckets, }, - []string{"method", "path", "host", "server_type", "long_running"}, + []string{"method", "path", "server_type", "long_running"}, ) // HTTPInflightRequests tracks the number of the inflight http requests. HTTPInflightRequests = prometheus.NewGaugeVec( @@ -58,7 +58,7 @@ var ( Name: "http_inflight_requests", Help: "Number of the inflight http requests", }, - []string{"method", "path", "host", "server_type", "long_running"}, + []string{"method", "path", "server_type", "long_running"}, ) ) diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index e4e9f94208e..f4206d0cb85 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -859,17 +859,17 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { serverType = "readwrite" } - method, path, host := req.Method, trimURLPath(req.URL.Path), req.URL.Host + method, path := req.Method, trimURLPath(req.URL.Path) longRunning := strconv.FormatBool(isLongRunningRequest(path)) - servermetrics.HTTPRequests.WithLabelValues(method, path, host, serverType, longRunning).Inc() + servermetrics.HTTPRequests.WithLabelValues(method, path, serverType, longRunning).Inc() - servermetrics.HTTPInflightRequests.WithLabelValues(method, path, host, serverType, longRunning).Inc() - defer servermetrics.HTTPInflightRequests.WithLabelValues(method, path, host, serverType, longRunning).Dec() + servermetrics.HTTPInflightRequests.WithLabelValues(method, path, serverType, longRunning).Inc() + defer servermetrics.HTTPInflightRequests.WithLabelValues(method, path, serverType, longRunning).Dec() startTime := time.Now() - defer servermetrics.HTTPRequestsDuration.WithLabelValues(method, path, host, serverType, longRunning).Observe(servermetrics.SinceInSeconds(startTime)) + defer servermetrics.HTTPRequestsDuration.WithLabelValues(method, path, serverType, longRunning).Observe(servermetrics.SinceInSeconds(startTime)) s.restfulCont.ServeHTTP(w, req) } From e017436bef8be3cf12907d5a2386d31b24151b3d Mon Sep 17 00:00:00 2001 From: Antoine Pelisse Date: Fri, 10 May 2019 14:58:22 -0700 Subject: [PATCH 113/194] Fix missing pdb in test --- pkg/registry/core/pod/storage/eviction_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/registry/core/pod/storage/eviction_test.go b/pkg/registry/core/pod/storage/eviction_test.go index ced919f9184..83590a22f95 100644 --- a/pkg/registry/core/pod/storage/eviction_test.go +++ b/pkg/registry/core/pod/storage/eviction_test.go @@ -251,7 
+251,7 @@ func TestEvictionDryRun(t *testing.T) { t.Error(err) } - client := fake.NewSimpleClientset() + client := fake.NewSimpleClientset(tc.pdbs...) evictionRest := newEvictionStorage(storage.Store, client.PolicyV1beta1()) eviction := &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, DeleteOptions: tc.evictionOptions} _, err := evictionRest.Create(testContext, eviction, nil, tc.requestOptions) From 65fcbf4afb94dabc8c67cd1447108bae6153e4ac Mon Sep 17 00:00:00 2001 From: Patrick Christopher Date: Fri, 10 May 2019 16:55:01 -0700 Subject: [PATCH 114/194] upgrade elasticsearch for vuln handling --- cluster/addons/fluentd-elasticsearch/es-image/Dockerfile | 2 +- cluster/addons/fluentd-elasticsearch/es-image/Makefile | 2 +- cluster/addons/fluentd-elasticsearch/es-statefulset.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile b/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile index 79a1474e960..7bb7612dbfe 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile @@ -17,7 +17,7 @@ COPY elasticsearch_logging_discovery.go go.mod go.sum / RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -ldflags "-w" -o /elasticsearch_logging_discovery /elasticsearch_logging_discovery.go -FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.1 +FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.7.2 VOLUME ["/data"] EXPOSE 9200 9300 diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Makefile b/cluster/addons/fluentd-elasticsearch/es-image/Makefile index b3e154b16a4..8f5dbab5b4e 100755 --- a/cluster/addons/fluentd-elasticsearch/es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Makefile @@ -16,7 +16,7 @@ PREFIX = gcr.io/fluentd-elasticsearch IMAGE = elasticsearch -TAG = v6.6.1 +TAG = v6.7.2 build: gcloud builds submit --tag ${PREFIX}/${IMAGE}:${TAG} diff --git a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml index af48182363e..42dc598edf4 100644 --- a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml @@ -59,12 +59,12 @@ spec: selector: matchLabels: k8s-app: elasticsearch-logging - version: v6.6.1 + version: v6.7.2 template: metadata: labels: k8s-app: elasticsearch-logging - version: v6.6.1 + version: v6.7.2 spec: serviceAccountName: elasticsearch-logging containers: From 515f8342d342ce5d017df26838d6dd36fb162361 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Thu, 9 May 2019 23:17:57 +0800 Subject: [PATCH 115/194] fix golint failures of test/e2e/network --- hack/.golint_failures | 1 - test/e2e/network/dns.go | 180 ++++---- test/e2e/network/dns_common.go | 70 +-- test/e2e/network/dns_configmap.go | 58 +-- test/e2e/network/dns_scale_records.go | 8 +- test/e2e/network/example_cluster_dns.go | 24 +- test/e2e/network/firewall.go | 76 +-- test/e2e/network/framework.go | 1 + test/e2e/network/ingress.go | 343 +++++++------- test/e2e/network/ingress_scale.go | 14 +- test/e2e/network/kube_proxy.go | 28 +- test/e2e/network/network_policy.go | 112 ++--- test/e2e/network/network_tiers.go | 52 +-- test/e2e/network/networking.go | 98 ++-- test/e2e/network/networking_perf.go | 8 +- test/e2e/network/no_snat.go | 20 +- test/e2e/network/proxy.go | 28 +- test/e2e/network/service.go | 586 ++++++++++++------------ test/e2e/network/service_latency.go | 4 +- 
test/e2e/network/util_iperf.go | 6 +- 20 files changed, 858 insertions(+), 859 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index c53673bb7b5..a9a1453759c 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -608,7 +608,6 @@ test/e2e/chaosmonkey test/e2e/common test/e2e/framework test/e2e/lifecycle/bootstrap -test/e2e/network test/e2e/node test/e2e/scalability test/e2e/scheduling diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index f14e6ad3c25..acd94976b63 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const dnsTestPodHostName = "dns-querier-1" @@ -60,16 +60,16 @@ var _ = SIGDescribe("DNS", func() { } wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - It("should resolve DNS of partial qualified names for the cluster ", func() { + ginkgo.It("should resolve DNS of partial qualified names for the cluster ", func() { // All the names we need to be able to resolve. // TODO: Spin up a separate test service and test that dns works for that service. namesToResolve := []string{ @@ -89,11 +89,11 @@ var _ = SIGDescribe("DNS", func() { hostEntries := []string{hostFQDN, dnsTestPodHostName} wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. 
- By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) @@ -108,11 +108,11 @@ var _ = SIGDescribe("DNS", func() { hostEntries := []string{hostFQDN, dnsTestPodHostName} wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes /etc/hosts and exposes the results by HTTP. - By("creating a pod to probe /etc/hosts") + ginkgo.By("creating a pod to probe /etc/hosts") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) @@ -125,27 +125,27 @@ var _ = SIGDescribe("DNS", func() { framework.ConformanceIt("should provide DNS for services ", func() { // NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below // Create a test headless service. - By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test": "true", } headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() regularServiceName := "test-service-2" regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) - Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName) defer func() { - By("deleting the test service") - defer GinkgoRecover() + ginkgo.By("deleting the test service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) }() @@ -160,39 +160,39 @@ var _ = SIGDescribe("DNS", func() { wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + 
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod.ObjectMeta.Labels = testServiceSelector validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - It("should resolve DNS of partial qualified names for services ", func() { + ginkgo.It("should resolve DNS of partial qualified names for services ", func() { // Create a test headless service. - By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test": "true", } headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() regularServiceName := "test-service-2" regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) - Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName) defer func() { - By("deleting the test service") - defer GinkgoRecover() + ginkgo.By("deleting the test service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) }() @@ -209,20 +209,20 @@ var _ = SIGDescribe("DNS", func() { wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod.ObjectMeta.Labels = testServiceSelector validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - It("should provide DNS for pods for Hostname [LinuxOnly]", func() { + ginkgo.It("should provide DNS for pods for Hostname [LinuxOnly]", func() { // Create a test headless service. 
- By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test-hostname-attribute": "true", } @@ -230,11 +230,11 @@ var _ = SIGDescribe("DNS", func() { podHostname := "dns-querier-2" headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() @@ -242,11 +242,11 @@ var _ = SIGDescribe("DNS", func() { hostNames := []string{hostFQDN, podHostname} wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1.ObjectMeta.Labels = testServiceSelector pod1.Spec.Hostname = podHostname @@ -255,9 +255,9 @@ var _ = SIGDescribe("DNS", func() { validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...)) }) - It("should provide DNS for pods for Subdomain", func() { + ginkgo.It("should provide DNS for pods for Subdomain", func() { // Create a test headless service. 
- By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test-hostname-attribute": "true", } @@ -265,11 +265,11 @@ var _ = SIGDescribe("DNS", func() { podHostname := "dns-querier-2" headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() @@ -277,11 +277,11 @@ var _ = SIGDescribe("DNS", func() { namesToResolve := []string{hostFQDN} wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1.ObjectMeta.Labels = testServiceSelector pod1.Spec.Hostname = podHostname @@ -298,72 +298,72 @@ var _ = SIGDescribe("DNS", func() { */ framework.ConformanceIt("should provide DNS for ExternalName services", func() { // Create a test ExternalName service. - By("Creating a test externalName service") + ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService) - Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ExternalName service: %s", serviceName) defer func() { - By("deleting the test externalName service") - defer GinkgoRecover() + ginkgo.By("deleting the test externalName service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. 
- By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") // Test changing the externalName field - By("changing the externalName to bar.example.com") + ginkgo.By("changing the externalName to bar.example.com") _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.ExternalName = "bar.example.com" }) - Expect(err).NotTo(HaveOccurred(), "failed to change externalName of service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change externalName of service: %s", serviceName) wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a second pod to probe DNS") + ginkgo.By("creating a second pod to probe DNS") pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.") // Test changing type from ExternalName to ClusterIP - By("changing the service to type=ClusterIP") + ginkgo.By("changing the service to type=ClusterIP") _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, } }) - Expect(err).NotTo(HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName) wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy") jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie") - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. 
- By("creating a third pod to probe DNS") + ginkgo.By("creating a third pod to probe DNS") pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get service: %s", externalNameService.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get service: %s", externalNameService.Name) validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) }) - It("should support configurable pod DNS nameservers", func() { - By("Creating a pod with dnsPolicy=None and customized dnsConfig...") + ginkgo.It("should support configurable pod DNS nameservers", func() { + ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...") testServerIP := "1.1.1.1" testSearchPath := "resolv.conf.local" testAgnhostPod := f.NewAgnhostPod(f.Namespace.Name, "pause") @@ -373,15 +373,15 @@ var _ = SIGDescribe("DNS", func() { Searches: []string{testSearchPath}, } testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name) framework.Logf("Created pod %v", testAgnhostPod) defer func() { framework.Logf("Deleting pod %s...", testAgnhostPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("Failed to delete pod %s: %v", testAgnhostPod.Name, err) + framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) } }() - Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name) + gomega.Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name) runCommand := func(arg string) string { cmd := []string{"/agnhost", arg} @@ -393,25 +393,25 @@ var _ = SIGDescribe("DNS", func() { CaptureStdout: true, CaptureStderr: true, }) - Expect(err).NotTo(HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err) return stdout } - By("Verifying customized DNS suffix list is configured on pod...") + ginkgo.By("Verifying customized DNS suffix list is configured on pod...") stdout := runCommand("dns-suffix") if !strings.Contains(stdout, testSearchPath) { framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout) } - By("Verifying customized DNS server is configured on pod...") + ginkgo.By("Verifying customized DNS server is configured on pod...") stdout = runCommand("dns-server-list") if !strings.Contains(stdout, testServerIP) { framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout) } }) - It("should support configurable pod resolv.conf", func() { - By("Preparing a test DNS service with injected DNS names...") + ginkgo.It("should support configurable pod resolv.conf", func() { + ginkgo.By("Preparing a test DNS service with injected DNS names...") 
testInjectedIP := "1.1.1.1" testDNSNameShort := "notexistname" testSearchPath := "resolv.conf.local" @@ -421,23 +421,23 @@ var _ = SIGDescribe("DNS", func() { testDNSNameFull: testInjectedIP, }) testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testServerPod.Name) e2elog.Logf("Created pod %v", testServerPod) defer func() { e2elog.Logf("Deleting pod %s...", testServerPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err) + framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) } }() - Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name) + gomega.Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name) // Retrieve server pod IP. testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %v", testServerPod.Name) testServerIP := testServerPod.Status.PodIP e2elog.Logf("testServerIP is %s", testServerIP) - By("Creating a pod with dnsPolicy=None and customized dnsConfig...") + ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...") testUtilsPod := generateDNSUtilsPod() testUtilsPod.Spec.DNSPolicy = v1.DNSNone testNdotsValue := "2" @@ -452,17 +452,17 @@ var _ = SIGDescribe("DNS", func() { }, } testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name) e2elog.Logf("Created pod %v", testUtilsPod) defer func() { e2elog.Logf("Deleting pod %s...", testUtilsPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err) + framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) } }() - Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name) + gomega.Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name) - By("Verifying customized DNS option is configured on pod...") + ginkgo.By("Verifying customized DNS option is configured on pod...") // TODO: Figure out a better way other than checking the actual resolv,conf file. 
cmd := []string{"cat", "/etc/resolv.conf"} stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{ @@ -473,12 +473,12 @@ var _ = SIGDescribe("DNS", func() { CaptureStdout: true, CaptureStderr: true, }) - Expect(err).NotTo(HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) if !strings.Contains(stdout, "ndots:2") { framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout) } - By("Verifying customized name server and search path are working...") + ginkgo.By("Verifying customized name server and search path are working...") // Do dig on not-exist-dns-name and see if the injected DNS record is returned. // This verifies both: // - Custom search path is appended. @@ -494,7 +494,7 @@ var _ = SIGDescribe("DNS", func() { CaptureStderr: true, }) if err != nil { - e2elog.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err) + e2elog.Logf("ginkgo.Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err) return false, nil } res := strings.Split(stdout, "\n") @@ -505,7 +505,7 @@ var _ = SIGDescribe("DNS", func() { return true, nil } err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc) - Expect(err).NotTo(HaveOccurred(), "failed to verify customized name server and search path") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to verify customized name server and search path") // TODO: Add more test cases for other DNSPolicies. }) diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 929ec23e063..a53653b6389 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -35,8 +35,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) type dnsTestCommon struct { @@ -62,14 +62,14 @@ func newDNSTestCommon() dnsTestCommon { } func (t *dnsTestCommon) init() { - By("Finding a DNS pod") + ginkgo.By("Finding a DNS pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) options := metav1.ListOptions{LabelSelector: label.String()} namespace := "kube-system" pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", namespace) - Expect(len(pods.Items)).Should(BeNumerically(">=", 1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", namespace) + gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1)) t.dnsPod = &pods.Items[0] e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name) @@ -157,23 +157,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { }.AsSelector().String(), } cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns) if len(cmList.Items) == 0 { - By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) + ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm) - Expect(err).NotTo(HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) } else { - By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) + ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm) - Expect(err).NotTo(HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) } } func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string { if t.name == "coredns" { pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name) return pcm.Data } return nil @@ -189,10 +189,10 @@ func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) { } func (t *dnsTestCommon) deleteConfigMap() { - By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) + ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) t.cm = nil err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete config map: %s", t.name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete config map: %s", t.name) } func (t *dnsTestCommon) createUtilPodLabel(baseName string) { @@ -224,9 +224,9 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { var err error t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.utilPod) e2elog.Logf("Created pod %v", t.utilPod) - Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), 
"pod failed to start running: %v", t.utilPod) + gomega.Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.utilPod) t.utilService = &v1.Service{ TypeMeta: metav1.TypeMeta{ @@ -249,7 +249,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { } t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) e2elog.Logf("Created service %v", t.utilService) } @@ -272,7 +272,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() { for _, pod := range pods.Items { err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", pod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", pod.Name) } } @@ -315,13 +315,13 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) { var err error t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.dnsServerPod) e2elog.Logf("Created pod %v", t.dnsServerPod) - Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod) + gomega.Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod) t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get( t.dnsServerPod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name) } func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { @@ -539,30 +539,30 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed) return false, nil })) - Expect(len(failed)).To(Equal(0)) + gomega.Expect(len(failed)).To(gomega.Equal(0)) } func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) { - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) defer func() { - By("deleting the pod") - defer GinkgoRecover() + ginkgo.By("deleting the pod") + defer ginkgo.GinkgoRecover() podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("retrieving the pod") + ginkgo.By("retrieving the pod") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find results for each expected name. 
- By("looking for the results for each expected name from probers") + ginkgo.By("looking for the results for each expected name from probers") assertFilesExist(fileNames, "results", pod, f.ClientSet) // TODO: probe from the host, too. @@ -571,26 +571,26 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) } func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) defer func() { - By("deleting the pod") - defer GinkgoRecover() + ginkgo.By("deleting the pod") + defer ginkgo.GinkgoRecover() podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("retrieving the pod") + ginkgo.By("retrieving the pod") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find the expected value for each expected name. - By("looking for the results for each expected name from probers") + ginkgo.By("looking for the results for each expected name from probers") assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value) e2elog.Logf("DNS probes using %s succeeded\n", pod.Name) diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index 1cb384b504c..b10dd9ca816 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) type dnsFederationsConfigMapTest struct { @@ -45,7 +45,7 @@ var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() { t := &dnsFederationsConfigMapTest{dnsTestCommon: newDNSTestCommon()} - It("should be able to change federation configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change federation configuration [Slow][Serial]", func() { t.c = t.f.ClientSet t.run() }) @@ -96,17 +96,17 @@ func (t *dnsFederationsConfigMapTest) run() { }`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)} valid2m := map[string]string{t.labels[1]: "xyz.com"} - By("default -> valid1") + ginkgo.By("default -> valid1") t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.deleteCoreDNSPods() t.validate(framework.TestContext.ClusterDNSDomain) - By("valid1 -> valid2") + ginkgo.By("valid1 -> valid2") t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true) t.deleteCoreDNSPods() t.validate(framework.TestContext.ClusterDNSDomain) - By("valid2 -> default") + ginkgo.By("valid2 -> default") t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false) t.deleteCoreDNSPods() t.validate(framework.TestContext.ClusterDNSDomain) @@ -121,27 +121,27 @@ func (t *dnsFederationsConfigMapTest) run() { valid2m := map[string]string{t.labels[1]: "xyz"} invalid := map[string]string{"federations": "invalid.map=xyz"} - By("empty -> valid1") + ginkgo.By("empty -> valid1") t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.validate(framework.TestContext.ClusterDNSDomain) - By("valid1 -> valid2") + ginkgo.By("valid1 -> valid2") t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true) t.validate(framework.TestContext.ClusterDNSDomain) - By("valid2 -> invalid") + ginkgo.By("valid2 -> invalid") t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false) t.validate(framework.TestContext.ClusterDNSDomain) - By("invalid -> valid1") + ginkgo.By("invalid -> valid1") t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.validate(framework.TestContext.ClusterDNSDomain) - By("valid1 -> deleted") + ginkgo.By("valid1 -> deleted") t.deleteConfigMap() t.validate(framework.TestContext.ClusterDNSDomain) - By("deleted -> invalid") + ginkgo.By("deleted -> invalid") t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false) t.validate(framework.TestContext.ClusterDNSDomain) } @@ -151,7 +151,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) { federations := t.fedMap if len(federations) == 0 { - By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels)) + ginkgo.By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels)) for _, label := range t.labels { var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.", @@ -173,7 +173,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) { // Check local mapping. Checking a remote mapping requires // creating an arbitrary DNS record which is not possible at the // moment. 
- By(fmt.Sprintf("Validating federation record %v", label)) + ginkgo.By(fmt.Sprintf("Validating federation record %v", label)) predicate := func(actual []string) bool { for _, v := range actual { if v == localDNS { @@ -407,16 +407,16 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { serviceName := "dns-externalname-upstream-test" externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil { - Fail(fmt.Sprintf("Failed when creating service: %v", err)) + ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } serviceNameLocal := "dns-externalname-upstream-local" externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil { - Fail(fmt.Sprintf("Failed when creating service: %v", err)) + ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } defer func() { - By("deleting the test externalName service") - defer GinkgoRecover() + ginkgo.By("deleting the test externalName service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil) }() @@ -482,28 +482,28 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() { - Context("Change stubDomain", func() { + ginkgo.Context("Change stubDomain", func() { nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} - It("should be able to change stubDomain configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() { nsTest.c = nsTest.f.ClientSet nsTest.run(false) }) }) - Context("Forward PTR lookup", func() { + ginkgo.Context("Forward PTR lookup", func() { fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} - It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { fwdTest.c = fwdTest.f.ClientSet fwdTest.run(false) }) }) - Context("Forward external name lookup", func() { + ginkgo.Context("Forward external name lookup", func() { externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} - It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.run(false) }) @@ -512,28 +512,28 @@ var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() { var _ = SIGDescribe("DNS configMap nameserver [Feature:Networking-IPv6]", func() { - Context("Change stubDomain", func() { + ginkgo.Context("Change stubDomain", func() { nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} - It("should be able to change stubDomain configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() { nsTest.c = nsTest.f.ClientSet nsTest.run(true) }) }) - Context("Forward PTR lookup", func() { + ginkgo.Context("Forward PTR lookup", func() { fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} - It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", 
func() { + ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { fwdTest.c = fwdTest.f.ClientSet fwdTest.run(true) }) }) - Context("Forward external name lookup", func() { + ginkgo.Context("Forward external name lookup", func() { externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} - It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.run(true) }) diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go index 1b6ae5dcc55..37002bfd2c6 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -30,7 +30,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) const ( @@ -43,7 +43,7 @@ const ( var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { f := framework.NewDefaultFramework("performancedns") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute) @@ -52,7 +52,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { }) // answers dns for service - creates the maximum number of services, and then check dns record for one - It("Should answer DNS query for maximum number of services per cluster", func() { + ginkgo.It("Should answer DNS query for maximum number of services per cluster", func() { // get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace @@ -64,7 +64,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { services := generateServicesInNamespaces(namespaces, maxServicesPerCluster) createService := func(i int) { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i])) } e2elog.Logf("Creating %v test services", maxServicesPerCluster) diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 17cf3758791..cf7e3dcd458 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -32,8 +32,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -52,11 +52,11 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { f := framework.NewDefaultFramework("cluster-dns") var c clientset.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet }) - It("should create pod that uses dns", func() { + ginkgo.It("should create pod that uses dns", func() { mkpath := func(file string) string { return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file) } @@ -84,7 +84,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { var err error namespaceName := fmt.Sprintf("dnsexample%d", i) namespaces[i], err = f.CreateNamespace(namespaceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName) } for _, ns := range namespaces { @@ -106,13 +106,13 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns.Name).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", ns.Name) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) - Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for all pods to respond") e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) err = framework.ServiceResponding(c, ns.Name, backendSvcName) - Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for the service to respond") } // Now another tricky part: @@ -134,7 +134,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name) _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout) - Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for output from pod exec") updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain)) @@ -153,7 +153,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // wait for pods to print their result for _, ns := range namespaces { _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout) - Expect(err).NotTo(HaveOccurred(), "pod %s failed to print result in logs", frontendPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "pod %s failed to print result in logs", frontendPodName) } }) }) @@ -165,10 +165,10 @@ func getNsCmdFlag(ns *v1.Namespace) string { // pass enough context with the 'old' parameter so that it replaces what your really intended. 
func prepareResourceWithReplacedString(inputFile, old, new string) string { f, err := os.Open(inputFile) - Expect(err).NotTo(HaveOccurred(), "failed to open file: %s", inputFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to open file: %s", inputFile) defer f.Close() data, err := ioutil.ReadAll(f) - Expect(err).NotTo(HaveOccurred(), "failed to read from file: %s", inputFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from file: %s", inputFile) podYaml := strings.Replace(string(data), old, new, 1) return podYaml } diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index ecce3c4195d..07363bd5550 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -30,8 +30,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework/providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -49,38 +49,38 @@ var _ = SIGDescribe("Firewall rule", func() { var cloudConfig framework.CloudConfig var gceCloud *gcecloud.Cloud - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce") var err error cs = f.ClientSet cloudConfig = framework.TestContext.CloudConfig gceCloud, err = gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // This test takes around 6 minutes to run - It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() { + ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() { ns := f.Namespace.Name // This source ranges is just used to examine we have exact same things on LB firewall rules firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"} serviceName := "firewall-test-loadbalancer" - By("Getting cluster ID") + ginkgo.By("Getting cluster ID") clusterID, err := gce.GetClusterID(cs) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Got cluster ID: %v", clusterID) jig := framework.NewServiceTestJig(cs, serviceName) nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests) - Expect(nodeList).NotTo(BeNil()) + gomega.Expect(nodeList).NotTo(gomega.BeNil()) nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) if len(nodesNames) <= 0 { framework.Failf("Expect at least 1 node, got: %v", nodesNames) } nodesSet := sets.NewString(nodesNames...) 
- By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") + ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}} svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges @@ -90,61 +90,61 @@ var _ = SIGDescribe("Firewall rule", func() { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.LoadBalancerSourceRanges = nil }) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) - By("Waiting for the local traffic health check firewall rule to be deleted") + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted") localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP - By("Checking if service's firewall rule is correct") + ginkgo.By("Checking if service's firewall rule is correct") lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag) fw, err := gceCloud.GetFirewall(lbFw.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) - By("Checking if service's nodes health check firewall rule is correct") + ginkgo.By("Checking if service's nodes health check firewall rule is correct") nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true) fw, err = gceCloud.GetFirewall(nodesHCFw.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE - By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") + ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal }) - By("Waiting for the nodes health check firewall rule to be deleted") + ginkgo.By("Waiting for the nodes health check firewall rule to be deleted") _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Waiting for the correct local traffic health check firewall rule to be created") + ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created") localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, 
framework.LoadBalancerCreateTimeoutDefault) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) + ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) for i, nodeName := range nodesNames { podName := fmt.Sprintf("netexec%v", i) jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true) defer func() { e2elog.Logf("Cleaning up the netexec pod: %v", podName) - Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(gomega.HaveOccurred()) }() } // Send requests from outside of the cluster because internal traffic is whitelisted - By("Accessing the external service ip from outside, all non-master nodes should be reached") - Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached") + gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred()) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster // by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect // simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but // that's much harder to do in the current e2e framework. - By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0])) + ginkgo.By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0])) nodesSet.Delete(nodesNames[0]) // Instance could run in a different zone in multi-zone test. Figure out which zone // it is in before proceeding. 
@@ -154,31 +154,31 @@ var _ = SIGDescribe("Firewall rule", func() { } removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{}) defer func() { - By("Adding tags back to the node and wait till the traffic is recovered") + ginkgo.By("Adding tags back to the node and wait till the traffic is recovered") nodesSet.Insert(nodesNames[0]) gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags) // Make sure traffic is recovered before exit - Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred()) }() - By("Accessing serivce through the external ip and examine got no response from the node without tags") - Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) + ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags") + gomega.Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(gomega.HaveOccurred()) }) - It("should have correct firewall rules for e2e cluster", func() { + ginkgo.It("should have correct firewall rules for e2e cluster", func() { nodes := framework.GetReadySchedulableNodesOrDie(cs) if len(nodes.Items) <= 0 { framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) } - By("Checking if e2e firewall rules are correct") + ginkgo.By("Checking if e2e firewall rules are correct") for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) { fw, err := gceCloud.GetFirewall(expFw.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) } - By("Checking well known ports on master and nodes are not exposed externally") + ginkgo.By("Checking well known ports on master and nodes are not exposed externally") nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP) if len(nodeAddrs) == 0 { framework.Failf("did not find any node addresses") diff --git a/test/e2e/network/framework.go b/test/e2e/network/framework.go index bbabb66fc79..4080c32308b 100644 --- a/test/e2e/network/framework.go +++ b/test/e2e/network/framework.go @@ -18,6 +18,7 @@ package network import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-network] "+text, body) } diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 5ea0bd4fda5..45eca3516d0 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -40,8 +40,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/providers/gce" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -50,7 +50,7 @@ const ( ) var _ = SIGDescribe("Loadbalancing: L7", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() var ( ns string jig *ingress.TestJig @@ -58,7 +58,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ) f := framework.NewDefaultFramework("ingress") - BeforeEach(func() { + ginkgo.BeforeEach(func() { jig = ingress.NewIngressTestJig(f.ClientSet) ns = f.Namespace.Name @@ -81,59 +81,59 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // // Slow by design ~10m for each "It" block dominated by loadbalancer setup time // TODO: write similar tests for nginx, haproxy and AWS Ingress. - Describe("GCE [Slow] [Feature:Ingress]", func() { + ginkgo.Describe("GCE [Slow] [Feature:Ingress]", func() { var gceController *gce.IngressController // Platform specific setup - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing gce controller") + ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // Platform specific cleanup - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") return } - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() - By("Cleaning up cloud resources") - Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) + ginkgo.By("Cleaning up cloud resources") + gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(true) } }) - It("should create ingress with pre-shared certificate", func() { + ginkgo.It("should create ingress with pre-shared certificate", func() { executePresharedCertTest(f, jig, "") }) - It("should support multiple TLS certs", func() { - By("Creating an ingress with no certs.") + ginkgo.It("should support multiple TLS certs", func() { + ginkgo.By("Creating an ingress with no certs.") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ ingress.IngressStaticIPKey: ns, }, map[string]string{}) - By("Adding multiple certs to the ingress.") + ginkgo.By("Adding multiple certs to the ingress.") hosts := []string{"test1.ingress.com", "test2.ingress.com", "test3.ingress.com", "test4.ingress.com"} secrets := []string{"tls-secret-1", "tls-secret-2", "tls-secret-3", "tls-secret-4"} certs := [][]byte{} @@ -143,33 +143,33 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } for i, host := range hosts { err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } - 
By("Remove all but one of the certs on the ingress.") + ginkgo.By("Remove all but one of the certs on the ingress.") jig.RemoveHTTPS(secrets[1]) jig.RemoveHTTPS(secrets[2]) jig.RemoveHTTPS(secrets[3]) - By("Test that the remaining cert is properly served.") + ginkgo.By("Test that the remaining cert is properly served.") err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0]) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) - By("Add back one of the certs that was removed and check that all certs are served.") + ginkgo.By("Add back one of the certs that was removed and check that all certs are served.") jig.AddHTTPS(secrets[1], hosts[1]) for i, host := range hosts[:2] { err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } }) - It("multicluster ingress should get instance group annotation", func() { + ginkgo.It("multicluster ingress should get instance group annotation", func() { name := "echomap" jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{ ingress.IngressClassKey: ingress.MulticlusterIngressClassValue, }, map[string]string{}) - By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) + ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -237,118 +237,118 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // zone based on pod labels. 
}) - Describe("GCE [Slow] [Feature:NEG]", func() { + ginkgo.Describe("GCE [Slow] [Feature:NEG]", func() { var gceController *gce.IngressController // Platform specific setup - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing gce controller") + ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // Platform specific cleanup - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") return } - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() - By("Cleaning up cloud resources") - Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) + ginkgo.By("Cleaning up cloud resources") + gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { jig.PollInterval = 5 * time.Second conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ ingress.NEGAnnotation: `{"ingress": true}`, }) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(true) - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) } }) - It("should be able to switch between IG and NEG modes", func() { + ginkgo.It("should be able to switch between IG and NEG modes", func() { var err error - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) - By("Switch backend service to use IG") + ginkgo.By("Switch backend service to use IG") svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil { - e2elog.Logf("Failed to verify IG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err) return false, nil } return true, nil }) - Expect(err).NotTo(HaveOccurred(), "Expect backend service to target IG, but failed to observe") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target IG, but failed to observe") 
jig.WaitForIngress(true) - By("Switch backend service to use NEG") + ginkgo.By("Switch backend service to use NEG") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil { - e2elog.Logf("Failed to verify NEG backend service: %v", err) + e2elog.Logf("Failed to verify NEG backend service: %v", err) return false, nil } return true, nil }) - Expect(err).NotTo(HaveOccurred(), "Expect backend service to target NEG, but failed to observe") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target NEG, but failed to observe") jig.WaitForIngress(true) }) - It("should be able to create a ClusterIP service", func() { - By("Create a basic HTTP ingress using NEG") + ginkgo.It("should be able to create a ClusterIP service", func() { + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) svcPorts := jig.GetServicePorts(false) - Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(gomega.HaveOccurred()) // ClusterIP ServicePorts have no NodePort for _, sp := range svcPorts { - Expect(sp.NodePort).To(Equal(int32(0))) + gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0))) } }) - It("should sync endpoints to NEG", func() { + ginkgo.It("should sync endpoints to NEG", func() { name := "hostname" scaleAndValidateNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if scale.Spec.Replicas != int32(num) { scale.Spec.Replicas = int32(num) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() @@ -358,45 +358,45 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { e2elog.Logf("Expecting %d backends, got %d", num, res.Len()) return res.Len() == num, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) // initial replicas number is 1 scaleAndValidateNEG(1) - By("Scale up number of backends to 5") + ginkgo.By("Scale up number of backends to 5") scaleAndValidateNEG(5) - By("Scale down number of backends to 3") + ginkgo.By("Scale down number of backends to
3") scaleAndValidateNEG(3) - By("Scale up number of backends to 6") + ginkgo.By("Scale up number of backends to 6") scaleAndValidateNEG(6) - By("Scale down number of backends to 2") + ginkgo.By("Scale down number of backends to 2") scaleAndValidateNEG(3) }) - It("rolling update backend pods should not cause service disruption", func() { + ginkgo.It("rolling update backend pods should not cause service disruption", func() { name := "hostname" replicas := 8 - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("Scale backend replicas to %d", replicas)) + ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas)) scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) scale.Spec.Replicas = int32(replicas) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() @@ -405,21 +405,21 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } return res.Len() == replicas, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Trigger rolling update and observe service disruption") + ginkgo.By("Trigger rolling update and observe service disruption") deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // trigger by changing graceful termination period to 60 seconds gracePeriod := int64(60) deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if int(deploy.Status.UpdatedReplicas) == replicas { if res.Len() == replicas { return true, nil @@ -427,29 +427,28 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len()) return false, nil - } else { - e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.") - return false, nil } + e2elog.Logf("Waiting for rolling update to finished. 
Keep sending traffic.") + return false, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { + ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { name := "hostname" expectedKeys := []int32{80, 443} scaleAndValidateExposedNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if scale.Spec.Replicas != int32(num) { scale.Spec.Replicas = int32(num) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) var status ingress.NegStatus v, ok := svc.Annotations[ingress.NEGStatusAnnotation] @@ -482,10 +481,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(networkEndpoints) != num { e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints)) return false, nil @@ -494,31 +493,31 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) // initial replicas number is 1 scaleAndValidateExposedNEG(1) - By("Scale up number of backends to 5") + ginkgo.By("Scale up number of backends to 5") scaleAndValidateExposedNEG(5) - By("Scale down number of backends to 3") + ginkgo.By("Scale down number of backends to 3") scaleAndValidateExposedNEG(3) - By("Scale up number of backends to 6") + ginkgo.By("Scale up number of backends to 6") scaleAndValidateExposedNEG(6) - By("Scale down number of backends to 2") + ginkgo.By("Scale down number of backends to 2") scaleAndValidateExposedNEG(3) }) - It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { - By("Create a basic HTTP ingress using standalone NEG") + ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { + ginkgo.By("Create a basic HTTP ingress using standalone NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) @@ -526,120 +525,120 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { detectNegAnnotation(f, jig, gceController, ns, 
name, 2) // Add Ingress annotation - NEGs should stay the same. - By("Adding NEG Ingress annotation") + ginkgo.By("Adding NEG Ingress annotation") svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Modify exposed NEG annotation, but keep ingress annotation - By("Modifying exposed NEG annotation, but keep Ingress annotation") + ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Remove Ingress annotation. Expect 1 NEG - By("Disabling Ingress annotation, but keeping one standalone NEG") + ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 1) // Remove NEG annotation entirely. Expect 0 NEGs. - By("Removing NEG annotation") + ginkgo.By("Removing NEG annotation") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { delete(svc.Annotations, ingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 0) }) }) - Describe("GCE [Slow] [Feature:kubemci]", func() { + ginkgo.Describe("GCE [Slow] [Feature:kubemci]", func() { var gceController *gce.IngressController var ipName, ipAddress string // Platform specific setup - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") jig.Class = ingress.MulticlusterIngressClassValue jig.PollInterval = 5 * time.Second - By("Initializing gce controller") + ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19): // Kubemci should reserve a static ip if user has not specified one. 
ipName = "kubemci-" + string(uuid.NewUUID()) // ip released when the rest of lb resources are deleted in CleanupIngressController ipAddress = gceController.CreateStaticIP(ipName) - By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress)) + ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress)) }) // Platform specific cleanup - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") } else { - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() } - By("Cleaning up cloud resources") - Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) + ginkgo.By("Cleaning up cloud resources") + gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ ingress.IngressStaticIPKey: ipName, }) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(false /*waitForNodePort*/) } }) - It("should create ingress with pre-shared certificate", func() { + ginkgo.It("should create ingress with pre-shared certificate", func() { executePresharedCertTest(f, jig, ipName) }) - It("should create ingress with backend HTTPS", func() { + ginkgo.It("should create ingress with backend HTTPS", func() { executeBacksideBacksideHTTPSTest(f, jig, ipName) }) - It("should support https-only annotation", func() { + ginkgo.It("should support https-only annotation", func() { executeStaticIPHttpsOnlyTest(f, jig, ipName, ipAddress) }) - It("should remove clusters as expected", func() { + ginkgo.It("should remove clusters as expected", func() { ingAnnotations := map[string]string{ ingress.IngressStaticIPKey: ipName, } @@ -668,8 +667,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { verifyKubemciStatusHas(name, "is spread across 0 cluster") }) - It("single and multi-cluster ingresses should be able to exist together", func() { - By("Creating a single cluster ingress first") + ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() { + ginkgo.By("Creating a single cluster ingress first") jig.Class = "" singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2") jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{}) @@ -678,7 +677,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { singleIng := jig.Ingress // Create the multi-cluster ingress next. 
- By("Creating a multi-cluster ingress next") + ginkgo.By("Creating a multi-cluster ingress next") jig.Class = ingress.MulticlusterIngressClassValue ingAnnotations := map[string]string{ ingress.IngressStaticIPKey: ipName, @@ -688,7 +687,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.WaitForIngress(false /*waitForNodePort*/) mciIngress := jig.Ingress - By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work") + ginkgo.By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work") jig.Ingress = singleIng jig.Class = "" jig.TryDeleteIngress() @@ -696,18 +695,18 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.Class = ingress.MulticlusterIngressClassValue jig.WaitForIngress(false /*waitForNodePort*/) - By("Cleanup: Deleting the multi-cluster ingress") + ginkgo.By("Cleanup: Deleting the multi-cluster ingress") jig.TryDeleteIngress() }) }) // Time: borderline 5m, slow by design - Describe("[Slow] Nginx", func() { + ginkgo.Describe("[Slow] Nginx", func() { var nginxController *ingress.NginxIngressController - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing nginx controller") + ginkgo.By("Initializing nginx controller") jig.Class = "nginx" nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client} @@ -723,30 +722,30 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { nginxController.Init() }) - AfterEach(func() { + ginkgo.AfterEach(func() { if framework.ProviderIs("gce", "gke") { framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID)) } - if CurrentGinkgoTestDescription().Failed { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") return } - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { // Poll more frequently to reduce e2e completion time. // This test runs in presubmit. jig.PollInterval = 5 * time.Second conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(false) } }) @@ -766,28 +765,28 @@ func verifyKubemciStatusHas(name, expectedSubStr string) { func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { preSharedCertName := "test-pre-shared-cert" - By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) + ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) testHostname := "test.ingress.com" cert, key, err := ingress.GenerateRSACerts(testHostname, true) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { // We would not be able to delete the cert until ingress controller // cleans up the target proxy that references it. 
- By("Deleting ingress before deleting ssl certificate") + ginkgo.By("Deleting ingress before deleting ssl certificate") if jig.Ingress != nil { jig.TryDeleteIngress() } - By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) + ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) { if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) { - e2elog.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err) + e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err) return false, nil } return true, nil }) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to delete ssl certificate %q: %v", preSharedCertName, err)) }() _, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{ Name: preSharedCertName, @@ -795,9 +794,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat PrivateKey: string(key), Description: "pre-shared cert for ingress testing", }) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to create ssl certificate %q: %v", preSharedCertName, err)) - By("Creating an ingress referencing the pre-shared certificate") + ginkgo.By("Creating an ingress referencing the pre-shared certificate") // Create an ingress referencing this cert using pre-shared-cert annotation. ingAnnotations := map[string]string{ ingress.IngressPreSharedCertKey: preSharedCertName, @@ -810,9 +809,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat } jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) - By("Test that ingress works with the pre-shared certificate") + ginkgo.By("Test that ingress works with the pre-shared certificate") err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { @@ -821,30 +820,30 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ingress.IngressAllowHTTPKey: "false", }, map[string]string{}) - By("waiting for Ingress to come up with ip: " + ip) + ginkgo.By("waiting for Ingress to come up with ip: " + ip) httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) - By("should reject HTTP traffic") + ginkgo.By("should reject HTTP traffic") framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) } func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { - By("Creating a set of ingress, service and deployment that have backside re-encryption 
configured") + ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured") deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) defer func() { - By("Cleaning up re-encryption ingress, service and deployment") + ginkgo.By("Cleaning up re-encryption ingress, service and deployment") if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 { - framework.Failf("Failed to cleanup re-encryption ingress: %v", errs) + framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs) } }() - Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to create re-encryption ingress") - By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) + ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout) - Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to wait for ingress IP") - By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) + ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") @@ -858,7 +857,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ e2elog.Logf("Poll succeeded, request was served by HTTPS") return true, nil }) - Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to verify backside re-encryption ingress") } func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { @@ -872,7 +871,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro if negs == 0 { err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)) if err != nil { - e2elog.Logf("Failed to validate IG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to validate IG backend service: %v", err) return false, nil } return true, nil @@ -898,10 +897,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro } gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(networkEndpoints) != 1 { e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints)) return false, nil @@ -910,11 +909,11 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) if err != nil { - e2elog.Logf("Failed to validate NEG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to validate NEG backend service: %v", err) return false, nil } return 
true, nil }); err != nil { - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go index dc9d9df107e..bda4be54b62 100644 --- a/test/e2e/network/ingress_scale.go +++ b/test/e2e/network/ingress_scale.go @@ -20,26 +20,26 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/network/scale" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() var ( ns string ) f := framework.NewDefaultFramework("ingress-scale") - BeforeEach(func() { + ginkgo.BeforeEach(func() { ns = f.Namespace.Name }) - Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() { + ginkgo.Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() { var ( scaleFramework *scale.IngressScaleFramework ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig) @@ -48,13 +48,13 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 { framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) } }) - It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { + ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { framework.Failf("Unexpected error while running ingress scale test: %v", errs) } diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index f08c736fe38..adea7307ad8 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -33,8 +33,8 @@ import ( "k8s.io/kubernetes/test/images/net/nat" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Net) @@ -49,7 +49,7 @@ var _ = SIGDescribe("Network", func() { fr := framework.NewDefaultFramework("network") - It("should set TCP CLOSE_WAIT timeout", func() { + ginkgo.It("should set TCP CLOSE_WAIT timeout", func() { nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet) ips := framework.CollectAddresses(nodes, v1.NodeInternalIP) @@ -145,21 +145,21 @@ var _ = SIGDescribe("Network", func() { }, } - By(fmt.Sprintf( + ginkgo.By(fmt.Sprintf( "Launching a server daemon on node %v (node ip: %v, image: %v)", serverNodeInfo.name, serverNodeInfo.nodeIP, kubeProxyE2eImage)) fr.PodClient().CreateSync(serverPodSpec) - By(fmt.Sprintf( + ginkgo.By(fmt.Sprintf( "Launching a client daemon on node %v (node ip: %v, image: %v)", clientNodeInfo.name, clientNodeInfo.nodeIP, kubeProxyE2eImage)) fr.PodClient().CreateSync(clientPodSpec) - By("Make client connect") + ginkgo.By("Make client connect") options := nat.CloseWaitClientOptions{ RemoteAddr: fmt.Sprintf("%v:%v", @@ -179,7 +179,7 @@ var _ = SIGDescribe("Network", func() { <-time.After(time.Duration(1) * time.Second) - By("Checking /proc/net/nf_conntrack for the timeout") + ginkgo.By("Checking /proc/net/nf_conntrack for the timeout") // If test flakes occur here, then this check should be performed // in a loop as there may be a race with the client connecting. e2essh.IssueSSHCommandWithResult( @@ -214,8 +214,8 @@ var _ = SIGDescribe("Network", func() { e2elog.Logf("conntrack entry timeout was: %v, expected: %v", timeoutSeconds, expectedTimeoutSeconds) - Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should( - BeNumerically("<", (epsilonSeconds))) + gomega.Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should( + gomega.BeNumerically("<", (epsilonSeconds))) }) // Regression test for #74839, where: @@ -223,7 +223,7 @@ var _ = SIGDescribe("Network", func() { // a problem where spurious retransmits in a long-running TCP connection to a service // IP could result in the connection being closed with the error "Connection reset by // peer" - It("should resolve connrection reset issue #74839 [Slow]", func() { + ginkgo.It("should resolve connrection reset issue #74839 [Slow]", func() { serverLabel := map[string]string{ "app": "boom-server", } @@ -265,7 +265,7 @@ var _ = SIGDescribe("Network", func() { _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod) framework.ExpectNoError(err) - By("Server pod created") + ginkgo.By("Server pod created") svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -284,7 +284,7 @@ var _ = SIGDescribe("Network", func() { _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc) framework.ExpectNoError(err) - By("Server service created") + ginkgo.By("Server service created") pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -319,13 +319,13 @@ var _ = SIGDescribe("Network", func() { _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod) framework.ExpectNoError(err) - By("Client pod created") + ginkgo.By("Client pod created") for i := 0; i < 20; i++ { time.Sleep(3 * time.Second) resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(BeNil()) + 
gomega.Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(gomega.BeNil()) } }) }) diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index 2586c0c7011..6db4cd155a0 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -27,8 +27,8 @@ import ( "fmt" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) /* @@ -45,27 +45,27 @@ var _ = SIGDescribe("NetworkPolicy", func() { var podServer *v1.Pod f := framework.NewDefaultFramework("network-policy") - Context("NetworkPolicy between server and client", func() { - BeforeEach(func() { - By("Creating a simple server that serves on port 80 and 81.") + ginkgo.Context("NetworkPolicy between server and client", func() { + ginkgo.BeforeEach(func() { + ginkgo.By("Creating a simple server that serves on port 80 and 81.") podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81}) - By("Waiting for pod ready", func() { + ginkgo.By("Waiting for pod ready", func() { err := f.WaitForPodReady(podServer.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // Create pods, which should be able to communicate with the server on port 80 and 81. - By("Testing pods can connect to both ports when no policy is present.") + ginkgo.By("Testing pods can connect to both ports when no policy is present.") testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80) testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupServerPodAndService(f, podServer, service) }) - It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "deny-all", @@ -77,7 +77,7 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, @@ -85,8 +85,8 @@ var _ = SIGDescribe("NetworkPolicy", func() { testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80) }) - It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() { - By("Creating a network policy for the server which allows traffic from the pod 'client-a'.") + ginkgo.It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy for the server which allows traffic from the pod 'client-a'.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-client-a-via-pod-selector", @@ -112,18 +112,18 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Creating client-a which should be able to contact the server.", func() { + ginkgo.By("Creating client-a which should be able to contact the server.", func() { testCanConnect(f, f.Namespace, "client-a", service, 80) }) - By("Creating client-b which should not be able to contact the 
server.", func() { + ginkgo.By("Creating client-b which should not be able to contact the server.", func() { testCannotConnect(f, f.Namespace, "client-b", service, 80) }) }) - It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() { nsA := f.Namespace nsBName := f.BaseName + "-b" // The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself @@ -132,15 +132,15 @@ var _ = SIGDescribe("NetworkPolicy", func() { nsB, err := f.CreateNamespace(nsBName, map[string]string{ "ns-name": nsBName, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create Server with Service in NS-B e2elog.Logf("Waiting for server to come up.") err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create Policy for that service that allows traffic only via namespace B - By("Creating a network policy for the server which allows traffic from namespace-b.") + ginkgo.By("Creating a network policy for the server which allows traffic from namespace-b.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ns-b-via-namespace-selector", @@ -165,15 +165,15 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) testCannotConnect(f, nsA, "client-a", service, 80) testCanConnect(f, nsB, "client-b", service, 80) }) - It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { - By("Creating a network policy for the Service which allows traffic only to one port.") + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ingress-on-port-81", @@ -194,16 +194,16 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Testing pods can connect only to the port allowed by the policy.") + ginkgo.By("Testing pods can connect only to the port allowed by the policy.") testCannotConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-b", service, 81) }) - It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { - By("Creating a network policy for the Service which allows traffic only to one port.") + ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ingress-on-port-80", @@ -224,10 +224,10 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - 
By("Creating a network policy for the Service which allows traffic only to another port.") + ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.") policy2 := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ingress-on-port-81", @@ -248,16 +248,16 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy2) - By("Testing pods can connect to both ports when both policies are present.") + ginkgo.By("Testing pods can connect to both ports when both policies are present.") testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-b", service, 81) }) - It("should support allow-all policy [Feature:NetworkPolicy]", func() { - By("Creating a network policy which allows all traffic.") + ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy which allows all traffic.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-all", @@ -271,15 +271,15 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Testing pods can connect to both ports when an 'allow-all' policy is present.") + ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.") testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-b", service, 81) }) - It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-client-a-via-named-port-ingress-rule", @@ -301,18 +301,18 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Creating client-a which should be able to contact the server.", func() { + ginkgo.By("Creating client-a which should be able to contact the server.", func() { testCanConnect(f, f.Namespace, "client-a", service, 80) }) - By("Creating client-b which should not be able to contact the server on port 81.", func() { + ginkgo.By("Creating client-b which should not be able to contact the server on port 81.", func() { testCannotConnect(f, f.Namespace, "client-b", service, 81) }) }) - It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { clientPodName := "client-a" protocolUDP := v1.ProtocolUDP policy := &networkingv1.NetworkPolicy{ @@ -343,13 +343,13 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Creating client-a which should be able to contact the server.", func() { + ginkgo.By("Creating 
client-a which should be able to contact the server.", func() { testCanConnect(f, f.Namespace, clientPodName, service, 80) }) - By("Creating client-a which should not be able to contact the server on port 81.", func() { + ginkgo.By("Creating client-a which should not be able to contact the server on port 81.", func() { testCannotConnect(f, f.Namespace, clientPodName, service, 81) }) }) @@ -357,10 +357,10 @@ var _ = SIGDescribe("NetworkPolicy", func() { }) func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { - By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name)) + ginkgo.By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name)) podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { - By(fmt.Sprintf("Cleaning up the pod %s", podName)) + ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } @@ -368,7 +368,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se e2elog.Logf("Waiting for %s to complete.", podClient.Name) err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name) - Expect(err).NotTo(HaveOccurred(), "Pod did not finish as expected.") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Pod did not finish as expected.") e2elog.Logf("Waiting for %s to complete.", podClient.Name) err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) @@ -404,10 +404,10 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se } func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { - By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) + ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { - By(fmt.Sprintf("Cleaning up the pod %s", podName)) + ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } @@ -495,7 +495,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, }) } - By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name)) + ginkgo.By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name)) pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -508,11 +508,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, RestartPolicy: v1.RestartPolicyNever, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Created pod %v", pod.ObjectMeta.Name) svcName := fmt.Sprintf("svc-%s", podName) - By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name)) + ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name)) svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{ ObjectMeta: 
metav1.ObjectMeta{ Name: svcName, @@ -524,18 +524,18 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, }, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Created service %s", svc.Name) return pod, svc } func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { - By("Cleaning up the server.") + ginkgo.By("Cleaning up the server.") if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) } - By("Cleaning up the server's service.") + ginkgo.By("Cleaning up the server's service.") if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil { framework.Failf("unable to cleanup svc %v: %v", service.Name, err) } @@ -569,13 +569,13 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return pod } func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { - By("Cleaning up the policy.") + ginkgo.By("Cleaning up the policy.") if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil { framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) } diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index c5c7b09bde3..ef8130267f9 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -33,8 +33,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework/providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { @@ -43,14 +43,14 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { var cs clientset.Interface serviceLBNames := []string{} - BeforeEach(func() { + ginkgo.BeforeEach(func() { // This test suite requires the GCE environment. framework.SkipUnlessProviderIs("gce") cs = f.ClientSet }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { @@ -60,7 +60,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { //reset serviceLBNames serviceLBNames = []string{} }) - It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { + ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { lagTimeout := framework.LoadBalancerLagTimeoutDefault createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs) @@ -68,19 +68,19 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, svcName) - By("creating a pod to be part of the service " + svcName) + ginkgo.By("creating a pod to be part of the service " + svcName) jig.RunOrFail(ns, nil) // Test 1: create a standard tiered LB for the Service. 
- By("creating a Service of type LoadBalancer using the standard network tier") + ginkgo.By("creating a Service of type LoadBalancer using the standard network tier") svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) }) // Verify that service has been updated properly. svcTier, err := gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) - Expect(svcTier).To(Equal(cloud.NetworkTierStandard)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard)) // Record the LB name for test cleanup. serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) @@ -88,26 +88,26 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout) // Test 2: re-create a LB of a different tier for the updated Service. - By("updating the Service to use the premium (default) tier") + ginkgo.By("updating the Service to use the premium (default) tier") svc = jig.UpdateServiceOrFail(ns, svcName, func(svc *v1.Service) { clearNetworkTier(svc) }) // Verify that service has been updated properly. svcTier, err = gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) - Expect(svcTier).To(Equal(cloud.NetworkTierDefault)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierDefault)) // Wait until the ingress IP changes. Each tier has its own pool of // IPs, so changing tiers implies changing IPs. ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout) // Test 3: create a standard-tierd LB with a user-requested IP. - By("reserving a static IP for the load balancer") + ginkgo.By("reserving a static IP for the load balancer") requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard) - Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to reserve a STANDARD tiered address") defer func() { if requestedAddrName != "" { // Release GCE static address - this is not kube-managed and will not be automatically released. @@ -116,19 +116,19 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { } } }() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP) - By("updating the Service to use the standard tier with a requested IP") + ginkgo.By("updating the Service to use the standard tier with a requested IP") svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) { svc.Spec.LoadBalancerIP = requestedIP setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) }) // Verify that service has been updated properly. 
- Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP)) + gomega.Expect(svc.Spec.LoadBalancerIP).To(gomega.Equal(requestedIP)) svcTier, err = gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) - Expect(svcTier).To(Equal(cloud.NetworkTierStandard)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard)) // Wait until the ingress IP changes and verifies the LB. ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout) @@ -150,10 +150,10 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin lbIngress := &svc.Status.LoadBalancer.Ingress[0] ingressIP := framework.GetIngressPoint(lbIngress) - By("running sanity and reachability checks") + ginkgo.By("running sanity and reachability checks") if svc.Spec.LoadBalancerIP != "" { // Verify that the new ingress IP is the requested IP if it's set. - Expect(ingressIP).To(Equal(svc.Spec.LoadBalancerIP)) + gomega.Expect(ingressIP).To(gomega.Equal(svc.Spec.LoadBalancerIP)) } jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) // If the IP has been used by previous test, sometimes we get the lingering @@ -163,10 +163,10 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin // Verify the network tier matches the desired. svcNetTier, err := gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) netTier, err := getLBNetworkTierByIP(ingressIP) - Expect(err).NotTo(HaveOccurred(), "failed to get the network tier of the load balancer") - Expect(netTier).To(Equal(svcNetTier)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the network tier of the load balancer") + gomega.Expect(netTier).To(gomega.Equal(svcNetTier)) return ingressIP } diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 281211a4f5e..0821e0bd1b5 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -24,18 +24,18 @@ import ( "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Networking", func() { var svcname = "nettest" f := framework.NewDefaultFramework(svcname) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Assert basic external connectivity. // Since this is not really a test of kubernetes in any way, we // leave it as a pre-test assertion, rather than a Ginko test. 
- By("Executing a successful http request from the external internet") + ginkgo.By("Executing a successful http request from the external internet") resp, err := http.Get("http://google.com") if err != nil { framework.Failf("Unable to connect/talk to the internet: %v", err) @@ -45,20 +45,20 @@ var _ = SIGDescribe("Networking", func() { } }) - It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { - By("Running container which tries to ping 8.8.8.8") + ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { + ginkgo.By("Running container which tries to ping 8.8.8.8") framework.ExpectNoError( framework.CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", framework.IPv4PingCommand, 30)) }) - It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() { - By("Running container which tries to ping 2001:4860:4860::8888") + ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() { + ginkgo.By("Running container which tries to ping 2001:4860:4860::8888") framework.ExpectNoError( framework.CheckConnectivityToHost(f, "", "ping-test", "2001:4860:4860::8888", framework.IPv6PingCommand, 30)) }) // First test because it has no dependencies on variables created later on. - It("should provide unchanging, static URL paths for kubernetes api services", func() { + ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func() { tests := []struct { path string }{ @@ -74,22 +74,22 @@ var _ = SIGDescribe("Networking", func() { tests = append(tests, struct{ path string }{path: "/logs"}) } for _, test := range tests { - By(fmt.Sprintf("testing: %s", test.path)) + ginkgo.By(fmt.Sprintf("testing: %s", test.path)) data, err := f.ClientSet.CoreV1().RESTClient().Get(). AbsPath(test.path). DoRaw() if err != nil { - framework.Failf("Failed: %v\nBody: %s", err, string(data)) + framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data)) } } }) - It("should check kube-proxy urls", func() { + ginkgo.It("should check kube-proxy urls", func() { // TODO: this is overkill we just need the host networking pod // to hit kube-proxy urls. config := framework.NewNetworkingTestConfig(f) - By("checking kube-proxy URLs") + ginkgo.By("checking kube-proxy URLs") config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK") // Verify /healthz returns the proper content. config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated") @@ -98,116 +98,116 @@ var _ = SIGDescribe("Networking", func() { }) // TODO: Remove [Slow] when this has had enough bake time to prove presubmit worthiness. 
- Describe("Granular Checks: Services [Slow]", func() { + ginkgo.Describe("Granular Checks: Services [Slow]", func() { - It("should function for pod-Service: http", func() { + ginkgo.It("should function for pod-Service: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for pod-Service: udp", func() { + ginkgo.It("should function for pod-Service: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for node-Service: http", func() { + ginkgo.It("should function for node-Service: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromNode("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for node-Service: udp", func() { + ginkgo.It("should function for node-Service: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromNode("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v 
(nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for endpoint-Service: http", func() { + ginkgo.It("should function for endpoint-Service: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for endpoint-Service: udp", func() { + ginkgo.It("should function for endpoint-Service: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort)) config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should update endpoints: http", func() { + ginkgo.It("should update endpoints: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNetProxyPod() - By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) }) - It("should update endpoints: udp", func() { + ginkgo.It("should update endpoints: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v 
(config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNetProxyPod() - By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - It("should update nodePort: http [Slow]", func() { + ginkgo.It("should update nodePort: http [Slow]", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNodePortService() - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, config.MaxTries, sets.NewString()) }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - It("should update nodePort: udp [Slow]", func() { + ginkgo.It("should update nodePort: udp [Slow]", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNodePortService() - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, config.MaxTries, sets.NewString()) }) - It("should function for client IP based session affinity: http", func() { + ginkgo.It("should function for client IP based session affinity: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort)) // Check if number of endpoints returned are exactly one. 
 		eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks)
 		if err != nil {
 			framework.Failf("Failed to get endpoints from test container, error: %v", err)
 		}
 		if len(eps) == 0 {
 			framework.Failf("Unexpected no endpoints return")
@@ -217,14 +217,14 @@ var _ = SIGDescribe("Networking", func() {
 			}
 		})
-		It("should function for client IP based session affinity: udp", func() {
+		ginkgo.It("should function for client IP based session affinity: udp", func() {
 			config := framework.NewNetworkingTestConfig(f)
-			By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort))
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort))
 			// Check if number of endpoints returned are exactly one.
 			eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks)
 			if err != nil {
 				framework.Failf("Failed to get endpoints from test container, error: %v", err)
 			}
 			if len(eps) == 0 {
 				framework.Failf("Unexpected no endpoints return")
diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go
index 9892feeab6d..ecf05e83ac1 100644
--- a/test/e2e/network/networking_perf.go
+++ b/test/e2e/network/networking_perf.go
@@ -22,8 +22,8 @@ import (
 	"math"
 	"time"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -54,12 +54,12 @@ func networkingIPerfTest(isIPv6 bool) {
 		familyStr = "-V "
 	}
-	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
+	ginkgo.It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
 		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 		totalPods := len(nodes.Items)
 		// for a single service, we expect to divide bandwidth between the network. Very crude estimate.
 		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
-		Expect(totalPods).NotTo(Equal(0))
+		gomega.Expect(totalPods).NotTo(gomega.Equal(0))
 		appName := "iperf-e2e"
 		_, err := f.CreateServiceForSimpleAppWithPods(
 			8001,
diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go
index e62026156a5..52b0afe7c6f 100644
--- a/test/e2e/network/no_snat.go
+++ b/test/e2e/network/no_snat.go
@@ -29,8 +29,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	. "github.com/onsi/ginkgo"
-	// .
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -94,11 +94,11 @@ var ( // Produces a pod spec that passes nip as NODE_IP env var using downward API func newTestPod(nodename string, nip string) *v1.Pod { pod := testPod - node_ip := v1.EnvVar{ + nodeIP := v1.EnvVar{ Name: "NODE_IP", Value: nip, } - pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, node_ip) + pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, nodeIP) pod.Spec.NodeName = nodename return &pod } @@ -135,12 +135,12 @@ func checknosnatURL(proxy, pip string, ips []string) string { // We use the [Feature:NoSNAT] tag so that most jobs will skip this test by default. var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { f := framework.NewDefaultFramework("no-snat-test") - It("Should be able to send traffic between Pods without SNAT", func() { + ginkgo.It("Should be able to send traffic between Pods without SNAT", func() { cs := f.ClientSet pc := cs.CoreV1().Pods(f.Namespace.Name) nc := cs.CoreV1().Nodes() - By("creating a test pod on each Node") + ginkgo.By("creating a test pod on each Node") nodes, err := nc.List(metav1.ListOptions{}) framework.ExpectNoError(err) if len(nodes.Items) == 0 { @@ -167,7 +167,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { // on the master, but do allow this on the nodes. node, err := getSchedulable(nodes.Items) framework.ExpectNoError(err) - By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) + + ginkgo.By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) + " so we can target our test Pods through this Node's ExternalIP") extIP, err := getIP(v1.NodeExternalIP, node) @@ -177,7 +177,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { _, err = pc.Create(newTestProxyPod(node.Name)) framework.ExpectNoError(err) - By("waiting for all of the no-snat-test pods to be scheduled and running") + ginkgo.By("waiting for all of the no-snat-test pods to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) if err != nil { @@ -197,7 +197,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { }) framework.ExpectNoError(err) - By("waiting for the no-snat-test-proxy Pod to be scheduled and running") + ginkgo.By("waiting for the no-snat-test-proxy Pod to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { pod, err := pc.Get("no-snat-test-proxy", metav1.GetOptions{}) if err != nil { @@ -213,7 +213,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { }) framework.ExpectNoError(err) - By("sending traffic from each pod to the others and checking that SNAT does not occur") + ginkgo.By("sending traffic from each pod to the others and checking that SNAT does not occur") pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) framework.ExpectNoError(err) diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index 5887aa77658..511f94608ac 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -38,8 +38,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -55,7 +55,7 @@ const ( var _ = SIGDescribe("Proxy", func() { version := "v1" - Context("version "+version, func() { + ginkgo.Context("version "+version, func() { options := framework.Options{ ClientQPS: -1.0, } @@ -116,12 +116,12 @@ var _ = SIGDescribe("Proxy", func() { }, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Make an RC with a single pod. The 'porter' image is // a simple server which serves the values of the // environmental variables below. - By("starting an echo server on multiple ports") + ginkgo.By("starting an echo server on multiple ports") pods := []*v1.Pod{} cfg := testutils.RCConfig{ Client: f.ClientSet, @@ -160,10 +160,10 @@ var _ = SIGDescribe("Proxy", func() { Labels: labels, CreatedPods: &pods, } - Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) + gomega.Expect(framework.RunRC(cfg)).NotTo(gomega.HaveOccurred()) defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name) - Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred()) + gomega.Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(gomega.HaveOccurred()) // table constructors // Try proxying through the service and directly to through the pod. @@ -212,7 +212,7 @@ var _ = SIGDescribe("Proxy", func() { e2elog.Logf("setup took %v, starting test cases", d) numberTestCases := len(expectations) totalAttempts := numberTestCases * proxyAttempts - By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts)) + ginkgo.By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts)) for i := 0; i < proxyAttempts; i++ { wg.Add(numberTestCases) @@ -297,25 +297,25 @@ func pickNode(cs clientset.Interface) (string, error) { func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) { node, err := pickNode(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: Change it to test whether all requests succeeded when requests // not reaching Kubelet issue is debugged. 
 	serviceUnavailableErrors := 0
 	for i := 0; i < proxyAttempts; i++ {
 		_, status, d, err := doProxy(f, prefix+node+nodeDest, i)
 		if status == http.StatusServiceUnavailable {
 			e2elog.Logf("Failed proxying node logs due to service unavailable: %v", err)
 			time.Sleep(time.Second)
 			serviceUnavailableErrors++
 		} else {
-			Expect(err).NotTo(HaveOccurred())
-			Expect(status).To(Equal(http.StatusOK))
-			Expect(d).To(BeNumerically("<", proxyHTTPCallTimeout))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Expect(status).To(gomega.Equal(http.StatusOK))
+			gomega.Expect(d).To(gomega.BeNumerically("<", proxyHTTPCallTimeout))
 		}
 	}
 	if serviceUnavailableErrors > 0 {
 		e2elog.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
 	}
 	maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
-	Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures))
+	gomega.Expect(serviceUnavailableErrors).To(gomega.BeNumerically("<", maxFailures))
 }
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index a5bb5a3428d..2741d05449d 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -43,8 +43,8 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	gcecloud "k8s.io/legacy-cloud-providers/gce"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 const (
@@ -83,12 +83,12 @@ var _ = SIGDescribe("Services", func() {
 	var cs clientset.Interface
 	serviceLBNames := []string{}
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		cs = f.ClientSet
 	})
-	AfterEach(func() {
-		if CurrentGinkgoTestDescription().Failed {
+	ginkgo.AfterEach(func() {
+		if ginkgo.CurrentGinkgoTestDescription().Failed {
 			framework.DescribeSvc(f.Namespace.Name)
 		}
 		for _, lb := range serviceLBNames {
@@ -108,7 +108,7 @@ var _ = SIGDescribe("Services", func() {
 	*/
 	framework.ConformanceIt("should provide secure master service ", func() {
 		_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "failed to fetch the service object for the service named kubernetes")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch the service object for the service named kubernetes")
 	})
 	/*
@@ -125,10 +125,10 @@ var _ = SIGDescribe("Services", func() {
 			"baz": "blah",
 		}
-		By("creating service " + serviceName + " in namespace " + ns)
+		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
 		defer func() {
 			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
-			Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns)
 		}()
 		ports := []v1.ServicePort{{
 			Port: 80,
@@ -136,7 +136,7 @@ var _ = SIGDescribe("Services", func() {
 		}}
 		_, err := jig.CreateServiceWithServicePort(labels, ns, ports)
-		Expect(err).NotTo(HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns)
 		framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
@@ -144,7 +144,7 @@ var _ = SIGDescribe("Services", func() {
 		defer func() {
 			for name := range names {
 				err := cs.CoreV1().Pods(ns).Delete(name, nil)
-				Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s",
name, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -181,7 +181,7 @@ var _ = SIGDescribe("Services", func() { defer func() { err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() labels := map[string]string{"foo": "bar"} @@ -189,7 +189,7 @@ var _ = SIGDescribe("Services", func() { svc1port := "svc1" svc2port := "svc2" - By("creating service " + serviceName + " in namespace " + ns) + ginkgo.By("creating service " + serviceName + " in namespace " + ns) ports := []v1.ServicePort{ { Name: "portname1", @@ -203,7 +203,7 @@ var _ = SIGDescribe("Services", func() { }, } _, err := jig.CreateServiceWithServicePort(labels, ns, ports) - Expect(err).NotTo(HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) port1 := 100 port2 := 101 framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) @@ -212,7 +212,7 @@ var _ = SIGDescribe("Services", func() { defer func() { for name := range names { err := cs.CoreV1().Pods(ns).Delete(name, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -249,7 +249,7 @@ var _ = SIGDescribe("Services", func() { framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) }) - It("should preserve source pod IP for traffic thru service cluster IP", func() { + ginkgo.It("should preserve source pod IP for traffic thru service cluster IP", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. 
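A pattern that repeats throughout service.go is a create call followed by a deferred delete, each guarded by a gomega assertion that carries an annotation naming the resource and namespace. The sketch below reproduces that shape against a fake clientset so it runs without a cluster; the test name, namespace, and service name are assumptions, and the client-go calls use the context-free signatures of this era of the tree.

package services_test

import (
	"testing"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestDeferredServiceCleanup mirrors the create-then-defer-delete shape used
// above, with annotated assertions so a failure names the resource involved.
func TestDeferredServiceCleanup(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	cs := fake.NewSimpleClientset()
	ns, serviceName := "demo-ns", "demo-svc"

	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName},
		Spec:       v1.ServiceSpec{Ports: []v1.ServicePort{{Name: "http", Port: 80}}},
	}
	_, err := cs.CoreV1().Services(ns).Create(svc)
	g.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns)

	defer func() {
		// The annotation is printed alongside the failure message, which is
		// why every cleanup assertion above carries one.
		err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
		g.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns)
	}()
}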
@@ -264,7 +264,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "sourceip-test" ns := f.Namespace.Name - By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) jig := framework.NewServiceTestJig(cs, serviceName) servicePort := 8080 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort)) @@ -272,12 +272,12 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the sourceip test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() - serviceIp := tcpService.Spec.ClusterIP - e2elog.Logf("sourceip-test cluster ip: %s", serviceIp) + serviceIP := tcpService.Spec.ClusterIP + e2elog.Logf("sourceip-test cluster ip: %s", serviceIP) - By("Picking multiple nodes") + ginkgo.By("Picking multiple nodes") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if len(nodes.Items) == 1 { @@ -287,30 +287,30 @@ var _ = SIGDescribe("Services", func() { node1 := nodes.Items[0] node2 := nodes.Items[1] - By("Creating a webserver pod be part of the TCP service which echoes back source ip") + ginkgo.By("Creating a webserver pod be part of the TCP service which echoes back source ip") serverPodName := "echoserver-sourceip" jig.LaunchEchoserverPodOnNode(f, node1.Name, serverPodName) defer func() { e2elog.Logf("Cleaning up the echo server pod") err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s on node: %s", serverPodName, node1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s on node: %s", serverPodName, node1.Name) }() // Waiting for service to expose endpoint. framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{serverPodName: {servicePort}}) - By("Retrieve sourceip from a pod on the same node") - sourceIp1, execPodIp1 := execSourceipTest(f, cs, ns, node1.Name, serviceIp, servicePort) - By("Verifying the preserved source ip") - Expect(sourceIp1).To(Equal(execPodIp1)) + ginkgo.By("Retrieve sourceip from a pod on the same node") + sourceIP1, execPodIP1 := execSourceipTest(f, cs, ns, node1.Name, serviceIP, servicePort) + ginkgo.By("Verifying the preserved source ip") + gomega.Expect(sourceIP1).To(gomega.Equal(execPodIP1)) - By("Retrieve sourceip from a pod on a different node") - sourceIp2, execPodIp2 := execSourceipTest(f, cs, ns, node2.Name, serviceIp, servicePort) - By("Verifying the preserved source ip") - Expect(sourceIp2).To(Equal(execPodIp2)) + ginkgo.By("Retrieve sourceip from a pod on a different node") + sourceIP2, execPodIP2 := execSourceipTest(f, cs, ns, node2.Name, serviceIP, servicePort) + ginkgo.By("Verifying the preserved source ip") + gomega.Expect(sourceIP2).To(gomega.Equal(execPodIP2)) }) - It("should be able to up and down services", func() { + ginkgo.It("should be able to up and down services", func() { // TODO: use the ServiceTestJig here // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
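The source-IP test above ultimately compares the client address observed by the echo server with the IP of the pod that issued the request. A small helper capturing just that comparison might look like this; the function name and its tolerance for an optional port suffix are assumptions rather than framework code.

package example

import (
	"fmt"
	"net"
)

// sourceIPPreserved reports whether the client address echoed back by the
// server matches the pod IP we dialed from. The echoed value may or may not
// carry a port, so both forms are handled.
func sourceIPPreserved(echoedAddr, podIP string) (bool, error) {
	host := echoedAddr
	if h, _, err := net.SplitHostPort(echoedAddr); err == nil {
		host = h
	}
	src := net.ParseIP(host)
	want := net.ParseIP(podIP)
	if src == nil || want == nil {
		return false, fmt.Errorf("could not parse %q or %q as an IP", host, podIP)
	}
	return src.Equal(want), nil
}

In the test itself the check is written directly as gomega.Expect(sourceIP1).To(gomega.Equal(execPodIP1)); the helper form just makes the port handling explicit.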
@@ -320,52 +320,52 @@ var _ = SIGDescribe("Services", func() { ns := f.Namespace.Name numPods, servicePort := 3, defaultServeHostnameServicePort - By("creating service1 in namespace " + ns) + ginkgo.By("creating service1 in namespace " + ns) podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) - By("creating service2 in namespace " + ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + ginkgo.By("creating service2 in namespace " + ns) podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } host := hosts[0] - By("verifying service1 is up") + ginkgo.By("verifying service1 is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - By("verifying service2 is up") + ginkgo.By("verifying service2 is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. - By("stopping service1") + ginkgo.By("stopping service1") framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) - By("verifying service1 is not up") + ginkgo.By("verifying service1 is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) - By("verifying service2 is still up") + ginkgo.By("verifying service2 is still up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. 
- By("creating service3 in namespace " + ns) + ginkgo.By("creating service3 in namespace " + ns) podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service3"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) if svc2IP == svc3IP { framework.Failf("service IPs conflict: %v", svc2IP) } - By("verifying service2 is still up") + ginkgo.By("verifying service2 is still up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) - By("verifying service3 is up") + ginkgo.By("verifying service3 is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) }) - It("should work after restarting kube-proxy [Disruptive]", func() { + ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() { // TODO: use the ServiceTestJig here framework.SkipUnlessProviderIs("gce", "gke") @@ -379,20 +379,20 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) defer func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -401,14 +401,14 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) - By(fmt.Sprintf("Restarting kube-proxy on %v", host)) + ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host)) if err := framework.RestartKubeProxy(host); err != nil { framework.Failf("error restarting kube-proxy: %v", err) } framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) - By("Removing iptable rules") + ginkgo.By("Removing iptable rules") result, err := e2essh.SSH(` sudo iptables -t nat -F KUBE-SERVICES || true; sudo iptables -t nat -F KUBE-PORTALS-HOST || true; @@ -421,7 +421,7 @@ var _ = SIGDescribe("Services", func() { 
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) - It("should work after restarting apiserver [Disruptive]", func() { + ginkgo.It("should work after restarting apiserver [Disruptive]", func() { // TODO: use the ServiceTestJig here framework.SkipUnlessProviderIs("gce", "gke") @@ -432,10 +432,10 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -444,11 +444,11 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver - By("Restarting apiserver") + ginkgo.By("Restarting apiserver") if err := framework.RestartApiserver(cs); err != nil { framework.Failf("error restarting apiserver: %v", err) } - By("Waiting for apiserver to come up by polling /healthz") + ginkgo.By("Waiting for apiserver to come up by polling /healthz") if err := framework.WaitForApiserverUp(cs); err != nil { framework.Failf("error while waiting for apiserver up: %v", err) } @@ -459,7 +459,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2")) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) @@ -471,27 +471,27 @@ var _ = SIGDescribe("Services", func() { // TODO: Run this test against the userspace proxy and nodes // configured with a default deny firewall to validate that the // proxy whitelists NodePort traffic. 
- It("should be able to create a functioning NodePort service", func() { + ginkgo.It("should be able to create a functioning NodePort service", func() { serviceName := "nodeport-test" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) nodeIP := framework.PickNodeIP(jig.Client) // for later - By("creating service " + serviceName + " with type=NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns) service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) jig.SanityCheckService(service, v1.ServiceTypeNodePort) nodePort := int(service.Spec.Ports[0].NodePort) - By("creating pod to be part of service " + serviceName) + ginkgo.By("creating pod to be part of service " + serviceName) jig.RunOrFail(ns, nil) - By("hitting the pod through the service's NodePort") + ginkgo.By("hitting the pod through the service's NodePort") jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout) - By("verifying the node port is locked") + ginkgo.By("verifying the node port is locked") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") // Even if the node-ip:node-port check above passed, this hostexec pod // might fall on a node with a laggy kube-proxy. @@ -503,7 +503,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() { // requires cloud load-balancer support framework.SkipUnlessProviderIs("gce", "gke", "aws") @@ -525,9 +525,9 @@ var _ = SIGDescribe("Services", func() { ns1 := f.Namespace.Name // LB1 in ns1 on TCP e2elog.Logf("namespace for TCP test: %s", ns1) - By("creating a second namespace") + ginkgo.By("creating a second namespace") namespacePtr, err := f.CreateNamespace("services", nil) - Expect(err).NotTo(HaveOccurred(), "failed to create namespace") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace") ns2 := namespacePtr.Name // LB2 in ns2 on UDP e2elog.Logf("namespace for UDP test: %s", ns2) @@ -537,30 +537,30 @@ var _ = SIGDescribe("Services", func() { // Test TCP and UDP Services. Services with the same name in different // namespaces should get different node ports and load balancers. 
- By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) tcpService := jig.CreateTCPServiceOrFail(ns1, nil) jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) - By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) + ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) udpService := jig.CreateUDPServiceOrFail(ns2, nil) jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP) - By("verifying that TCP and UDP use the same port") + ginkgo.By("verifying that TCP and UDP use the same port") if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { framework.Failf("expected to use the same port for TCP and UDP") } svcPort := int(tcpService.Spec.Ports[0].Port) e2elog.Logf("service port (TCP and UDP): %d", svcPort) - By("creating a pod to be part of the TCP service " + serviceName) + ginkgo.By("creating a pod to be part of the TCP service " + serviceName) jig.RunOrFail(ns1, nil) - By("creating a pod to be part of the UDP service " + serviceName) + ginkgo.By("creating a pod to be part of the UDP service " + serviceName) jig.RunOrFail(ns2, nil) // Change the services to NodePort. - By("changing the TCP service to type=NodePort") + ginkgo.By("changing the TCP service to type=NodePort") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort }) @@ -568,7 +568,7 @@ var _ = SIGDescribe("Services", func() { tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) e2elog.Logf("TCP node port: %d", tcpNodePort) - By("changing the UDP service to type=NodePort") + ginkgo.By("changing the UDP service to type=NodePort") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort }) @@ -576,10 +576,10 @@ var _ = SIGDescribe("Services", func() { udpNodePort := int(udpService.Spec.Ports[0].NodePort) e2elog.Logf("UDP node port: %d", udpNodePort) - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) // Change the services to LoadBalancer. 
@@ -589,10 +589,10 @@ var _ = SIGDescribe("Services", func() { requestedIP := "" staticIPName := "" if framework.ProviderIs("gce", "gke") { - By("creating a static load balancer IP") + ginkgo.By("creating a static load balancer IP") staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred(), "failed to get GCE cloud provider") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get GCE cloud provider") err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()) defer func() { @@ -603,22 +603,22 @@ var _ = SIGDescribe("Services", func() { } } }() - Expect(err).NotTo(HaveOccurred(), "failed to create region address: %s", staticIPName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create region address: %s", staticIPName) reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region()) - Expect(err).NotTo(HaveOccurred(), "failed to get region address: %s", staticIPName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get region address: %s", staticIPName) requestedIP = reservedAddr.Address e2elog.Logf("Allocated static load balancer IP: %s", requestedIP) } - By("changing the TCP service to type=LoadBalancer") + ginkgo.By("changing the TCP service to type=LoadBalancer") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable s.Spec.Type = v1.ServiceTypeLoadBalancer }) if loadBalancerSupportsUDP { - By("changing the UDP service to type=LoadBalancer") + ginkgo.By("changing the UDP service to type=LoadBalancer") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeLoadBalancer }) @@ -628,7 +628,7 @@ var _ = SIGDescribe("Services", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService)) } - By("waiting for the TCP service to have a load balancer") + ginkgo.By("waiting for the TCP service to have a load balancer") // Wait for the load balancer to be created asynchronously tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) @@ -646,10 +646,10 @@ var _ = SIGDescribe("Services", func() { // This is mostly out of fear of leaking the IP in a timeout case // (as of this writing we're not 100% sure where the leaks are // coming from, so this is first-aid rather than surgery). - By("demoting the static IP to ephemeral") + ginkgo.By("demoting the static IP to ephemeral") if staticIPName != "" { gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred(), "failed to get GCE cloud provider") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get GCE cloud provider") // Deleting it after it is attached "demotes" it to an // ephemeral IP, which can be auto-released. if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { @@ -661,7 +661,7 @@ var _ = SIGDescribe("Services", func() { var udpIngressIP string if loadBalancerSupportsUDP { - By("waiting for the UDP service to have a load balancer") + ginkgo.By("waiting for the UDP service to have a load balancer") // 2nd one should be faster since they ran in parallel. 
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout) jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer) @@ -671,29 +671,29 @@ var _ = SIGDescribe("Services", func() { udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) e2elog.Logf("UDP load balancer: %s", udpIngressIP) - By("verifying that TCP and UDP use different load balancers") + ginkgo.By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { framework.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } } - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) } // Change the services' node ports. - By("changing the TCP service's NodePort") + ginkgo.By("changing the TCP service's NodePort") tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) tcpNodePortOld := tcpNodePort @@ -706,7 +706,7 @@ var _ = SIGDescribe("Services", func() { } e2elog.Logf("TCP node port: %d", tcpNodePort) - By("changing the UDP service's NodePort") + ginkgo.By("changing the UDP service's NodePort") udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort) if loadBalancerSupportsUDP { jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer) @@ -723,29 +723,29 @@ var _ = SIGDescribe("Services", func() { } e2elog.Logf("UDP node port: %d", udpNodePort) - By("hitting the TCP service's new NodePort") + ginkgo.By("hitting the TCP service's new NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's new NodePort") + ginkgo.By("hitting the UDP service's new NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("checking the old TCP NodePort is closed") + ginkgo.By("checking the old TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, framework.KubeProxyLagTimeout) - By("checking the old UDP NodePort is closed") + ginkgo.By("checking the old UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePortOld, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) } // Change the services' main ports. 
- By("changing the TCP service's port") + ginkgo.By("changing the TCP service's port") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.Ports[0].Port++ }) @@ -762,7 +762,7 @@ var _ = SIGDescribe("Services", func() { framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } - By("changing the UDP service's port") + ginkgo.By("changing the UDP service's port") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Ports[0].Port++ }) @@ -783,59 +783,59 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("service port (TCP and UDP): %d", svcPort) - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } - By("Scaling the pods to 0") + ginkgo.By("Scaling the pods to 0") jig.Scale(ns1, 0) jig.Scale(ns2, 0) - By("looking for ICMP REJECT on the TCP service's NodePort") + ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort") jig.TestRejectedHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("looking for ICMP REJECT on the UDP service's NodePort") + ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort") jig.TestRejectedUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("looking for ICMP REJECT on the TCP service's LoadBalancer") + ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer") jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { - By("looking for ICMP REJECT on the UDP service's LoadBalancer") + ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer") jig.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } - By("Scaling the pods to 1") + ginkgo.By("Scaling the pods to 1") jig.Scale(ns1, 1) jig.Scale(ns2, 1) - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } // Change the services back to ClusterIP. 
- By("changing TCP service back to type=ClusterIP") + ginkgo.By("changing TCP service back to type=ClusterIP") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 @@ -844,7 +844,7 @@ var _ = SIGDescribe("Services", func() { tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout) jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) - By("changing UDP service back to type=ClusterIP") + ginkgo.By("changing UDP service back to type=ClusterIP") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 @@ -855,32 +855,32 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP) } - By("checking the TCP NodePort is closed") + ginkgo.By("checking the TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("checking the UDP NodePort is closed") + ginkgo.By("checking the UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("checking the TCP LoadBalancer is closed") + ginkgo.By("checking the TCP LoadBalancer is closed") jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) if loadBalancerSupportsUDP { - By("checking the UDP LoadBalancer is closed") + ginkgo.By("checking the UDP LoadBalancer is closed") jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) } }) - It("should be able to update NodePorts with two same port numbers but different protocols", func() { + ginkgo.It("should be able to update NodePorts with two same port numbers but different protocols", func() { serviceName := "nodeport-update-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) tcpService := jig.CreateTCPServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the updating NodePorts test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) svcPort := int(tcpService.Spec.Ports[0].Port) @@ -888,7 +888,7 @@ var _ = SIGDescribe("Services", func() { // Change the services to NodePort and add a UDP port. 
- By("changing the TCP service to type=NodePort and add a UDP port") + ginkgo.By("changing the TCP service to type=NodePort and add a UDP port") newService := jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.Ports = []v1.ServicePort{ @@ -917,20 +917,20 @@ var _ = SIGDescribe("Services", func() { } }) - It("should be able to change the type from ExternalName to ClusterIP", func() { + ginkgo.It("should be able to change the type from ExternalName to ClusterIP", func() { serviceName := "externalname-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) + ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the ExternalName to ClusterIP test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) - By("changing the ExternalName service to type=ClusterIP") + ginkgo.By("changing the ExternalName service to type=ClusterIP") clusterIPService := jig.UpdateServiceOrFail(ns, externalNameService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.ExternalName = "" @@ -941,20 +941,20 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP) }) - It("should be able to change the type from ExternalName to NodePort", func() { + ginkgo.It("should be able to change the type from ExternalName to NodePort", func() { serviceName := "externalname-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) + ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the ExternalName to NodePort test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) - By("changing the ExternalName service to type=NodePort") + ginkgo.By("changing the ExternalName service to type=NodePort") nodePortService := jig.UpdateServiceOrFail(ns, externalNameService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.ExternalName = "" @@ -965,20 +965,20 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort) }) - It("should be able to change the type from ClusterIP to ExternalName", func() { + ginkgo.It("should be able to change the type from ClusterIP to ExternalName", func() { serviceName := "clusterip-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns) + ginkgo.By("creating a service " + 
serviceName + " with the type=ClusterIP in namespace " + ns) clusterIPService := jig.CreateTCPServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the ClusterIP to ExternalName test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP) - By("changing the ClusterIP service to type=ExternalName") + ginkgo.By("changing the ClusterIP service to type=ExternalName") externalNameService := jig.UpdateServiceOrFail(ns, clusterIPService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeExternalName s.Spec.ExternalName = "foo.example.com" @@ -987,22 +987,22 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) }) - It("should be able to change the type from NodePort to ExternalName", func() { + ginkgo.It("should be able to change the type from NodePort to ExternalName", func() { serviceName := "nodeport-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns) + ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns) nodePortService := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) defer func() { e2elog.Logf("Cleaning up the NodePort to ExternalName test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort) - By("changing the NodePort service to type=ExternalName") + ginkgo.By("changing the NodePort service to type=ExternalName") externalNameService := jig.UpdateServiceOrFail(ns, nodePortService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeExternalName s.Spec.ExternalName = "foo.example.com" @@ -1012,20 +1012,20 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) }) - It("should use same NodePort with same port but different protocols", func() { + ginkgo.It("should use same NodePort with same port but different protocols", func() { serviceName := "nodeports" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) } }() - By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns) service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: t.ServiceName, @@ -1049,7 +1049,7 @@ var _ = SIGDescribe("Services", func() { }, } result, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) if len(result.Spec.Ports) != 2 { framework.Failf("got unexpected 
len(Spec.Ports) for new service: %v", result) @@ -1059,7 +1059,7 @@ var _ = SIGDescribe("Services", func() { } }) - It("should prevent NodePort collisions", func() { + ginkgo.It("should prevent NodePort collisions", func() { // TODO: use the ServiceTestJig here baseName := "nodeport-collision-" serviceName1 := baseName + "1" @@ -1068,18 +1068,18 @@ var _ = SIGDescribe("Services", func() { t := framework.NewServerTest(cs, ns, serviceName1) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) } }() - By("creating service " + serviceName1 + " with type NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName1 + " with type NodePort in namespace " + ns) service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort result, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) if result.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", result) @@ -1092,7 +1092,7 @@ var _ = SIGDescribe("Services", func() { framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result) } - By("creating service " + serviceName2 + " with conflicting NodePort") + ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort") service2 := t.BuildServiceSpec() service2.Name = serviceName2 service2.Spec.Type = v1.ServiceTypeNodePort @@ -1102,25 +1102,25 @@ var _ = SIGDescribe("Services", func() { framework.Failf("Created service with conflicting NodePort: %v", result2) } expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) - Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) + gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) - By("deleting service " + serviceName1 + " to release NodePort") + ginkgo.By("deleting service " + serviceName1 + " to release NodePort") err = t.DeleteService(serviceName1) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName1, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName1, ns) - By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") + ginkgo.By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") _, err = t.CreateService(service2) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) }) - It("should check NodePort out-of-range", func() { + ginkgo.It("should check NodePort out-of-range", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-range-test" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) @@ -1130,9 +1130,9 @@ var _ = SIGDescribe("Services", func() { service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort - By("creating service " + serviceName + " with type NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns) 
service, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", service) @@ -1156,7 +1156,7 @@ var _ = SIGDescribe("Services", func() { break } } - By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) + ginkgo.By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) result, err := framework.UpdateService(cs, ns, serviceName, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) @@ -1164,13 +1164,13 @@ var _ = SIGDescribe("Services", func() { framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result) } expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort) - Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) + gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) - By("deleting original service " + serviceName) + ginkgo.By("deleting original service " + serviceName) err = t.DeleteService(serviceName) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) - By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort)) + ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort)) service = t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) @@ -1178,17 +1178,17 @@ var _ = SIGDescribe("Services", func() { if err == nil { framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) } - Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) + gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) }) - It("should release NodePorts on delete", func() { + ginkgo.It("should release NodePorts on delete", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-reuse" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) @@ -1198,9 +1198,9 @@ var _ = SIGDescribe("Services", func() { service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort - By("creating service " + serviceName + " with type NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", service) @@ -1217,9 +1217,9 @@ var _ = SIGDescribe("Services", func() { } nodePort := port.NodePort - By("deleting original service " + serviceName) + ginkgo.By("deleting original service " + serviceName) err = 
t.DeleteService(serviceName) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) @@ -1236,21 +1236,21 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) } - By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) + ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) service = t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort service.Spec.Ports[0].NodePort = nodePort service, err = t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) }) - It("should create endpoints for unready pods", func() { + ginkgo.It("should create endpoints for unready pods", func() { serviceName := "tolerate-unready" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) @@ -1299,19 +1299,19 @@ var _ = SIGDescribe("Services", func() { }, nil) rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds - By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector)) + ginkgo.By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector)) _, err := t.CreateRC(rcSpec) framework.ExpectNoError(err) - By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector)) + ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector)) _, err = t.CreateService(service) framework.ExpectNoError(err) - By("Verifying pods for RC " + t.Name) + ginkgo.By("Verifying pods for RC " + t.Name) framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1)) svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Waiting for endpoints of Service with DNS name " + svcName) + ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName) execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) @@ -1328,16 +1328,16 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } - By("Scaling down replication controller to zero") + ginkgo.By("Scaling down replication controller to zero") framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) - By("Update service to not tolerate unready services") + ginkgo.By("Update service to not tolerate unready services") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "false" }) framework.ExpectNoError(err) - By("Check if pod is unreachable") + 
ginkgo.By("Check if pod is unreachable") cmd = fmt.Sprintf("wget -qO- -T 2 http://%s:%d/; test \"$?\" -eq \"1\"", svcName, port) if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error @@ -1351,13 +1351,13 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } - By("Update service to tolerate unready services again") + ginkgo.By("Update service to tolerate unready services again") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "true" }) framework.ExpectNoError(err) - By("Check if terminating pod is available through service") + ginkgo.By("Check if terminating pod is available through service") cmd = fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error @@ -1371,7 +1371,7 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } - By("Remove pods immediately") + ginkgo.By("Remove pods immediately") label := labels.SelectorFromSet(labels.Set(t.Labels)) options := metav1.ListOptions{LabelSelector: label.String()} podClient := t.Client.CoreV1().Pods(f.Namespace.Name) @@ -1389,7 +1389,7 @@ var _ = SIGDescribe("Services", func() { } }) - It("should only allow access from service loadbalancer source ranges [Slow]", func() { + ginkgo.It("should only allow access from service loadbalancer source ranges [Slow]", func() { // this feature currently supported only on GCE/GKE/AWS framework.SkipUnlessProviderIs("gce", "gke", "aws") @@ -1406,18 +1406,18 @@ var _ = SIGDescribe("Services", func() { serviceName := "lb-sourcerange" jig := framework.NewServiceTestJig(cs, serviceName) - By("Prepare allow source ips") + ginkgo.By("Prepare allow source ips") // prepare the exec pods // acceptPod are allowed to access the loadbalancer acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil) dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) dropPod, err := cs.CoreV1().Pods(namespace).Get(dropPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch pod: %s in namespace: %s", dropPodName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", dropPodName, namespace) - By("creating a pod to be part of the service " + serviceName) + ginkgo.By("creating a pod to be part of the service " + serviceName) // This container is an nginx container listening on port 80 // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response jig.RunOrFail(namespace, nil) @@ -1433,7 +1433,7 @@ var _ = SIGDescribe("Services", func() { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.LoadBalancerSourceRanges = nil }) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + 
gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, loadBalancerCreateTimeout) @@ -1442,14 +1442,14 @@ var _ = SIGDescribe("Services", func() { // timeout when we haven't just created the load balancer normalReachabilityTimeout := 2 * time.Minute - By("check reachability from different sources") + ginkgo.By("check reachability from different sources") svcIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) // Wait longer as this is our first request after creation. We can't check using a separate method, // because the LB should only be reachable from the "accept" pod framework.CheckReachabilityFromPod(true, loadBalancerLagTimeout, namespace, acceptPodName, svcIP) framework.CheckReachabilityFromPod(false, normalReachabilityTimeout, namespace, dropPodName, svcIP) - By("Update service LoadBalancerSourceRange and check reachability") + ginkgo.By("Update service LoadBalancerSourceRange and check reachability") jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { // only allow access from dropPod svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"} @@ -1457,7 +1457,7 @@ var _ = SIGDescribe("Services", func() { framework.CheckReachabilityFromPod(false, normalReachabilityTimeout, namespace, acceptPodName, svcIP) framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP) - By("Delete LoadBalancerSourceRange field and check reachability") + ginkgo.By("Delete LoadBalancerSourceRange field and check reachability") jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.LoadBalancerSourceRanges = nil }) @@ -1466,7 +1466,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() { framework.SkipUnlessProviderIs("azure", "gke", "gce") createTimeout := framework.LoadBalancerCreateTimeoutDefault @@ -1480,7 +1480,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "lb-internal" jig := framework.NewServiceTestJig(cs, serviceName) - By("creating pod to be part of service " + serviceName) + ginkgo.By("creating pod to be part of service " + serviceName) jig.RunOrFail(namespace, nil) enableILB, disableILB := framework.EnableAndDisableInternalLB() @@ -1491,7 +1491,7 @@ var _ = SIGDescribe("Services", func() { return strings.HasPrefix(ingressEndpoint, "10.") } - By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") + ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer enableILB(svc) @@ -1501,11 +1501,11 @@ var _ = SIGDescribe("Services", func() { lbIngress := &svc.Status.LoadBalancer.Ingress[0] svcPort := int(svc.Spec.Ports[0].Port) // should have an internal IP. - Expect(isInternalEndpoint(lbIngress)).To(BeTrue()) + gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeTrue()) // ILBs are not accessible from the test orchestrator, so it's necessary to use // a pod to test the service. 
- By("hitting the internal load balancer from pod") + ginkgo.By("hitting the internal load balancer from pod") e2elog.Logf("creating pod with host network") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") @@ -1527,10 +1527,10 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("Successful curl; stdout: %v", stdout) return true, nil }); pollErr != nil { - framework.Failf("Failed to hit ILB IP, err: %v", pollErr) + framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr) } - By("switching to external type LoadBalancer") + ginkgo.By("switching to external type LoadBalancer") svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { disableILB(svc) }) @@ -1547,9 +1547,9 @@ var _ = SIGDescribe("Services", func() { } // should have an external IP. jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - Expect(isInternalEndpoint(lbIngress)).To(BeFalse()) + gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse()) - By("hitting the external load balancer") + ginkgo.By("hitting the external load balancer") e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName) tcpIngressIP = framework.GetIngressPoint(lbIngress) jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) @@ -1557,7 +1557,7 @@ var _ = SIGDescribe("Services", func() { // GCE cannot test a specific IP because the test may not own it. This cloud specific condition // will be removed when GCP supports similar functionality. if framework.ProviderIs("azure") { - By("switching back to interal type LoadBalancer, with static IP specified.") + ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.") internalStaticIP := "10.240.11.11" svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { svc.Spec.LoadBalancerIP = internalStaticIP @@ -1576,17 +1576,17 @@ var _ = SIGDescribe("Services", func() { } // should have the given static internal IP. jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - Expect(framework.GetIngressPoint(lbIngress)).To(Equal(internalStaticIP)) + gomega.Expect(framework.GetIngressPoint(lbIngress)).To(gomega.Equal(internalStaticIP)) } - By("switching to ClusterIP type to destroy loadbalancer") + ginkgo.By("switching to ClusterIP type to destroy loadbalancer") jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, createTimeout) }) // This test creates a load balancer, make sure its health check interval // equals to gceHcCheckIntervalSeconds. Then the interval is manipulated // to be something else, see if the interval will be reconciled. - It("should reconcile LB health check interval [Slow][Serial]", func() { + ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() { const gceHcCheckIntervalSeconds = int64(8) // This test is for clusters on GCE. 
 // (It restarts kube-controller-manager, which we don't support on GKE)
@@ -1604,7 +1604,7 @@ var _ = SIGDescribe("Services", func() {
 serviceName := "lb-hc-int"
 jig := framework.NewServiceTestJig(cs, serviceName)
- By("create load balancer service")
+ ginkgo.By("create load balancer service")
 // Create loadbalancer service with source range from node[0] and podAccept
 svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 svc.Spec.Type = v1.ServiceTypeLoadBalancer
@@ -1615,7 +1615,7 @@ var _ = SIGDescribe("Services", func() {
 jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
 svc.Spec.Type = v1.ServiceTypeNodePort
 })
- Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+ gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred())
 }()
 svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, framework.LoadBalancerCreateTimeoutDefault)
@@ -1625,15 +1625,15 @@ var _ = SIGDescribe("Services", func() {
 if err != nil {
 framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
 }
- Expect(hc.CheckIntervalSec).To(Equal(gceHcCheckIntervalSeconds))
+ gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds))
- By("modify the health check interval")
+ ginkgo.By("modify the health check interval")
 hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
 if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
 framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
 }
- By("restart kube-controller-manager")
+ ginkgo.By("restart kube-controller-manager")
 if err := framework.RestartControllerManager(); err != nil {
 framework.Failf("framework.RestartControllerManager() = %v; want nil", err)
 }
@@ -1641,12 +1641,12 @@ var _ = SIGDescribe("Services", func() {
 framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err)
 }
- By("health check should be reconciled")
+ ginkgo.By("health check should be reconciled")
 pollInterval := framework.Poll * 10
 if pollErr := wait.PollImmediate(pollInterval, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
 hc, err := gceCloud.GetHTTPHealthCheck(hcName)
 if err != nil {
- e2elog.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err)
+ e2elog.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err)
 return false, err
 }
 e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
@@ -1656,32 +1656,32 @@ var _ = SIGDescribe("Services", func() {
 }
 })
- It("should have session affinity work for service with type clusterIP", func() {
+ ginkgo.It("should have session affinity work for service with type clusterIP", func() {
 svc := getServeHostnameService("service")
 svc.Spec.Type = v1.ServiceTypeClusterIP
 execAffinityTestForNonLBService(f, cs, svc)
 })
- It("should be able to switch session affinity for service with type clusterIP", func() {
+ ginkgo.It("should be able to switch session affinity for service with type clusterIP", func() {
 svc := getServeHostnameService("service")
 svc.Spec.Type = v1.ServiceTypeClusterIP
 execAffinityTestForNonLBServiceWithTransition(f, cs, svc)
 })
- It("should have session affinity work for NodePort service", func() {
+ ginkgo.It("should have session affinity work for NodePort service", func() {
 svc := getServeHostnameService("service")
 svc.Spec.Type = v1.ServiceTypeNodePort
 execAffinityTestForNonLBService(f, cs, svc)
 })
- It("should be able to switch session affinity for NodePort service", func() {
+ ginkgo.It("should be able to 
switch session affinity for NodePort service", func() { svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeNodePort execAffinityTestForNonLBServiceWithTransition(f, cs, svc) }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1692,7 +1692,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1703,7 +1703,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1714,7 +1714,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1724,7 +1724,7 @@ var _ = SIGDescribe("Services", func() { execAffinityTestForLBServiceWithTransition(f, cs, svc) }) - It("should implement service.kubernetes.io/service-proxy-name", func() { + ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() { // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) // this test does not work if the Node does not support SSH Key @@ -1739,53 +1739,53 @@ var _ = SIGDescribe("Services", func() { // test again late to make sure it never becomes available. // svcToggled: Created without the label then the label is toggled verifying reachability at each step. 
- By("creating service-disabled in namespace " + ns) + ginkgo.By("creating service-disabled in namespace " + ns) svcDisabled := getServeHostnameService("service-disabled") svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) - By("creating service in namespace " + ns) + ginkgo.By("creating service in namespace " + ns) svcToggled := getServeHostnameService("service") podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name) hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } host := hosts[0] - By("verifying service is up") + ginkgo.By("verifying service is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) - By("verifying service-disabled is not up") + ginkgo.By("verifying service-disabled is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) - By("adding service-proxy-name label") + ginkgo.By("adding service-proxy-name label") jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) { svc.ObjectMeta.Labels = serviceProxyNameLabels }) - By("verifying service is not up") + ginkgo.By("verifying service is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort)) - By("removing service-proxy-name annotation") + ginkgo.By("removing service-proxy-name annotation") jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) { svc.ObjectMeta.Labels = nil }) - By("verifying service is up") + ginkgo.By("verifying service is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) - By("verifying service-disabled is still not up") + ginkgo.By("verifying service-disabled is still not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) }) - It("should be rejected when no endpoints exist", func() { + ginkgo.It("should be rejected when no endpoints exist", func() { namespace := f.Namespace.Name serviceName := "no-pods" jig := framework.NewServiceTestJig(cs, serviceName) @@ -1799,16 +1799,16 @@ var _ = SIGDescribe("Services", func() { TargetPort: intstr.FromInt(80), }} - By("creating a service with no endpoints") + ginkgo.By("creating a service with no endpoints") _, err := jig.CreateServiceWithServicePort(labels, namespace, ports) if err != nil { - framework.Failf("Failed to create service: %v", err) + framework.Failf("ginkgo.Failed to create service: %v", 
err) } nodeName := nodes.Items[0].Name podName := "execpod-noendpoints" - By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) + ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) @@ -1819,7 +1819,7 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("waiting up to %v wget %v", framework.KubeProxyEndpointLagTimeout, serviceAddress) cmd := fmt.Sprintf(`wget -T 3 -qO- %v`, serviceAddress) - By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName)) + ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName)) expectedErr := "connection refused" if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyEndpointLagTimeout, func() (bool, error) { _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) @@ -1828,15 +1828,13 @@ var _ = SIGDescribe("Services", func() { if strings.Contains(strings.ToLower(err.Error()), expectedErr) { e2elog.Logf("error contained '%s', as expected: %s", expectedErr, err.Error()) return true, nil - } else { - e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error()) - return false, nil } - } else { - return true, errors.New("expected wget call to fail") + e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error()) + return false, nil } + return true, errors.New("expected wget call to fail") }); pollErr != nil { - Expect(pollErr).NotTo(HaveOccurred()) + gomega.Expect(pollErr).NotTo(gomega.HaveOccurred()) } }) @@ -1850,7 +1848,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { var cs clientset.Interface serviceLBNames := []string{} - BeforeEach(func() { + ginkgo.BeforeEach(func() { // requires cloud load-balancer support - this feature currently supported only on GCE/GKE framework.SkipUnlessProviderIs("gce", "gke") @@ -1860,8 +1858,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { @@ -1872,7 +1870,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = []string{} }) - It("should work for type=LoadBalancer", func() { + ginkgo.It("should work for type=LoadBalancer", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -1889,33 +1887,33 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // Make sure we didn't leak the health check node port. 
threshold := 2 for _, ips := range jig.GetEndpointNodes(svc) { - Expect(jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)).NotTo(HaveOccurred()) + gomega.Expect(jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)).NotTo(gomega.HaveOccurred()) } - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() svcTCPPort := int(svc.Spec.Ports[0].Port) ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) - By("reading clientIP using the TCP service's service port via its external VIP") + ginkgo.By("reading clientIP using the TCP service's service port via its external VIP") content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP) - By("checking if Source IP is preserved") + ginkgo.By("checking if Source IP is preserved") if strings.HasPrefix(clientIP, "10.") { framework.Failf("Source IP was NOT preserved") } }) - It("should work for type=NodePort", func() { + ginkgo.It("should work for type=NodePort", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true) defer func() { - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() tcpNodePort := int(svc.Spec.Ports[0].NodePort) @@ -1924,7 +1922,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for nodeName, nodeIPs := range endpointsNodeMap { nodeIP := nodeIPs[0] - By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) + ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) content := jig.GetHTTPContent(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout, path) clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) @@ -1934,7 +1932,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - It("should only target nodes with endpoints", func() { + ginkgo.It("should only target nodes with endpoints", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -1953,7 +1951,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) @@ -1971,7 +1969,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for i := 0; i < len(nodes.Items); i++ { endpointNodeName := nodes.Items[i].Name - By("creating a pod to be part of the 
service " + serviceName + " on node " + endpointNodeName) + ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName) jig.RunOrFail(namespace, func(rc *v1.ReplicationController) { rc.Name = serviceName if endpointNodeName != "" { @@ -1979,7 +1977,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) + ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) jig.WaitForEndpointOnNode(namespace, serviceName, endpointNodeName) // HealthCheck should pass only on the node where num(endpoints) > 0 @@ -1992,13 +1990,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { port := strconv.Itoa(healthCheckNodePort) ipPort := net.JoinHostPort(publicIP, port) e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) - Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(HaveOccurred()) + gomega.Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(gomega.HaveOccurred()) } framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) } }) - It("should work from pods", func() { + ginkgo.It("should work from pods", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -2008,7 +2006,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) @@ -2018,13 +2016,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { nodeName := nodes.Items[0].Name podName := "execpod-sourceip" - By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) + ginkgo.By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) defer func() { err := cs.CoreV1().Pods(namespace).Delete(execPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", execPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", execPodName) }() execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -2033,7 +2031,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path) var srcIP string - By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName)) + ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName)) if pollErr := wait.PollImmediate(framework.Poll, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { @@ -2047,7 
+2045,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - It("should handle updates to ExternalTrafficPolicy field", func() { + ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -2061,13 +2059,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() // save the health check node port because it disappears when ESIPP is turned off. healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) - By("turning ESIPP off") + ginkgo.By("turning ESIPP off") svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster }) @@ -2089,14 +2087,14 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) path := "/clientip" - By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) + ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) for nodeName, nodeIPs := range noEndpointNodeMap { - By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) + ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) jig.GetHTTPContent(nodeIPs[0], svcNodePort, framework.KubeProxyLagTimeout, path) } for nodeName, nodeIPs := range endpointNodeMap { - By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) + ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) var body bytes.Buffer pollfn := func() (bool, error) { result := framework.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil) @@ -2114,7 +2112,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } // Poll till kube-proxy re-adds the MASQUERADE rule on the node. - By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) + ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) var clientIP string pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") @@ -2134,7 +2132,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // If the health check nodePort has NOT been freed, the new service // creation will fail. 
- By("setting ExternalTraffic field back to OnlyLocal") + ginkgo.By("setting ExternalTraffic field back to OnlyLocal") svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal // Request the same healthCheckNodePort as before, to test the user-requested allocation path @@ -2143,7 +2141,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { pollErr = wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, path) clientIP = content.String() - By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) + ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) if !strings.HasPrefix(clientIP, "10.") { return true, nil } @@ -2163,7 +2161,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam defer func() { e2elog.Logf("Cleaning up the exec pod") err := c.CoreV1().Pods(ns).Delete(execPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", execPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", execPodName) }() execPod, err := f.ClientSet.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -2193,7 +2191,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam // Desired stdout in this format: client_address=x.x.x.x outputs := strings.Split(strings.TrimSpace(stdout), "=") if len(outputs) != 2 { - // Fail the test if output format is unexpected. + // ginkgo.Fail the test if output format is unexpected. 
framework.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout) } return execPod.Status.PodIP, outputs[1] @@ -2215,49 +2213,49 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { ns := f.Namespace.Name numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name - By("creating service in namespace " + ns) + ginkgo.By("creating service in namespace " + ns) serviceType := svc.Spec.Type svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) defer func() { framework.StopServeHostnameService(cs, ns, serviceName) }() jig := framework.NewServiceTestJig(cs, serviceName) svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch service: %s in namespace: %s", serviceName, ns) - var svcIp string + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch service: %s in namespace: %s", serviceName, ns) + var svcIP string if serviceType == v1.ServiceTypeNodePort { nodes := framework.GetReadySchedulableNodesOrDie(cs) addrs := framework.CollectAddresses(nodes, v1.NodeInternalIP) - Expect(len(addrs)).To(BeNumerically(">", 0), "Failed to get Node internal IP") - svcIp = addrs[0] + gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "ginkgo.Failed to get Node internal IP") + svcIP = addrs[0] servicePort = int(svc.Spec.Ports[0].NodePort) } else { - svcIp = svc.Spec.ClusterIP + svcIP = svc.Spec.ClusterIP } execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) defer func() { e2elog.Logf("Cleaning up the exec pod") err := cs.CoreV1().Pods(ns).Delete(execPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s", execPodName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", execPodName, ns) }() execPod, err := cs.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch pod: %s in namespace: %s", execPodName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", execPodName, ns) if !isTransitionTest { - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, false)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, false)).To(gomega.BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } } @@ -2275,12 +2273,12 @@ func 
execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name - By("creating service in namespace " + ns) + ginkgo.By("creating service in namespace " + ns) svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) jig := framework.NewServiceTestJig(cs, serviceName) - By("waiting for loadbalancer for service " + ns + "/" + serviceName) + ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName) svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) defer func() { @@ -2295,16 +2293,16 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, port := int(svc.Spec.Ports[0].Port) if !isTransitionTest { - Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(gomega.BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue()) } } diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index e374fa60503..1946e68c344 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -35,7 +35,7 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) type durations []time.Duration @@ -161,7 +161,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab blocker := make(chan struct{}, inParallel) for i := 0; i < total; i++ { go func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() blocker <- struct{}{} defer func() { <-blocker }() if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil { diff --git a/test/e2e/network/util_iperf.go b/test/e2e/network/util_iperf.go index 05b02dcfe0b..8f47199de0d 100644 --- a/test/e2e/network/util_iperf.go +++ b/test/e2e/network/util_iperf.go @@ -28,6 +28,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) +// IPerfResults is a struct that stores some IPerfResult type IPerfResults struct { BandwidthMap map[string]int64 } @@ -62,8 +63,8 @@ func (i *IPerfResults) ToTSV() string { var buffer bytes.Buffer for node, bandwidth := range i.BandwidthMap { - asJson, _ := json.Marshal(node) - buffer.WriteString("\t " + string(asJson) + "\t " + fmt.Sprintf("%E", float64(bandwidth))) + asJSON, _ := json.Marshal(node) + buffer.WriteString("\t " + string(asJSON) + "\t " + fmt.Sprintf("%E", float64(bandwidth))) } return buffer.String() } @@ -88,6 +89,7 @@ func NewIPerf(csvLine string) *IPerfResult { return &i } +// StrSlice represents a string slice type StrSlice []string func (s StrSlice) get(i int) string { From 01d38fec1688aeffc1c0bb4aed8ada9441695d92 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Sat, 11 May 2019 23:30:17 +0800 Subject: [PATCH 116/194] fix golint failures of test/e2e/storage --- hack/.golint_failures | 1 - test/e2e/storage/csi_mock_volume.go | 11 +++--- test/e2e/storage/detach_mounted.go | 1 + .../nfs_persistent_volume-disruptive.go | 6 +-- test/e2e/storage/pd.go | 3 +- test/e2e/storage/persistent_volumes-local.go | 37 ++++++++++--------- test/e2e/storage/volume_provisioning.go | 7 ++-- test/e2e/storage/volumes.go | 1 + 8 files changed, 31 insertions(+), 36 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 3a5ade55fbc..3635b0bbd31 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -610,7 +610,6 @@ test/e2e/network test/e2e/node test/e2e/scalability test/e2e/scheduling -test/e2e/storage test/e2e/storage/drivers test/e2e/storage/testsuites test/e2e/storage/utils diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 0ef00d2287d..0c0fea51a3e 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -756,12 +756,11 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len()) } return nil - } else { - if foundAttributes.Len() != 0 { - return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List()) - } - return nil } + if foundAttributes.Len() != 0 { + return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List()) + } + return nil } func waitForCSIDriver(cs clientset.Interface, driverName string) error { @@ -774,7 +773,7 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error { return err } } - return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName) + return fmt.Errorf("gave up after waiting %v for CSIDriver %q", timeout, driverName) } func destroyCSIDriver(cs clientset.Interface, driverName string) { 
diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index 8a64d3e93a2..25d72fc62fe 100644 --- a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -35,6 +35,7 @@ import ( ) var ( + // BusyBoxImage is the image URI of BusyBox. BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox) durationForStuckMount = 110 * time.Second ) diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 9fa50814c1d..1973436540f 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -39,10 +39,6 @@ type disruptiveTest struct { runTest testBody } -const ( - MinNodes = 2 -) - var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { f := framework.NewDefaultFramework("disruptive-pv") @@ -60,7 +56,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { ginkgo.BeforeEach(func() { // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node. - framework.SkipUnlessNodeCountIsAtLeast(MinNodes) + framework.SkipUnlessNodeCountIsAtLeast(minNodes) framework.SkipIfProviderIs("local") c = f.ClientSet diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index c4f4614983e..e0c31653fa4 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -421,9 +421,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() { err = cs.CoreV1().Pods(ns).Evict(evictTarget) if err != nil { return false, nil - } else { - return true, nil } + return true, nil }) framework.ExpectNoError(err, fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout)) } diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index ec59ea81d51..4094beaced9 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -59,27 +59,30 @@ type localTestConfig struct { type localVolumeType string const ( - // default local volume type, aka a directory + // DirectoryLocalVolumeType is the default local volume type, aka a directory DirectoryLocalVolumeType localVolumeType = "dir" - // like DirectoryLocalVolumeType but it's a symbolic link to directory + // DirectoryLinkLocalVolumeType is like DirectoryLocalVolumeType, + // but it's a symbolic link to directory DirectoryLinkLocalVolumeType localVolumeType = "dir-link" - // like DirectoryLocalVolumeType but bind mounted + // DirectoryBindMountedLocalVolumeType is like DirectoryLocalVolumeType + // but bind mounted DirectoryBindMountedLocalVolumeType localVolumeType = "dir-bindmounted" - // like DirectoryLocalVolumeType but it's a symbolic link to self bind mounted directory + // DirectoryLinkBindMountedLocalVolumeType is like DirectoryLocalVolumeType, + // but it's a symbolic link to self bind mounted directory // Note that bind mounting at symbolic link actually mounts at directory it // links to. DirectoryLinkBindMountedLocalVolumeType localVolumeType = "dir-link-bindmounted" - // creates a tmpfs and mounts it + // TmpfsLocalVolumeType creates a tmpfs and mounts it TmpfsLocalVolumeType localVolumeType = "tmpfs" - // tests based on local ssd at /mnt/disks/by-uuid/ + // GCELocalSSDVolumeType tests based on local ssd at /mnt/disks/by-uuid/ GCELocalSSDVolumeType localVolumeType = "gce-localssd-scsi-fs" - // Creates a local file, formats it, and maps it as a block device. 
+ // BlockLocalVolumeType creates a local file, formats it, and maps it as a block device. BlockLocalVolumeType localVolumeType = "block" - // Creates a local file serving as the backing for block device., formats it, - // and mounts it to use as FS mode local volume. + // BlockFsWithFormatLocalVolumeType creates a local file serving as the backing for block device, + // formats it, and mounts it to use as FS mode local volume. BlockFsWithFormatLocalVolumeType localVolumeType = "blockfswithformat" - // Creates a local file serving as the backing for block device. do not format it manually, - // and mounts it to use as FS mode local volume. + // BlockFsWithoutFormatLocalVolumeType creates a local file serving as the backing for block device, + // does not format it manually, and mounts it to use as FS mode local volume. BlockFsWithoutFormatLocalVolumeType localVolumeType = "blockfswithoutformat" ) @@ -1003,21 +1006,19 @@ func createWriteCmd(testDir string, testFile string, writeTestFileContent string // Cleanup the file containing testFileContent. deleteTestFileCmd := fmt.Sprintf("rm %s", testFilePath) return fmt.Sprintf("%s && %s && %s && %s", writeTestFileCmd, sudoCmd, writeBlockCmd, deleteTestFileCmd) - } else { - testFilePath := filepath.Join(testDir, testFile) - return fmt.Sprintf("mkdir -p %s; echo %s > %s", testDir, writeTestFileContent, testFilePath) } + testFilePath := filepath.Join(testDir, testFile) + return fmt.Sprintf("mkdir -p %s; echo %s > %s", testDir, writeTestFileContent, testFilePath) } func createReadCmd(testFileDir string, testFile string, volumeType localVolumeType) string { if volumeType == BlockLocalVolumeType { // Create the command to read the beginning of the block device and print it in ascii. return fmt.Sprintf("hexdump -n 100 -e '100 \"%%_p\"' %s | head -1", testFileDir) - } else { - // Create the command to read (aka cat) a file. - testFilePath := filepath.Join(testFileDir, testFile) - return fmt.Sprintf("cat %s", testFilePath) } + // Create the command to read (aka cat) a file. 
+ testFilePath := filepath.Join(testFileDir, testFile) + return fmt.Sprintf("cat %s", testFilePath) } // Read testFile and evaluate whether it contains the testFileContent diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 7838662f241..4b179617cfd 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -847,7 +847,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipIfProviderIs("gke") ginkgo.By("creating a Gluster DP server Pod") pod := startGlusterDpServerPod(c, ns) - serverUrl := "http://" + pod.Status.PodIP + ":8081" + serverURL := "http://" + pod.Status.PodIP + ":8081" ginkgo.By("creating a StorageClass") test := testsuites.StorageClassTest{ Client: c, @@ -855,7 +855,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Provisioner: "kubernetes.io/glusterfs", ClaimSize: "2Gi", ExpectedSize: "2Gi", - Parameters: map[string]string{"resturl": serverUrl}, + Parameters: map[string]string{"resturl": serverURL}, } suffix := fmt.Sprintf("glusterdptest") test.Class = newStorageClass(test, ns, suffix) @@ -1206,9 +1206,8 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]* } if len(remainingPVs) > 0 { return false, nil // Poll until no PVs remain - } else { - return true, nil // No PVs remain } + return true, nil // No PVs remain }) return remainingPVs, err } diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 49224c31e3f..d2edf0158f8 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -15,6 +15,7 @@ limitations under the License. */ // This test is volumes test for configmap. + package storage import ( From 04f59751ecc4206c8b078d3a29e303617d90a978 Mon Sep 17 00:00:00 2001 From: Elisiano Petrini Date: Sat, 11 May 2019 15:54:09 -0400 Subject: [PATCH 117/194] Fixed spelling in a couple of descriptions --- pkg/apis/core/types.go | 8 ++++---- staging/src/k8s.io/api/core/v1/types.go | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 1b1f7b6b942..5b88d570706 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -1443,7 +1443,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool } @@ -1466,7 +1466,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool } @@ -1793,7 +1793,7 @@ type ConfigMapKeySelector struct { LocalObjectReference // The key to select. Key string - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional Optional *bool } @@ -1804,7 +1804,7 @@ type SecretKeySelector struct { LocalObjectReference // The key of the secret to select from. Must be a valid secret key. 
Key string - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional Optional *bool } diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index a1ffef0cb2e..d40e1033a96 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -1094,7 +1094,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1520,7 +1520,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1547,7 +1547,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1889,7 +1889,7 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1900,7 +1900,7 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. 
Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } From afe6d48303d0504c274b8349e54a0fb7f8b49e70 Mon Sep 17 00:00:00 2001 From: Elisiano Petrini Date: Sat, 11 May 2019 18:06:43 -0400 Subject: [PATCH 118/194] Regenerated files --- api/openapi-spec/swagger.json | 10 +++++----- staging/src/k8s.io/api/core/v1/generated.proto | 10 +++++----- .../k8s.io/api/core/v1/types_swagger_doc_generated.go | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 08d6edc1c84..2c16c004955 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -6447,7 +6447,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or it's key must be defined", + "description": "Specify whether the ConfigMap or its key must be defined", "type": "boolean" } }, @@ -6537,7 +6537,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or it's keys must be defined", + "description": "Specify whether the ConfigMap or its keys must be defined", "type": "boolean" } }, @@ -6563,7 +6563,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or it's keys must be defined", + "description": "Specify whether the ConfigMap or its keys must be defined", "type": "boolean" } }, @@ -10319,7 +10319,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the Secret or it's key must be defined", + "description": "Specify whether the Secret or its key must be defined", "type": "boolean" } }, @@ -10414,7 +10414,7 @@ "type": "array" }, "optional": { - "description": "Specify whether the Secret or it's keys must be defined", + "description": "Specify whether the Secret or its keys must be defined", "type": "boolean" }, "secretName": { diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index ff865d5d574..9d23f83a561 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -496,7 +496,7 @@ message ConfigMapKeySelector { // The key to select. optional string key = 2; - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional optional bool optional = 3; } @@ -556,7 +556,7 @@ message ConfigMapProjection { // +optional repeated KeyToPath items = 2; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -588,7 +588,7 @@ message ConfigMapVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -3986,7 +3986,7 @@ message SecretKeySelector { // The key of the secret to select from. Must be a valid secret key. 
optional string key = 2; - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional optional bool optional = 3; } @@ -4068,7 +4068,7 @@ message SecretVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional optional bool optional = 4; } diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 4a533d18f29..b300453dfa2 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -272,7 +272,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string { var map_ConfigMapKeySelector = map[string]string{ "": "Selects a key from a ConfigMap.", "key": "The key to select.", - "optional": "Specify whether the ConfigMap or it's key must be defined", + "optional": "Specify whether the ConfigMap or its key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -305,7 +305,7 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -316,7 +316,7 @@ var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -1958,7 +1958,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string { var map_SecretKeySelector = map[string]string{ "": "SecretKeySelector selects a key of a Secret.", "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or it's key must be defined", + "optional": "Specify whether the Secret or its key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -2000,7 +2000,7 @@ var map_SecretVolumeSource = map[string]string{ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or it's keys must be defined", + "optional": "Specify whether the Secret or its keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { From badac1b31cf59787161a709db2c48e1311e266fc Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Sat, 11 May 2019 16:19:26 -0700 Subject: [PATCH 119/194] Swap mapfile out in update-codegen --- hack/update-codegen.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 7c01c7c1913..5f34e61731f 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -62,16 +62,18 @@ GV_DIRS_CSV=$(IFS=',';echo "${GV_DIRS[*]// /,}";IFS=$) # update- and verify- scripts. 
${clientgen} --output-base "${KUBE_ROOT}/vendor" --output-package="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" --go-header-file "${KUBE_ROOT}/hack/boilerplate/boilerplate.generatego.txt" "$@" -mapfile -t listergen_external_apis < <( +listergen_external_apis=() +kube::util::read-array listergen_external_apis < <( cd "${KUBE_ROOT}/staging/src" find k8s.io/api -name types.go -print0 | xargs -0 -n1 dirname | sort ) listergen_external_apis_csv=$(IFS=,; echo "${listergen_external_apis[*]}") ${listergen} --output-base "${KUBE_ROOT}/vendor" --output-package "k8s.io/client-go/listers" --input-dirs "${listergen_external_apis_csv}" --go-header-file "${KUBE_ROOT}/hack/boilerplate/boilerplate.generatego.txt" "$@" -mapfile -t informergen_external_apis < <( +informergen_external_apis=() +# because client-gen doesn't do policy/v1alpha1, we have to skip it too +kube::util::read-array informergen_external_apis < <( cd "${KUBE_ROOT}/staging/src" - # because client-gen doesn't do policy/v1alpha1, we have to skip it too find k8s.io/api -name types.go -print0 | xargs -0 -n1 dirname | sort | grep -v pkg.apis.policy.v1alpha1 ) informergen_external_apis_csv=$(IFS=,; echo "${informergen_external_apis[*]}") From 087bc1369e043a1b58aca41d97c818beddb06536 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 12:32:08 +0800 Subject: [PATCH 120/194] remove dot imports in e2e/node --- test/e2e/node/apparmor.go | 14 +++--- test/e2e/node/crictl.go | 10 ++-- test/e2e/node/events.go | 20 ++++---- test/e2e/node/kubelet.go | 68 +++++++++++++------------- test/e2e/node/kubelet_perf.go | 26 +++++----- test/e2e/node/mount_propagation.go | 14 +++--- test/e2e/node/node_problem_detector.go | 58 +++++++++++----------- test/e2e/node/pod_gc.go | 6 +-- test/e2e/node/pods.go | 68 +++++++++++++------------- test/e2e/node/pre_stop.go | 40 +++++++-------- test/e2e/node/security_context.go | 40 +++++++-------- test/e2e/node/ssh.go | 12 ++--- test/e2e/node/ttlafterfinished.go | 24 ++++----- 13 files changed, 200 insertions(+), 200 deletions(-) diff --git a/test/e2e/node/apparmor.go b/test/e2e/node/apparmor.go index efed9490251..f93afba15c1 100644 --- a/test/e2e/node/apparmor.go +++ b/test/e2e/node/apparmor.go @@ -21,29 +21,29 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("AppArmor", func() { f := framework.NewDefaultFramework("apparmor") - Context("load AppArmor profiles", func() { - BeforeEach(func() { + ginkgo.Context("load AppArmor profiles", func() { + ginkgo.BeforeEach(func() { common.SkipIfAppArmorNotSupported() common.LoadAppArmorProfiles(f) }) - AfterEach(func() { - if !CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if !ginkgo.CurrentGinkgoTestDescription().Failed { return } framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf) }) - It("should enforce an AppArmor profile", func() { + ginkgo.It("should enforce an AppArmor profile", func() { common.CreateAppArmorTestPod(f, false, true) }) - It("can disable an AppArmor profile, using unconfined", func() { + ginkgo.It("can disable an AppArmor profile, using unconfined", func() { common.CreateAppArmorTestPod(f, true, true) }) }) diff --git a/test/e2e/node/crictl.go b/test/e2e/node/crictl.go index d274e1d69b0..026dd546726 100644 --- a/test/e2e/node/crictl.go +++ b/test/e2e/node/crictl.go @@ -24,22 +24,22 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("crictl", func() { f := framework.NewDefaultFramework("crictl") - BeforeEach(func() { + ginkgo.BeforeEach(func() { // `crictl` is not available on all cloud providers. framework.SkipUnlessProviderIs("gce", "gke") // The test requires $HOME/.ssh/id_rsa key to be present. framework.SkipUnlessSSHKeyPresent() }) - It("should be able to run crictl on the node", func() { + ginkgo.It("should be able to run crictl on the node", func() { // Get all nodes' external IPs. - By("Getting all nodes' SSH-able IP addresses") + ginkgo.By("Getting all nodes' SSH-able IP addresses") hosts, err := e2essh.NodeSSHHosts(f.ClientSet) if err != nil { framework.Failf("Error getting node hostnames: %v", err) @@ -55,7 +55,7 @@ var _ = SIGDescribe("crictl", func() { for _, testCase := range testCases { // Choose an arbitrary node to test. host := hosts[0] - By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd)) + ginkgo.By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd)) result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index 26d8f80808d..ce611148595 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -29,8 +29,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("Events", func() { @@ -45,7 +45,7 @@ var _ = SIGDescribe("Events", func() { podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - By("creating the pod") + ginkgo.By("creating the pod") name := "send-events-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) pod := &v1.Pod{ @@ -67,9 +67,9 @@ var _ = SIGDescribe("Events", func() { }, } - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") defer func() { - By("deleting the pod") + ginkgo.By("deleting the pod") podClient.Delete(pod.Name, nil) }() if _, err := podClient.Create(pod); err != nil { @@ -78,13 +78,13 @@ var _ = SIGDescribe("Events", func() { framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("verifying the pod is in kubernetes") + ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - Expect(len(pods.Items)).To(Equal(1)) + gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) - By("retrieving the pod") + ginkgo.By("retrieving the pod") podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get pod: %v", err) @@ -92,7 +92,7 @@ var _ = SIGDescribe("Events", func() { e2elog.Logf("%+v\n", podWithUid) var events *v1.EventList // Check for scheduler event about the pod. - By("checking for scheduler event about the pod") + ginkgo.By("checking for scheduler event about the pod") framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { selector := fields.Set{ "involvedObject.kind": "Pod", @@ -112,7 +112,7 @@ var _ = SIGDescribe("Events", func() { return false, nil })) // Check for kubelet event about the pod. - By("checking for kubelet event about the pod") + ginkgo.By("checking for kubelet event about the pod") framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { selector := fields.Set{ "involvedObject.uid": string(podWithUid.UID), diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index fba5e32f9a6..b7158a16aea 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -35,8 +35,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -119,7 +119,7 @@ func stopNfsServer(serverPod *v1.Pod) { // will execute the passed in shell cmd. Waits for the pod to start. // Note: the nfs plugin is defined inline, no PV or PVC. 
func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod { - By("create pod using nfs volume") + ginkgo.By("create pod using nfs volume") isPrivileged := true cmdLine := []string{"-c", cmd} @@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, }, } rtnPod, err := c.CoreV1().Pods(ns).Create(pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = f.WaitForPodReady(rtnPod.Name) // running & ready - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return rtnPod } @@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) { mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs") // use ip rather than hostname in GCE nodeIP, err := framework.GetHostExternalAddress(c, pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) condMsg := "deleted" if !expectClean { @@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) { e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg) err = wait.Poll(poll, timeout, func() (bool, error) { result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2essh.LogResult(result) ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0) if expectClean && ok { // keep trying @@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) { } return true, nil // done, host is as expected }) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg)) } if expectClean { @@ -244,7 +244,7 @@ var _ = SIGDescribe("kubelet", func() { ) f := framework.NewDefaultFramework("kubelet") - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) @@ -265,14 +265,14 @@ var _ = SIGDescribe("kubelet", func() { {podsPerNode: 10, timeout: 1 * time.Minute}, } - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Use node labels to restrict the pods to be assigned only to the // nodes we observe initially. 
nodeLabels = make(map[string]string) nodeLabels["kubelet_cleanup"] = "true" nodes := framework.GetReadySchedulableNodesOrDie(c) numNodes = len(nodes.Items) - Expect(numNodes).NotTo(BeZero()) + gomega.Expect(numNodes).NotTo(gomega.BeZero()) nodeNames = sets.NewString() // If there are a lot of nodes, we don't want to use all of them // (if there are 1000 nodes in the cluster, starting 10 pods/node @@ -297,7 +297,7 @@ var _ = SIGDescribe("kubelet", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { if resourceMonitor != nil { resourceMonitor.Stop() } @@ -312,30 +312,30 @@ var _ = SIGDescribe("kubelet", func() { for _, itArg := range deleteTests { name := fmt.Sprintf( "kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout) - It(name, func() { + ginkgo.It(name, func() { totalPods := itArg.podsPerNode * numNodes - By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) + ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID())) - Expect(framework.RunRC(testutils.RCConfig{ + gomega.Expect(framework.RunRC(testutils.RCConfig{ Client: f.ClientSet, Name: rcName, Namespace: f.Namespace.Name, Image: imageutils.GetPauseImageName(), Replicas: totalPods, NodeSelector: nodeLabels, - })).NotTo(HaveOccurred()) + })).NotTo(gomega.HaveOccurred()) // Perform a sanity check so that we know all desired pods are // running on the nodes according to kubelet. The timeout is set to // only 30 seconds here because framework.RunRC already waited for all pods to // transition to the running status. - Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, - time.Second*30)).NotTo(HaveOccurred()) + gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, + time.Second*30)).NotTo(gomega.HaveOccurred()) if resourceMonitor != nil { resourceMonitor.LogLatest() } - By("Deleting the RC") + ginkgo.By("Deleting the RC") framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) // Check that the pods really are gone by querying /runningpods on the // node. The /runningpods handler checks the container runtime (or its @@ -345,8 +345,8 @@ var _ = SIGDescribe("kubelet", func() { // - a bug in graceful termination (if it is enabled) // - docker slow to delete pods (or resource problems causing slowness) start := time.Now() - Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, - itArg.timeout)).NotTo(HaveOccurred()) + gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, + itArg.timeout)).NotTo(gomega.HaveOccurred()) e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), time.Since(start)) if resourceMonitor != nil { @@ -369,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() { // If the nfs-server pod is deleted the client pod's mount can not be unmounted. // If the nfs-server pod is deleted and re-created, due to having a different ip // addr, the client pod's mount still cannot be unmounted. 
- Context("Host cleanup after disrupting NFS volume [NFS]", func() { + ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() { // issue #31272 var ( nfsServerPod *v1.Pod @@ -389,38 +389,38 @@ var _ = SIGDescribe("kubelet", func() { }, } - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) _, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) }) - AfterEach(func() { + ginkgo.AfterEach(func() { err := framework.DeletePodWithWait(f, c, pod) - Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name) err = framework.DeletePodWithWait(f, c, nfsServerPod) - Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name) }) // execute It blocks from above table of tests for _, t := range testTbl { - It(t.itDescr, func() { + ginkgo.It(t.itDescr, func() { pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd) - By("Stop the NFS server") + ginkgo.By("Stop the NFS server") stopNfsServer(nfsServerPod) - By("Delete the pod mounted to the NFS volume -- expect failure") + ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure") err := framework.DeletePodWithWait(f, c, pod) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) // pod object is now stale, but is intentionally not nil - By("Check if pod's host has been cleaned up -- expect not") + ginkgo.By("Check if pod's host has been cleaned up -- expect not") checkPodCleanup(c, pod, false) - By("Restart the nfs server") + ginkgo.By("Restart the nfs server") restartNfsServer(nfsServerPod) - By("Verify that the deleted client pod is now cleaned up") + ginkgo.By("Verify that the deleted client pod is now cleaned up") checkPodCleanup(c, pod, true) }) } diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index 0ba0fc8ea0c..790387406c5 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -30,8 +30,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -66,23 +66,23 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) { numNodes := nodeNames.Len() totalPods := podsPerNode * numNodes - By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) + ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID())) // TODO: Use a more realistic workload - Expect(framework.RunRC(testutils.RCConfig{ + gomega.Expect(framework.RunRC(testutils.RCConfig{ Client: f.ClientSet, Name: rcName, Namespace: f.Namespace.Name, Image: imageutils.GetPauseImageName(), Replicas: totalPods, - })).NotTo(HaveOccurred()) + })).NotTo(gomega.HaveOccurred()) // Log once and flush the stats. rm.LogLatest() rm.Reset() - By("Start monitoring resource usage") + ginkgo.By("Start monitoring resource usage") // Periodically dump the cpu summary until the deadline is met. 
// Note that without calling framework.ResourceMonitor.Reset(), the stats // would occupy increasingly more memory. This should be fine @@ -100,10 +100,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames logPodsOnNodes(f.ClientSet, nodeNames.List()) } - By("Reporting overall resource usage") + ginkgo.By("Reporting overall resource usage") logPodsOnNodes(f.ClientSet, nodeNames.List()) usageSummary, err := rm.GetLatest() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO(random-liu): Remove the original log when we migrate to new perfdash e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary)) // Log perf result @@ -116,7 +116,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary))) verifyCPULimits(expectedCPU, cpuSummary) - By("Deleting the RC") + ginkgo.By("Deleting the RC") framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) } @@ -197,7 +197,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { var om *framework.RuntimeOperationMonitor var rm *framework.ResourceMonitor - BeforeEach(func() { + ginkgo.BeforeEach(func() { nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeNames = sets.NewString() for _, node := range nodes.Items { @@ -208,7 +208,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { rm.Start() }) - AfterEach(func() { + ginkgo.AfterEach(func() { rm.Stop() result := om.GetLatestRuntimeOperationErrorRate() e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result)) @@ -260,7 +260,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { podsPerNode := itArg.podsPerNode name := fmt.Sprintf( "resource tracking for %d pods per node", podsPerNode) - It(name, func() { + ginkgo.It(name, func() { runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) }) } @@ -271,7 +271,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { podsPerNode := density[i] name := fmt.Sprintf( "resource tracking for %d pods per node", podsPerNode) - It(name, func() { + ginkgo.It(name, func() { runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil) }) } diff --git a/test/e2e/node/mount_propagation.go b/test/e2e/node/mount_propagation.go index 14eba769289..48427871d73 100644 --- a/test/e2e/node/mount_propagation.go +++ b/test/e2e/node/mount_propagation.go @@ -27,8 +27,8 @@ import ( e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod { @@ -80,7 +80,7 @@ func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode var _ = SIGDescribe("Mount propagation", func() { f := framework.NewDefaultFramework("mount-propagation") - It("should propagate mounts to the host", func() { + ginkgo.It("should propagate mounts to the host", func() { // This test runs two pods: master and slave with respective mount // propagation on common /var/lib/kubelet/XXXX directory. Both mount a // tmpfs to a subdirectory there. We check that these mounts are @@ -88,13 +88,13 @@ var _ = SIGDescribe("Mount propagation", func() { // Pick a node where all pods will run. 
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling") + gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling") node := &nodes.Items[0] // Fail the test if the namespace is not set. We expect that the // namespace is unique and we might delete user data if it's not. if len(f.Namespace.Name) == 0 { - Expect(f.Namespace.Name).ToNot(Equal("")) + gomega.Expect(f.Namespace.Name).ToNot(gomega.Equal("")) return } @@ -172,10 +172,10 @@ var _ = SIGDescribe("Mount propagation", func() { shouldBeVisible := mounts.Has(mountName) if shouldBeVisible { framework.ExpectNoError(err, "%s: failed to run %q", msg, cmd) - Expect(stdout).To(Equal(mountName), msg) + gomega.Expect(stdout).To(gomega.Equal(mountName), msg) } else { // We *expect* cat to return error here - Expect(err).To(HaveOccurred(), msg) + gomega.Expect(err).To(gomega.HaveOccurred(), msg) } } } diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go index 8c7094c7432..1c27a231b1e 100644 --- a/test/e2e/node/node_problem_detector.go +++ b/test/e2e/node/node_problem_detector.go @@ -32,8 +32,8 @@ import ( e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" testutils "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) // This test checks if node-problem-detector (NPD) runs fine without error on @@ -45,7 +45,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { ) f := framework.NewDefaultFramework("node-problem-detector") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessSSHKeyPresent() framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) framework.SkipUnlessProviderIs("gce", "gke") @@ -53,10 +53,10 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute) }) - It("should run without error", func() { - By("Getting all nodes and their SSH-able IP addresses") + ginkgo.It("should run without error", func() { + ginkgo.By("Getting all nodes and their SSH-able IP addresses") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodes.Items)).NotTo(BeZero()) + gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero()) hosts := []string{} for _, node := range nodes.Items { for _, addr := range node.Status.Addresses { @@ -66,7 +66,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { } } } - Expect(len(hosts)).To(Equal(len(nodes.Items))) + gomega.Expect(len(hosts)).To(gomega.Equal(len(nodes.Items))) isStandaloneMode := make(map[string]bool) cpuUsageStats := make(map[string][]float64) @@ -84,22 +84,22 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) isStandaloneMode[host] = (err == nil && result.Code == 0) - By(fmt.Sprintf("Check node %q has node-problem-detector process", host)) + ginkgo.By(fmt.Sprintf("Check node %q has node-problem-detector process", host)) // Using brackets "[n]" is a trick to prevent grep command itself from // showing up, because string text "[n]ode-problem-detector" does not // match regular expression "[n]ode-problem-detector". 
psCmd := "ps aux | grep [n]ode-problem-detector" result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) - Expect(result.Code).To(BeZero()) - Expect(result.Stdout).To(ContainSubstring("node-problem-detector")) + gomega.Expect(result.Code).To(gomega.BeZero()) + gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector")) - By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host)) + ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host)) journalctlCmd := "sudo journalctl -u node-problem-detector" result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) - Expect(result.Code).To(BeZero()) - Expect(result.Stdout).NotTo(ContainSubstring("node-problem-detector.service: Failed")) + gomega.Expect(result.Code).To(gomega.BeZero()) + gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed")) if isStandaloneMode[host] { cpuUsage, uptime := getCpuStat(f, host) @@ -107,29 +107,29 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { uptimeStats[host] = append(uptimeStats[host], uptime) } - By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host)) + ginkgo.By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host)) log := "INFO: task umount.aufs:21568 blocked for more than 120 seconds." injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\"" _, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) - Expect(result.Code).To(BeZero()) + gomega.Expect(result.Code).To(gomega.BeZero()) } - By("Check node-problem-detector can post conditions and events to API server") + ginkgo.By("Check node-problem-detector can post conditions and events to API server") for _, node := range nodes.Items { - By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name)) - Eventually(func() error { + ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name)) + gomega.Eventually(func() error { return verifyNodeCondition(f, "KernelDeadlock", v1.ConditionTrue, "AUFSUmountHung", node.Name) - }, pollTimeout, pollInterval).Should(Succeed()) + }, pollTimeout, pollInterval).Should(gomega.Succeed()) - By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name)) + ginkgo.By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name)) eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()} - Eventually(func() error { + gomega.Eventually(func() error { return verifyEvents(f, eventListOptions, 1, "AUFSUmountHung", node.Name) - }, pollTimeout, pollInterval).Should(Succeed()) + }, pollTimeout, pollInterval).Should(gomega.Succeed()) } - By("Gather node-problem-detector cpu and memory stats") + ginkgo.By("Gather node-problem-detector cpu and memory stats") numIterations := 60 for i := 1; i <= numIterations; i++ { for j, host := range hosts { @@ -217,22 +217,22 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64 memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat" result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider) 
framework.ExpectNoError(err) - Expect(result.Code).To(BeZero()) + gomega.Expect(result.Code).To(gomega.BeZero()) lines := strings.Split(result.Stdout, "\n") memoryUsage, err := strconv.ParseFloat(lines[0], 64) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) var totalInactiveFile float64 for _, line := range lines[1:] { tokens := strings.Split(line, " ") if tokens[0] == "total_rss" { rss, err = strconv.ParseFloat(tokens[1], 64) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) } if tokens[0] == "total_inactive_file" { totalInactiveFile, err = strconv.ParseFloat(tokens[1], 64) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) } } @@ -253,7 +253,7 @@ func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) { cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'" result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) - Expect(result.Code).To(BeZero()) + gomega.Expect(result.Code).To(gomega.BeZero()) lines := strings.Split(result.Stdout, "\n") usage, err = strconv.ParseFloat(lines[0], 64) @@ -279,6 +279,6 @@ func getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, work hasNpdPod = true break } - Expect(hasNpdPod).To(BeTrue()) + gomega.Expect(hasNpdPod).To(gomega.BeTrue()) return } diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index e834376a9cd..73de4edc863 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,7 +36,7 @@ import ( // Slow by design (7 min) var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() { f := framework.NewDefaultFramework("pod-garbage-collector") - It("should handle the creation of 1000 pods", func() { + ginkgo.It("should handle the creation of 1000 pods", func() { var count int for count < 1000 { pod, err := createTerminatingPod(f) @@ -62,7 +62,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" timeout := 2 * time.Minute gcThreshold := 100 - By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) + ginkgo.By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 1c070911114..f44dcbe8c87 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -34,8 +34,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -44,7 +44,7 @@ var _ = SIGDescribe("Pods Extended", func() { framework.KubeDescribe("Delete Grace Period", func() { var podClient *framework.PodClient - BeforeEach(func() { + ginkgo.BeforeEach(func() { podClient = f.PodClient() }) @@ -54,7 +54,7 @@ var _ = SIGDescribe("Pods Extended", func() { Description: Create a pod, make sure it is running. Create a 'kubectl local proxy', capture the port the proxy is listening. Using the http client send a ‘delete’ with gracePeriodSeconds=30. 
Pod SHOULD get deleted within 30 seconds. */ framework.ConformanceIt("should be submitted and removed", func() { - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-submit-remove-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) pod := &v1.Pod{ @@ -75,51 +75,51 @@ var _ = SIGDescribe("Pods Extended", func() { }, } - By("setting up selector") + ginkgo.By("setting up selector") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(pod) - By("verifying the pod is in kubernetes") + ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) // We need to wait for the pod to be running, otherwise the deletion // may be carried out immediately rather than gracefully. framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) // save the running pod pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") // start local proxy, so we can send graceful deletion over query string, rather than body parameter cmd := framework.KubectlCmd("proxy", "-p", "0") stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) - Expect(err).NotTo(HaveOccurred(), "failed to start up proxy") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to start up proxy") defer stdout.Close() defer stderr.Close() defer framework.TryKill(cmd) buf := make([]byte, 128) var n int n, err = stdout.Read(buf) - Expect(err).NotTo(HaveOccurred(), "failed to read from kubectl proxy stdout") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from kubectl proxy stdout") output := string(buf[:n]) proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)") match := proxyRegexp.FindStringSubmatch(output) - Expect(len(match)).To(Equal(2)) + gomega.Expect(len(match)).To(gomega.Equal(2)) port, err := strconv.Atoi(match[1]) - Expect(err).NotTo(HaveOccurred(), "failed to convert port into string") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to convert port into string") endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name) tr := &http.Transport{ @@ -127,21 +127,21 @@ var _ = SIGDescribe("Pods Extended", func() { } client := &http.Client{Transport: tr} req, err := http.NewRequest("DELETE", endpoint, nil) - Expect(err).NotTo(HaveOccurred(), "failed to create http request") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create http request") - By("deleting the pod 
gracefully") + ginkgo.By("deleting the pod gracefully") rsp, err := client.Do(req) - Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete") - Expect(rsp.StatusCode).Should(Equal(http.StatusOK), "failed to delete gracefully by client request") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to use http client to send delete") + gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request") var lastPod v1.Pod err = json.NewDecoder(rsp.Body).Decode(&lastPod) - Expect(err).NotTo(HaveOccurred(), "failed to decode graceful termination proxy response") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to decode graceful termination proxy response") defer rsp.Body.Close() - By("verifying the kubelet observed the termination notice") + ginkgo.By("verifying the kubelet observed the termination notice") - Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { + gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName) if err != nil { e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) @@ -159,23 +159,23 @@ var _ = SIGDescribe("Pods Extended", func() { } e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed") return true, nil - })).NotTo(HaveOccurred(), "kubelet never observed the termination notice") + })).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice") - Expect(lastPod.DeletionTimestamp).ToNot(BeNil()) - Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) + gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil()) + gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero()) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pods") - Expect(len(pods.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) }) }) framework.KubeDescribe("Pods Set QOS Class", func() { var podClient *framework.PodClient - BeforeEach(func() { + ginkgo.BeforeEach(func() { podClient = f.PodClient() }) /* @@ -184,7 +184,7 @@ var _ = SIGDescribe("Pods Extended", func() { Description: Create a Pod with CPU and Memory request and limits. Pos status MUST have QOSClass set to PodQOSGuaranteed. 
*/ framework.ConformanceIt("should be submitted and removed ", func() { - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-qos-class-" + string(uuid.NewUUID()) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -213,13 +213,13 @@ var _ = SIGDescribe("Pods Extended", func() { }, } - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(pod) - By("verifying QOS class is set on the pod") + ginkgo.By("verifying QOS class is set on the pod") pod, err := podClient.Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed) }) }) }) diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index 8c061471535..20b8cd59b9a 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -32,8 +32,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) // partially cloned from webserver.go @@ -57,17 +57,17 @@ func testPreStop(c clientset.Interface, ns string) { }, }, } - By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) + ginkgo.By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) podDescr, err := c.CoreV1().Pods(ns).Create(podDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. defer func() { - By("Deleting the server pod") + ginkgo.By("Deleting the server pod") c.CoreV1().Pods(ns).Delete(podDescr.Name, nil) }() - By("Waiting for pods to come up.") + ginkgo.By("Waiting for pods to come up.") err = framework.WaitForPodRunningInNamespace(c, podDescr) framework.ExpectNoError(err, "waiting for server pod to start") @@ -100,7 +100,7 @@ func testPreStop(c clientset.Interface, ns string) { }, } - By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) + ginkgo.By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) preStopDescr, err = c.CoreV1().Pods(ns).Create(preStopDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true @@ -108,7 +108,7 @@ func testPreStop(c clientset.Interface, ns string) { // At the end of the test, clean up by removing the pod. defer func() { if deletePreStop { - By("Deleting the tester pod") + ginkgo.By("Deleting the tester pod") c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil) } }() @@ -117,7 +117,7 @@ func testPreStop(c clientset.Interface, ns string) { framework.ExpectNoError(err, "waiting for tester pod to start") // Delete the pod with the preStop handler. 
- By("Deleting pre-stop pod") + ginkgo.By("Deleting pre-stop pod") if err := c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil); err == nil { deletePreStop = false } @@ -144,7 +144,7 @@ func testPreStop(c clientset.Interface, ns string) { framework.Failf("Error validating prestop: %v", err) return true, err } - By(fmt.Sprintf("Error validating prestop: %v", err)) + ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err)) } else { e2elog.Logf("Saw: %s", string(body)) state := State{} @@ -165,7 +165,7 @@ func testPreStop(c clientset.Interface, ns string) { var _ = SIGDescribe("PreStop", func() { f := framework.NewDefaultFramework("prestop") var podClient *framework.PodClient - BeforeEach(func() { + ginkgo.BeforeEach(func() { podClient = f.PodClient() }) @@ -178,36 +178,36 @@ var _ = SIGDescribe("PreStop", func() { testPreStop(f.ClientSet, f.Namespace.Name) }) - It("graceful pod terminated should wait until preStop hook completes the process", func() { + ginkgo.It("graceful pod terminated should wait until preStop hook completes the process", func() { gracefulTerminationPeriodSeconds := int64(30) - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-prestop-hook-" + string(uuid.NewUUID()) pod := getPodWithpreStopLifeCycle(name) - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(pod) - By("waiting for pod running") + ginkgo.By("waiting for pod running") framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) var err error pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") - By("deleting the pod gracefully") + ginkgo.By("deleting the pod gracefully") err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds)) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod") //wait up to graceful termination period seconds time.Sleep(30 * time.Second) - By("verifying the pod running state after graceful termination") + ginkgo.By("verifying the pod running state after graceful termination") result := &v1.PodList{} err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort) - Expect(err).NotTo(HaveOccurred(), "failed to get the pods of the node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the pods of the node") err = client.Into(result) - Expect(err).NotTo(HaveOccurred(), "failed to parse the pods of the node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to parse the pods of the node") for _, kubeletPod := range result.Items { if pod.Name != kubeletPod.Name { diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index 5ce7fd91d1c..1420ff5b373 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -31,8 +31,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) func scTestPod(hostIPC bool, hostPID bool) *v1.Pod { @@ -63,7 +63,7 @@ func scTestPod(hostIPC bool, hostPID bool) *v1.Pod { var _ = SIGDescribe("Security Context", func() { f := framework.NewDefaultFramework("security-context") - It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() { + ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() { pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"id", "-G"} pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678} @@ -71,7 +71,7 @@ var _ = SIGDescribe("Security Context", func() { f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) }) - It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() { + ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() { pod := scTestPod(false, false) userID := int64(1001) pod.Spec.SecurityContext.RunAsUser = &userID @@ -83,7 +83,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() { + ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() { pod := scTestPod(false, false) userID := int64(1001) groupID := int64(2002) @@ -97,7 +97,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() { + ginkgo.It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() { pod := scTestPod(false, false) userID := int64(1001) overrideUserID := int64(1002) @@ -112,7 +112,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() { + ginkgo.It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() { pod := scTestPod(false, false) userID := int64(1001) groupID := int64(2001) @@ -131,19 +131,19 @@ var _ = SIGDescribe("Security Context", func() { }) }) - It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() { + ginkgo.It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() { testPodSELinuxLabeling(f, false, false) }) - It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() { + ginkgo.It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() { testPodSELinuxLabeling(f, true, false) }) - It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() { + ginkgo.It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() { testPodSELinuxLabeling(f, false, true) }) - It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]", func() { + ginkgo.It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]", func() { // TODO: port to SecurityContext as soon as seccomp is out of alpha pod := scTestPod(false, false) pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined" @@ -152,7 +152,7 @@ var _ = SIGDescribe("Security Context", func() { f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled }) - It("should 
support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]", func() { + ginkgo.It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]", func() { // TODO: port to SecurityContext as soon as seccomp is out of alpha pod := scTestPod(false, false) pod.Annotations[v1.SeccompPodAnnotationKey] = "unconfined" @@ -160,7 +160,7 @@ var _ = SIGDescribe("Security Context", func() { f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled }) - It("should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]", func() { + ginkgo.It("should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]", func() { // TODO: port to SecurityContext as soon as seccomp is out of alpha pod := scTestPod(false, false) pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = v1.SeccompProfileRuntimeDefault @@ -168,7 +168,7 @@ var _ = SIGDescribe("Security Context", func() { f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered }) - It("should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]", func() { + ginkgo.It("should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]", func() { // TODO: port to SecurityContext as soon as seccomp is out of alpha pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} @@ -212,18 +212,18 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) testContent := "hello" testFilePath := mountPath + "/TEST" err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath) - Expect(err).To(BeNil()) - Expect(content).To(ContainSubstring(testContent)) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Expect(content).To(gomega.ContainSubstring(testContent)) foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Confirm that the file can be accessed from a second // pod using host_path with the same MCS label volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeVolumeDir, foundPod.UID, volumeName) - By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir)) + ginkgo.By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir)) pod = scTestPod(hostIPC, hostPID) pod.Spec.NodeName = foundPod.Spec.NodeName volumeMounts := []v1.VolumeMount{ @@ -266,5 +266,5 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) framework.ExpectNoError(err, "Error waiting for pod to run %v", pod) content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath) - Expect(content).NotTo(ContainSubstring(testContent)) + gomega.Expect(content).NotTo(gomega.ContainSubstring(testContent)) } diff --git a/test/e2e/node/ssh.go b/test/e2e/node/ssh.go index 8bfce883d14..85668576b48 100644 --- a/test/e2e/node/ssh.go +++ b/test/e2e/node/ssh.go @@ -24,7 +24,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2essh 
"k8s.io/kubernetes/test/e2e/framework/ssh" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) const maxNodes = 100 @@ -33,7 +33,7 @@ var _ = SIGDescribe("SSH", func() { f := framework.NewDefaultFramework("ssh") - BeforeEach(func() { + ginkgo.BeforeEach(func() { // When adding more providers here, also implement their functionality in e2essh.GetSigner(...). framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) @@ -42,9 +42,9 @@ var _ = SIGDescribe("SSH", func() { framework.SkipUnlessSSHKeyPresent() }) - It("should SSH to all nodes and run commands", func() { + ginkgo.It("should SSH to all nodes and run commands", func() { // Get all nodes' external IPs. - By("Getting all nodes' SSH-able IP addresses") + ginkgo.By("Getting all nodes' SSH-able IP addresses") hosts, err := e2essh.NodeSSHHosts(f.ClientSet) if err != nil { framework.Failf("Error getting node hostnames: %v", err) @@ -76,7 +76,7 @@ var _ = SIGDescribe("SSH", func() { nodes = maxNodes } testhosts := hosts[:nodes] - By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd)) + ginkgo.By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd)) for _, host := range testhosts { result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) @@ -104,7 +104,7 @@ var _ = SIGDescribe("SSH", func() { } // Quickly test that SSH itself errors correctly. - By("SSH'ing to a nonexistent host") + ginkgo.By("SSH'ing to a nonexistent host") if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil { framework.Failf("Expected error trying to SSH to nonexistent host.") } diff --git a/test/e2e/node/ttlafterfinished.go b/test/e2e/node/ttlafterfinished.go index e4119d64235..fc30bd33797 100644 --- a/test/e2e/node/ttlafterfinished.go +++ b/test/e2e/node/ttlafterfinished.go @@ -27,8 +27,8 @@ import ( jobutil "k8s.io/kubernetes/test/e2e/framework/job" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const dummyFinalizer = "k8s.io/dummy-finalizer" @@ -36,7 +36,7 @@ const dummyFinalizer = "k8s.io/dummy-finalizer" var _ = framework.KubeDescribe("[Feature:TTLAfterFinished][NodeAlphaFeature:TTLAfterFinished]", func() { f := framework.NewDefaultFramework("ttlafterfinished") - It("job should be deleted once it finishes after TTL seconds", func() { + ginkgo.It("job should be deleted once it finishes after TTL seconds", func() { testFinishedJob(f) }) }) @@ -50,11 +50,11 @@ func cleanupJob(f *framework.Framework, job *batch.Job) { j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil) } _, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout) err = jobutil.WaitForAllJobPodsGone(c, ns, job.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } func testFinishedJob(f *framework.Framework) { @@ -73,26 +73,26 @@ func testFinishedJob(f *framework.Framework) { e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name) job, err := jobutil.CreateJob(c, ns, job) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Wait for the Job to finish") err = jobutil.WaitForJobFinish(c, ns, job.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Wait for TTL after finished controller to delete the Job") err = jobutil.WaitForJobDeleting(c, ns, job.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished") job, err = jobutil.GetJob(c, ns, job.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) finishTime := jobutil.FinishTime(job) finishTimeUTC := finishTime.UTC() - Expect(finishTime.IsZero()).NotTo(BeTrue()) + gomega.Expect(finishTime.IsZero()).NotTo(gomega.BeTrue()) deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC() - Expect(deleteAtUTC).NotTo(BeNil()) + gomega.Expect(deleteAtUTC).NotTo(gomega.BeNil()) expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second) - Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse()) + gomega.Expect(deleteAtUTC.Before(expireAtUTC)).To(gomega.BeFalse()) } From 73c2daeeeadb5d91b8480205e6317537e8d00159 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 12:33:36 +0800 Subject: [PATCH 121/194] replace test error checking with more readable way --- test/e2e/node/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index b7158a16aea..d167dc7b0ab 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -411,7 +411,7 @@ var _ = SIGDescribe("kubelet", func() { ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure") err := framework.DeletePodWithWait(f, c, pod) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) // pod object is now stale, but is intentionally not nil ginkgo.By("Check if pod's host has been cleaned up -- expect not") From 124efde4f8c055c6295c5fbb88b63e98d5270f62 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 12:37:01 +0800 Subject: [PATCH 122/194] fix golint error in e2e/node --- hack/.golint_failures | 1 - 
test/e2e/node/events.go | 8 ++++---- test/e2e/node/framework.go | 1 + test/e2e/node/node_problem_detector.go | 6 +++--- test/e2e/node/pre_stop.go | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 064c109f162..c1a9aa1179d 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -606,7 +606,6 @@ test/e2e/chaosmonkey test/e2e/common test/e2e/framework test/e2e/lifecycle/bootstrap -test/e2e/node test/e2e/scalability test/e2e/scheduling test/e2e/storage/drivers diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index ce611148595..aa7841bf48e 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -85,18 +85,18 @@ var _ = SIGDescribe("Events", func() { gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) ginkgo.By("retrieving the pod") - podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{}) + podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get pod: %v", err) } - e2elog.Logf("%+v\n", podWithUid) + e2elog.Logf("%+v\n", podWithUID) var events *v1.EventList // Check for scheduler event about the pod. ginkgo.By("checking for scheduler event about the pod") framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { selector := fields.Set{ "involvedObject.kind": "Pod", - "involvedObject.uid": string(podWithUid.UID), + "involvedObject.uid": string(podWithUID.UID), "involvedObject.namespace": f.Namespace.Name, "source": v1.DefaultSchedulerName, }.AsSelector().String() @@ -115,7 +115,7 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("checking for kubelet event about the pod") framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { selector := fields.Set{ - "involvedObject.uid": string(podWithUid.UID), + "involvedObject.uid": string(podWithUID.UID), "involvedObject.kind": "Pod", "involvedObject.namespace": f.Namespace.Name, "source": "kubelet", diff --git a/test/e2e/node/framework.go b/test/e2e/node/framework.go index a206bef7008..e6ca8cd1f2d 100644 --- a/test/e2e/node/framework.go +++ b/test/e2e/node/framework.go @@ -18,6 +18,7 @@ package node import "k8s.io/kubernetes/test/e2e/framework" +// SIGDescribe annotates the test with the SIG label. 
func SIGDescribe(text string, body func()) bool { return framework.KubeDescribe("[sig-node] "+text, body) } diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go index 1c27a231b1e..c9f4c4ae3ed 100644 --- a/test/e2e/node/node_problem_detector.go +++ b/test/e2e/node/node_problem_detector.go @@ -102,7 +102,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed")) if isStandaloneMode[host] { - cpuUsage, uptime := getCpuStat(f, host) + cpuUsage, uptime := getCPUStat(f, host) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) uptimeStats[host] = append(uptimeStats[host], uptime) } @@ -138,7 +138,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { rssStats[host] = append(rssStats[host], rss) workingSetStats[host] = append(workingSetStats[host], workingSet) if i == numIterations { - cpuUsage, uptime := getCpuStat(f, host) + cpuUsage, uptime := getCPUStat(f, host) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) uptimeStats[host] = append(uptimeStats[host], uptime) } @@ -249,7 +249,7 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64 return } -func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) { +func getCPUStat(f *framework.Framework, host string) (usage, uptime float64) { cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'" result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index 20b8cd59b9a..f03a621727e 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -36,7 +36,7 @@ import ( "github.com/onsi/gomega" ) -// partially cloned from webserver.go +// State partially cloned from webserver.go type State struct { Received map[string]int } From 950f6e868caa0384c21b572961f6922817e31c9d Mon Sep 17 00:00:00 2001 From: draveness Date: Thu, 9 May 2019 09:48:02 +0800 Subject: [PATCH 123/194] refactor: use framework.ExpectNoError instead --- test/e2e/common/pods.go | 6 +++--- test/e2e/upgrades/apps/job.go | 7 +++---- test/e2e/upgrades/apps/statefulset.go | 5 ++--- test/e2e/upgrades/storage/BUILD | 1 - test/e2e/upgrades/storage/persistent_volumes.go | 3 +-- test/e2e/upgrades/storage/volume_mode.go | 11 +++++------ 6 files changed, 14 insertions(+), 19 deletions(-) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index 32a20d19778..c2e7af9f452 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -814,19 +814,19 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1)) _, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Sleep for 10 seconds. 
time.Sleep(maxReadyStatusUpdateTolerance) gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True") ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2)) _, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) validatePodReadiness(true) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1)) _, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) validatePodReadiness(false) }) diff --git a/test/e2e/upgrades/apps/job.go b/test/e2e/upgrades/apps/job.go index cd836e75c05..dc955003f02 100644 --- a/test/e2e/upgrades/apps/job.go +++ b/test/e2e/upgrades/apps/job.go @@ -24,7 +24,6 @@ import ( "k8s.io/kubernetes/test/e2e/upgrades" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) // JobUpgradeTest is a test harness for batch Jobs. @@ -44,11 +43,11 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) { t.job = jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6) job, err := jobutil.CreateJob(f.ClientSet, t.namespace, t.job) t.job = job - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Ensuring active pods == parallelism") err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } // Test verifies that the Jobs Pods are running after the an upgrade @@ -56,7 +55,7 @@ func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgr <-done ginkgo.By("Ensuring active pods == parallelism") err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } // Teardown cleans up any remaining resources. 
diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index bc476f9036a..7a9a11e9389 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -18,7 +18,6 @@ package upgrades import ( "github.com/onsi/ginkgo" - "github.com/onsi/gomega" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -69,12 +68,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) _, err := f.ClientSet.CoreV1().Services(ns).Create(t.service) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(t.set.Spec.Replicas) = 3 _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + t.set.Name) t.tester.Saturate(t.set) diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD index 4b28af94fad..8044b3d8846 100644 --- a/test/e2e/upgrades/storage/BUILD +++ b/test/e2e/upgrades/storage/BUILD @@ -22,7 +22,6 @@ go_library( "//test/e2e/storage/utils:go_default_library", "//test/e2e/upgrades:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", - "//vendor/github.com/onsi/gomega:go_default_library", ], ) diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go index dbcc331136d..9d12139680c 100644 --- a/test/e2e/upgrades/storage/persistent_volumes.go +++ b/test/e2e/upgrades/storage/persistent_volumes.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework/volume" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" "k8s.io/kubernetes/test/e2e/upgrades" ) @@ -69,7 +68,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating the PV and PVC") t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc)) ginkgo.By("Consuming the PV before upgrade") diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go index f77b2cef498..da4048bcd72 100644 --- a/test/e2e/upgrades/storage/volume_mode.go +++ b/test/e2e/upgrades/storage/volume_mode.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/test/e2e/upgrades" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) const devicePath = "/mnt/volume1" @@ -82,20 +81,20 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { } t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns) t.pvc, err = framework.CreatePVC(cs, ns, t.pvc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Consuming the PVC before downgrade") t.pod, err = 
framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Checking if PV exists as expected volume mode") utils.CheckVolumeModeOfPath(t.pod, block, devicePath) From 55d45867e6a8b6336be0c56da0b4843dd552b1b0 Mon Sep 17 00:00:00 2001 From: Baasbank Date: Fri, 3 May 2019 23:10:40 +0100 Subject: [PATCH 124/194] fixes golint error in staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1 fixes golint error in staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller fixes golint errors in staging/src/k8s.io/sample-apiserver/pkg/cmd/server fixes golint errors in staging/src/k8s.io/sample-apiserver/pkg/apiserver --- hack/.golint_failures | 4 ---- .../src/k8s.io/sample-apiserver/pkg/apiserver/apiserver.go | 7 ++++++- .../src/k8s.io/sample-apiserver/pkg/cmd/server/start.go | 6 ++++++ .../pkg/apis/samplecontroller/register.go | 1 + .../pkg/apis/samplecontroller/v1alpha1/register.go | 4 +++- 5 files changed, 16 insertions(+), 6 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 064c109f162..d0902d4beaf 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -593,12 +593,8 @@ staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer staging/src/k8s.io/sample-apiserver/pkg/apis/wardle staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 -staging/src/k8s.io/sample-apiserver/pkg/apiserver -staging/src/k8s.io/sample-apiserver/pkg/cmd/server staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder -staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller -staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1 test/e2e test/e2e/auth test/e2e/autoscaling diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/sample-apiserver/pkg/apiserver/apiserver.go index e93f1fa213a..384c46d4ae3 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apiserver/apiserver.go @@ -33,7 +33,10 @@ import ( ) var ( + // Scheme defines methods for serializing and deserializing API objects. Scheme = runtime.NewScheme() + // Codecs provides methods for retrieving codecs and serializers for specific + // versions and content types. Codecs = serializer.NewCodecFactory(Scheme) ) @@ -55,10 +58,12 @@ func init() { ) } +// ExtraConfig holds custom apiserver config type ExtraConfig struct { // Place you custom config here. } +// Config defines the config for the apiserver type Config struct { GenericConfig *genericapiserver.RecommendedConfig ExtraConfig ExtraConfig @@ -74,8 +79,8 @@ type completedConfig struct { ExtraConfig *ExtraConfig } +// CompletedConfig embeds a private pointer that cannot be instantiated outside of this package. type CompletedConfig struct { - // Embed a private pointer that cannot be instantiated outside of this package. 
*completedConfig } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index 4f17e82ba3e..fd3d365898b 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -41,6 +41,7 @@ import ( const defaultEtcdPathPrefix = "/registry/wardle.kubernetes.io" +// WardleServerOptions contains state for master/api server type WardleServerOptions struct { RecommendedOptions *genericoptions.RecommendedOptions @@ -49,6 +50,7 @@ type WardleServerOptions struct { StdErr io.Writer } +// NewWardleServerOptions returns a new WardleServerOptions func NewWardleServerOptions(out, errOut io.Writer) *WardleServerOptions { o := &WardleServerOptions{ RecommendedOptions: genericoptions.NewRecommendedOptions( @@ -92,12 +94,14 @@ func NewCommandStartWardleServer(defaults *WardleServerOptions, stopCh <-chan st return cmd } +// Validate validates WardleServerOptions func (o WardleServerOptions) Validate(args []string) error { errors := []error{} errors = append(errors, o.RecommendedOptions.Validate()...) return utilerrors.NewAggregate(errors) } +// Complete fills in fields required to have valid data func (o *WardleServerOptions) Complete() error { // register admission plugins banflunder.Register(o.RecommendedOptions.Admission.Plugins) @@ -108,6 +112,7 @@ func (o *WardleServerOptions) Complete() error { return nil } +// Config returns config for the api server given WardleServerOptions func (o *WardleServerOptions) Config() (*apiserver.Config, error) { // TODO have a "real" external address if err := o.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil { @@ -138,6 +143,7 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { return config, nil } +// RunWardleServer starts a new WardleServer given WardleServerOptions func (o WardleServerOptions) RunWardleServer(stopCh <-chan struct{}) error { config, err := o.Config() if err != nil { diff --git a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/register.go b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/register.go index 394f7967e2b..fd5a7888d75 100644 --- a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/register.go @@ -16,6 +16,7 @@ limitations under the License. package samplecontroller +// GroupName is the group name used in this package const ( GroupName = "samplecontroller.k8s.io" ) diff --git a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go index df5695eb092..e8e0a28efaa 100644 --- a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go @@ -38,8 +38,10 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder initializes a scheme builder SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) // Adds the list of known types to Scheme. 
From a4825d1cb84d06da9a7f91ed46fdb4efc23cc37f Mon Sep 17 00:00:00 2001 From: SataQiu Date: Sun, 12 May 2019 22:57:24 +0800 Subject: [PATCH 125/194] fix golint failures of test/e2e/auth --- hack/.golint_failures | 1 - test/e2e/auth/audit.go | 22 +++++----- test/e2e/auth/audit_dynamic.go | 6 +-- test/e2e/auth/certificates.go | 4 +- test/e2e/auth/framework.go | 1 + test/e2e/auth/metadata_concealment.go | 8 ++-- test/e2e/auth/node_authn.go | 20 ++++----- test/e2e/auth/node_authz.go | 62 +++++++++++++-------------- test/e2e/auth/pod_security_policy.go | 52 +++++++++++----------- test/e2e/auth/service_accounts.go | 48 ++++++++++----------- 10 files changed, 112 insertions(+), 112 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 064c109f162..311a96ee845 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -600,7 +600,6 @@ staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1 test/e2e -test/e2e/auth test/e2e/autoscaling test/e2e/chaosmonkey test/e2e/common diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 579341831ad..dcd05853dbd 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -42,7 +42,7 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" jsonpatch "github.com/evanphx/json-patch" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var ( @@ -62,12 +62,12 @@ var ( var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { f := framework.NewDefaultFramework("audit") var namespace string - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce") namespace = f.Namespace.Name }) - It("should audit API calls to create, get, update, patch, delete, list, watch pods.", func() { + ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch pods.", func() { pod := &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "audit-pod", @@ -201,7 +201,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }) }) - It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() { + ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() { podLabels := map[string]string{"name": "audit-deployment-pod"} d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType) @@ -328,7 +328,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }) }) - It("should audit API calls to create, get, update, patch, delete, list, watch configmaps.", func() { + ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch configmaps.", func() { configMap := &apiv1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "audit-configmap", @@ -461,7 +461,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }) }) - It("should audit API calls to create, get, update, patch, delete, list, watch secrets.", func() { + ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch secrets.", func() { secret := &apiv1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "audit-secret", @@ -593,7 +593,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }) }) - It("should audit API calls to create and delete 
custom resource definition.", func() { + ginkgo.It("should audit API calls to create and delete custom resource definition.", func() { config, err := framework.LoadConfig() framework.ExpectNoError(err, "failed to load config") apiExtensionClient, err := apiextensionclientset.NewForConfig(config) @@ -654,12 +654,12 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }) // test authorizer annotations, RBAC is required. - It("should audit API calls to get a pod with unauthorized user.", func() { + ginkgo.It("should audit API calls to get a pod with unauthorized user.", func() { if !auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) { framework.Skipf("RBAC not enabled.") } - By("Creating a kubernetes client that impersonates an unauthorized anonymous user") + ginkgo.By("Creating a kubernetes client that impersonates an unauthorized anonymous user") config, err := framework.LoadConfig() framework.ExpectNoError(err) config.Impersonate = restclient.ImpersonationConfig{ @@ -691,8 +691,8 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }) }) - It("should list pods as impersonated user.", func() { - By("Creating a kubernetes client that impersonates an authorized user") + ginkgo.It("should list pods as impersonated user.", func() { + ginkgo.By("Creating a kubernetes client that impersonates an authorized user") config, err := framework.LoadConfig() framework.ExpectNoError(err) config.Impersonate = restclient.ImpersonationConfig{ diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index cb6183aadd0..671e60e145b 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -21,7 +21,7 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1" apiv1 "k8s.io/api/core/v1" @@ -44,10 +44,10 @@ import ( var _ = SIGDescribe("[Feature:DynamicAudit]", func() { f := framework.NewDefaultFramework("audit") - It("should dynamically audit API calls", func() { + ginkgo.It("should dynamically audit API calls", func() { namespace := f.Namespace.Name - By("Creating a kubernetes client that impersonates an unauthorized anonymous user") + ginkgo.By("Creating a kubernetes client that impersonates an unauthorized anonymous user") config, err := framework.LoadConfig() framework.ExpectNoError(err, "failed to fetch config") diff --git a/test/e2e/auth/certificates.go b/test/e2e/auth/certificates.go index 50b25bade47..4aca1875c48 100644 --- a/test/e2e/auth/certificates.go +++ b/test/e2e/auth/certificates.go @@ -31,13 +31,13 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Certificates API", func() { f := framework.NewDefaultFramework("certificates") - It("should support building a client with a CSR", func() { + ginkgo.It("should support building a client with a CSR", func() { const commonName = "tester-csr" pk, err := utils.NewPrivateKey() diff --git a/test/e2e/auth/framework.go b/test/e2e/auth/framework.go index 7f186e87886..70bfc631fcd 100644 --- a/test/e2e/auth/framework.go +++ b/test/e2e/auth/framework.go @@ -18,6 +18,7 @@ package auth import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. 
func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-auth] "+text, body) } diff --git a/test/e2e/auth/metadata_concealment.go b/test/e2e/auth/metadata_concealment.go index e3d94acb3a2..5d9c3f70b5d 100644 --- a/test/e2e/auth/metadata_concealment.go +++ b/test/e2e/auth/metadata_concealment.go @@ -23,16 +23,16 @@ import ( "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" imageutil "k8s.io/kubernetes/test/utils/image" ) var _ = SIGDescribe("Metadata Concealment", func() { f := framework.NewDefaultFramework("metadata-concealment") - It("should run a check-metadata-concealment job to completion", func() { + ginkgo.It("should run a check-metadata-concealment job to completion", func() { framework.SkipUnlessProviderIs("gce") - By("Creating a job") + ginkgo.By("Creating a job") job := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "check-metadata-concealment", @@ -57,7 +57,7 @@ var _ = SIGDescribe("Metadata Concealment", func() { job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job (%s:%s)", f.Namespace.Name, job.Name) - By("Ensuring job reaches completions") + ginkgo.By("Ensuring job reaches completions") err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1)) framework.ExpectNoError(err, "failed to ensure job completion (%s:%s)", f.Namespace.Name, job.Name) }) diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index ff7d311e8af..60bcddb3761 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -25,8 +25,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { @@ -34,12 +34,12 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { f := framework.NewDefaultFramework("node-authn") var ns string var nodeIPs []string - BeforeEach(func() { + ginkgo.BeforeEach(func() { ns = f.Namespace.Name nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) - Expect(len(nodeList.Items)).NotTo(BeZero()) + gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero()) pickedNode := nodeList.Items[0] nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP) @@ -50,20 +50,20 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { saName := "default" sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName) - Expect(len(sa.Secrets)).NotTo(BeZero()) + gomega.Expect(len(sa.Secrets)).NotTo(gomega.BeZero()) }) - It("The kubelet's main port 10250 should reject requests with no credentials", func() { + ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func() { pod := createNodeAuthTestPod(f) for _, nodeIP := range nodeIPs { // Anonymous authentication is disabled by default result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort)) - Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials") + gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials") } }) - It("The kubelet can delegate ServiceAccount tokens to the API server", func() { - By("create a new ServiceAccount for authentication") + ginkgo.It("The kubelet can delegate ServiceAccount tokens to the API server", func() { + ginkgo.By("create a new ServiceAccount for authentication") trueValue := true newSA := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -84,7 +84,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { "%{http_code}", "cat /var/run/secrets/kubernetes.io/serviceaccount/token", nodeIP, ports.KubeletPort)) - Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server") + gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server") } }) }) diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index 10492cfc4df..edbb5391ea8 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -30,13 +30,13 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( - NodesGroup = "system:nodes" - NodeNamePrefix = "system:node:" + nodesGroup = "system:nodes" + nodeNamePrefix = "system:node:" ) var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { @@ -48,47 +48,47 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { var asUser string var defaultSaSecret string var nodeName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { ns = f.Namespace.Name nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) - Expect(len(nodeList.Items)).NotTo(Equal(0)) + gomega.Expect(len(nodeList.Items)).NotTo(gomega.Equal(0)) nodeName = nodeList.Items[0].Name - asUser = NodeNamePrefix + nodeName + asUser = nodeNamePrefix + nodeName saName := "default" sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{}) - Expect(len(sa.Secrets)).NotTo(Equal(0)) + gomega.Expect(len(sa.Secrets)).NotTo(gomega.Equal(0)) framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName) defaultSaSecret = sa.Secrets[0].Name - By("Creating a kubernetes client that impersonates a node") + ginkgo.By("Creating a kubernetes client that impersonates a node") config, err := framework.LoadConfig() framework.ExpectNoError(err, "failed to load kubernetes client config") config.Impersonate = restclient.ImpersonationConfig{ UserName: asUser, - Groups: []string{NodesGroup}, + Groups: []string{nodesGroup}, } c, err = clientset.NewForConfig(config) framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config) }) - It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() { + ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() { _, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{}) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) }) - It("Getting an existing secret should exit with the Forbidden error", func() { + ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() { _, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{}) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) }) - It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() { + ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() { _, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{}) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) }) - It("Getting an existing configmap should exit with the Forbidden error", func() { - By("Create a configmap for testing") + ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func() { + ginkgo.By("Create a configmap for testing") configmap := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -101,11 +101,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap) framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap) _, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, 
metav1.GetOptions{}) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) }) - It("Getting a secret for a workload the node has access to should succeed", func() { - By("Create a secret for testing") + ginkgo.It("Getting a secret for a workload the node has access to should succeed", func() { + ginkgo.By("Create a secret for testing") secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -118,11 +118,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { _, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret) framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name) - By("Node should not get the secret") + ginkgo.By("Node should not get the secret") _, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{}) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) - By("Create a pod that use the secret") + ginkgo.By("Create a pod that use the secret") pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pause", @@ -151,7 +151,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { _, err = f.ClientSet.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name) - By("The node should able to access the secret") + ginkgo.By("The node should able to access the secret") itv := framework.Poll dur := 1 * time.Minute err = wait.Poll(itv, dur, func() (bool, error) { @@ -165,7 +165,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { framework.ExpectNoError(err, "failed to get secret after trying every %v for %v (%s:%s)", itv, dur, ns, secret.Name) }) - It("A node shouldn't be able to create another node", func() { + ginkgo.It("A node shouldn't be able to create another node", func() { node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, TypeMeta: metav1.TypeMeta{ @@ -173,14 +173,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { APIVersion: "v1", }, } - By(fmt.Sprintf("Create node foo by user: %v", asUser)) + ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) _, err := c.CoreV1().Nodes().Create(node) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) }) - It("A node shouldn't be able to delete another node", func() { - By(fmt.Sprintf("Create node foo by user: %v", asUser)) + ginkgo.It("A node shouldn't be able to delete another node", func() { + ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{}) - Expect(apierrors.IsForbidden(err)).Should(Equal(true)) + gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true)) }) }) diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index d47cd846fcf..9d779896975 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -37,8 +37,8 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" utilpointer "k8s.io/utils/pointer" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const nobodyUser = int64(65534) @@ -51,7 +51,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { // with reduced privileges. 
var c clientset.Interface var ns string // Test namespace, for convenience - BeforeEach(func() { + ginkgo.BeforeEach(func() { if !framework.IsPodSecurityPolicyEnabled(f) { framework.Skipf("PodSecurityPolicy not enabled") } @@ -60,7 +60,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { } ns = f.Namespace.Name - By("Creating a kubernetes client that impersonates the default service account") + ginkgo.By("Creating a kubernetes client that impersonates the default service account") config, err := framework.LoadConfig() framework.ExpectNoError(err) config.Impersonate = restclient.ImpersonationConfig{ @@ -70,24 +70,24 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { c, err = clientset.NewForConfig(config) framework.ExpectNoError(err) - By("Binding the edit role to the default SA") + ginkgo.By("Binding the edit role to the default SA") err = auth.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"}) framework.ExpectNoError(err) }) - It("should forbid pod creation when no PSP is available", func() { - By("Running a restricted pod") + ginkgo.It("should forbid pod creation when no PSP is available", func() { + ginkgo.By("Running a restricted pod") _, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted")) expectForbidden(err) }) - It("should enforce the restricted policy.PodSecurityPolicy", func() { - By("Creating & Binding a restricted policy for the test service account") + ginkgo.It("should enforce the restricted policy.PodSecurityPolicy", func() { + ginkgo.By("Creating & Binding a restricted policy for the test service account") _, cleanup := createAndBindPSP(f, restrictedPSP("restrictive")) defer cleanup() - By("Running a restricted pod") + ginkgo.By("Running a restricted pod") pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed")) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace)) @@ -98,8 +98,8 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { }) }) - It("should allow pods under the privileged policy.PodSecurityPolicy", func() { - By("Creating & Binding a privileged policy for the test service account") + ginkgo.It("should allow pods under the privileged policy.PodSecurityPolicy", func() { + ginkgo.By("Creating & Binding a privileged policy for the test service account") // Ensure that the permissive policy is used even in the presence of the restricted policy. 
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive")) defer cleanup() @@ -115,26 +115,26 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) validated, found := p.Annotations[psputil.ValidatedPSPAnnotation] - Expect(found).To(BeTrue(), "PSP annotation not found") - Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP") + gomega.Expect(found).To(gomega.BeTrue(), "PSP annotation not found") + gomega.Expect(validated).To(gomega.Equal(expectedPSP.Name), "Unexpected validated PSP") }) }) }) func expectForbidden(err error) { - Expect(err).To(HaveOccurred(), "should be forbidden") - Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error") + gomega.Expect(err).To(gomega.HaveOccurred(), "should be forbidden") + gomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), "should be forbidden error") } func testPrivilegedPods(tester func(pod *v1.Pod)) { - By("Running a privileged pod", func() { + ginkgo.By("Running a privileged pod", func() { privileged := restrictedPod("privileged") privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true) privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil tester(privileged) }) - By("Running a HostPath pod", func() { + ginkgo.By("Running a HostPath pod", func() { hostpath := restrictedPod("hostpath") hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{ Name: "hp", @@ -149,26 +149,26 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) { tester(hostpath) }) - By("Running a HostNetwork pod", func() { + ginkgo.By("Running a HostNetwork pod", func() { hostnet := restrictedPod("hostnet") hostnet.Spec.HostNetwork = true tester(hostnet) }) - By("Running a HostPID pod", func() { + ginkgo.By("Running a HostPID pod", func() { hostpid := restrictedPod("hostpid") hostpid.Spec.HostPID = true tester(hostpid) }) - By("Running a HostIPC pod", func() { + ginkgo.By("Running a HostIPC pod", func() { hostipc := restrictedPod("hostipc") hostipc.Spec.HostIPC = true tester(hostipc) }) if common.IsAppArmorSupported() { - By("Running a custom AppArmor profile pod", func() { + ginkgo.By("Running a custom AppArmor profile pod", func() { aa := restrictedPod("apparmor") // Every node is expected to have the docker-default profile. 
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default" @@ -176,13 +176,13 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) { }) } - By("Running an unconfined Seccomp pod", func() { + ginkgo.By("Running an unconfined Seccomp pod", func() { unconfined := restrictedPod("seccomp") unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined" tester(unconfined) }) - By("Running a SYS_ADMIN pod", func() { + ginkgo.By("Running a SYS_ADMIN pod", func() { sysadmin := restrictedPod("sysadmin") sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{ Add: []v1.Capability{"SYS_ADMIN"}, @@ -191,14 +191,14 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) { tester(sysadmin) }) - By("Running a RunAsGroup pod", func() { + ginkgo.By("Running a RunAsGroup pod", func() { sysadmin := restrictedPod("runasgroup") gid := int64(0) sysadmin.Spec.Containers[0].SecurityContext.RunAsGroup = &gid tester(sysadmin) }) - By("Running a RunAsUser pod", func() { + ginkgo.By("Running a RunAsUser pod", func() { sysadmin := restrictedPod("runasuser") uid := int64(0) sysadmin.Spec.Containers[0].SecurityContext.RunAsUser = &uid diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 2140e9c0c9b..e3ca96000aa 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -33,8 +33,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var mountImage = imageutils.GetE2EImage(imageutils.Mounttest) @@ -42,11 +42,11 @@ var mountImage = imageutils.GetE2EImage(imageutils.Mounttest) var _ = SIGDescribe("ServiceAccounts", func() { f := framework.NewDefaultFramework("svcaccounts") - It("should ensure a single API token exists", func() { + ginkgo.It("should ensure a single API token exists", func() { // wait for the service account to reference a single secret var secrets []v1.ObjectReference framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { - By("waiting for a single token reference") + ginkgo.By("waiting for a single token reference") sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if apierrors.IsNotFound(err) { e2elog.Logf("default service account was not found") @@ -71,20 +71,20 @@ var _ = SIGDescribe("ServiceAccounts", func() { // make sure the reference doesn't flutter { - By("ensuring the single token reference persists") + ginkgo.By("ensuring the single token reference persists") time.Sleep(2 * time.Second) sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(sa.Secrets).To(Equal(secrets)) + gomega.Expect(sa.Secrets).To(gomega.Equal(secrets)) } // delete the referenced secret - By("deleting the service account token") + ginkgo.By("deleting the service account token") framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil)) // wait for the referenced secret to be removed, and another one autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { - By("waiting for a new token reference") + ginkgo.By("waiting for a new token reference") sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if err 
!= nil { e2elog.Logf("error getting default service account: %v", err) @@ -109,15 +109,15 @@ var _ = SIGDescribe("ServiceAccounts", func() { // make sure the reference doesn't flutter { - By("ensuring the single token reference persists") + ginkgo.By("ensuring the single token reference persists") time.Sleep(2 * time.Second) sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(sa.Secrets).To(Equal(secrets)) + gomega.Expect(sa.Secrets).To(gomega.Equal(secrets)) } // delete the reference from the service account - By("deleting the reference to the service account token") + ginkgo.By("deleting the reference to the service account token") { sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) @@ -128,7 +128,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { // wait for another one to be autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { - By("waiting for a new token to be created and added") + ginkgo.By("waiting for a new token to be created and added") sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if err != nil { e2elog.Logf("error getting default service account: %v", err) @@ -149,11 +149,11 @@ var _ = SIGDescribe("ServiceAccounts", func() { // make sure the reference doesn't flutter { - By("ensuring the single token reference persists") + ginkgo.By("ensuring the single token reference persists") time.Sleep(2 * time.Second) sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(sa.Secrets).To(Equal(secrets)) + gomega.Expect(sa.Secrets).To(gomega.Equal(secrets)) } }) @@ -174,7 +174,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { - By("getting the auto-created API token") + ginkgo.By("getting the auto-created API token") sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("mount-test", metav1.GetOptions{}) if apierrors.IsNotFound(err) { e2elog.Logf("mount-test service account was not found") @@ -231,19 +231,19 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectNoError(err) // CA and namespace should be identical - Expect(mountedCA).To(Equal(rootCAContent)) - Expect(mountedNamespace).To(Equal(f.Namespace.Name)) + gomega.Expect(mountedCA).To(gomega.Equal(rootCAContent)) + gomega.Expect(mountedNamespace).To(gomega.Equal(f.Namespace.Name)) // Token should be a valid credential that identifies the pod's service account tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}} tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(tokenReview) framework.ExpectNoError(err) - Expect(tokenReview.Status.Authenticated).To(Equal(true)) - Expect(tokenReview.Status.Error).To(Equal("")) - Expect(tokenReview.Status.User.Username).To(Equal("system:serviceaccount:" + f.Namespace.Name + ":" + sa.Name)) + gomega.Expect(tokenReview.Status.Authenticated).To(gomega.Equal(true)) + gomega.Expect(tokenReview.Status.Error).To(gomega.Equal("")) + gomega.Expect(tokenReview.Status.User.Username).To(gomega.Equal("system:serviceaccount:" + f.Namespace.Name + ":" + sa.Name)) groups := 
sets.NewString(tokenReview.Status.User.Groups...) - Expect(groups.Has("system:authenticated")).To(Equal(true), fmt.Sprintf("expected system:authenticated group, had %v", groups.List())) - Expect(groups.Has("system:serviceaccounts")).To(Equal(true), fmt.Sprintf("expected system:serviceaccounts group, had %v", groups.List())) - Expect(groups.Has("system:serviceaccounts:"+f.Namespace.Name)).To(Equal(true), fmt.Sprintf("expected system:serviceaccounts:"+f.Namespace.Name+" group, had %v", groups.List())) + gomega.Expect(groups.Has("system:authenticated")).To(gomega.Equal(true), fmt.Sprintf("expected system:authenticated group, had %v", groups.List())) + gomega.Expect(groups.Has("system:serviceaccounts")).To(gomega.Equal(true), fmt.Sprintf("expected system:serviceaccounts group, had %v", groups.List())) + gomega.Expect(groups.Has("system:serviceaccounts:"+f.Namespace.Name)).To(gomega.Equal(true), fmt.Sprintf("expected system:serviceaccounts:"+f.Namespace.Name+" group, had %v", groups.List())) }) /* @@ -285,7 +285,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { - By("getting the auto-created API token") + ginkgo.By("getting the auto-created API token") sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { e2elog.Logf("mount service account was not found") From 54a308bdd7cc10ff5ea98bf081280940c3dfb1a6 Mon Sep 17 00:00:00 2001 From: Yago Nobre Date: Sat, 11 May 2019 00:50:50 -0300 Subject: [PATCH 126/194] Improve error message when user provides an invalid certificate key --- cmd/kubeadm/app/phases/copycerts/copycerts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/phases/copycerts/copycerts.go b/cmd/kubeadm/app/phases/copycerts/copycerts.go index ae7ad8af13d..07f9472570c 100644 --- a/cmd/kubeadm/app/phases/copycerts/copycerts.go +++ b/cmd/kubeadm/app/phases/copycerts/copycerts.go @@ -89,7 +89,7 @@ func UploadCerts(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, fmt.Printf("[upload-certs] Storing the certificates in Secret %q in the %q Namespace\n", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem) decodedKey, err := hex.DecodeString(key) if err != nil { - return err + return errors.Wrap(err, "error decoding certificate key") } tokenID, err := createShortLivedBootstrapToken(client) if err != nil { From 503ac59abe106f6cfc894d33b46b4ddfafe38aef Mon Sep 17 00:00:00 2001 From: yameiwang Date: Mon, 13 May 2019 08:29:03 +0800 Subject: [PATCH 127/194] fix typo in kuberuntime_manager.go --- pkg/kubelet/kuberuntime/kuberuntime_container.go | 2 +- pkg/kubelet/kuberuntime/kuberuntime_manager.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index a6369916939..32590604c40 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -546,7 +546,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec pod, containerSpec = restoredPod, restoredContainer } - // From this point , pod and container must be non-nil. + // From this point, pod and container must be non-nil.
gracePeriod := int64(minimumGracePeriodInSeconds) switch { case pod.DeletionGracePeriodSeconds != nil: diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 6d84aac3b19..c94a9280834 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -377,7 +377,7 @@ type containerToKillInfo struct { type podActions struct { // Stop all running (regular and init) containers and the sandbox for the pod. KillPod bool - // Whether need to create a new sandbox. If needed to kill pod and create a + // Whether need to create a new sandbox. If needed to kill pod and create // a new pod sandbox, all init containers need to be purged (i.e., removed). CreateSandbox bool // The id of existing sandbox. It is used for starting containers in ContainersToStart. From 3892f6698072827506dfb04acdbf4c6d67a3879d Mon Sep 17 00:00:00 2001 From: "fansong.cfs" Date: Mon, 13 May 2019 15:00:16 +0800 Subject: [PATCH 128/194] fix data race in unittest --- .../apiserver/pkg/storage/cacher/cacher_whitebox_test.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go index a737b9ccb7a..b3de84633fb 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -678,12 +678,16 @@ func TestDispatchingBookmarkEventsWithConcurrentStop(t *testing.T) { t.Fatalf("failure to update version of object (%d) %#v", bookmark.ResourceVersion, bookmark.Object) } + wg := sync.WaitGroup{} + wg.Add(2) go func() { cacher.dispatchEvent(bookmark) + wg.Done() }() go func() { w.Stop() + wg.Done() }() done := make(chan struct{}) @@ -700,5 +704,6 @@ func TestDispatchingBookmarkEventsWithConcurrentStop(t *testing.T) { t.Fatal("receive result timeout") } w.Stop() + wg.Wait() } } From 326999e30c6b0a61dfc5f8f5e47133ba0f16b217 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Mon, 13 May 2019 16:40:23 +0530 Subject: [PATCH 129/194] Add rules for release-1.15 branch --- staging/publishing/rules.yaml | 283 ++++++++++++++++++++++++++++++++++ 1 file changed, 283 insertions(+) diff --git a/staging/publishing/rules.yaml b/staging/publishing/rules.yaml index ade62f19cda..89e09c77352 100644 --- a/staging/publishing/rules.yaml +++ b/staging/publishing/rules.yaml @@ -11,6 +11,11 @@ rules: branch: master dir: staging/src/k8s.io/code-generator name: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/code-generator + name: release-1.15 + go: 1.12.5 - destination: apimachinery library: true branches: @@ -18,6 +23,11 @@ rules: branch: master dir: staging/src/k8s.io/apimachinery name: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/apimachinery + name: release-1.15 + go: 1.12.5 - destination: api library: true branches: @@ -28,6 +38,14 @@ rules: dependencies: - repository: apimachinery branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/api + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 - destination: client-go library: true branches: @@ -40,6 +58,16 @@ rules: branch: master - 
repository: api branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/client-go + name: release-12.0 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 smoke-test: | # assumes GO111MODULE=on go build ./... @@ -54,6 +82,14 @@ rules: dependencies: - repository: apimachinery branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/component-base + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 - destination: apiserver library: true branches: @@ -70,6 +106,20 @@ rules: branch: master - repository: component-base branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/apiserver + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: component-base + branch: release-1.15 - destination: kube-aggregator branches: - source: @@ -89,6 +139,24 @@ rules: branch: master - repository: code-generator branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/kube-aggregator + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: apiserver + branch: release-1.15 + - repository: component-base + branch: release-1.15 + - repository: code-generator + branch: release-1.15 - destination: sample-apiserver branches: - source: @@ -110,6 +178,26 @@ rules: branch: master required-packages: - k8s.io/code-generator + - source: + branch: release-1.15 + dir: staging/src/k8s.io/sample-apiserver + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: apiserver + branch: release-1.15 + - repository: code-generator + branch: release-1.15 + - repository: component-base + branch: release-1.15 + required-packages: + - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build . @@ -132,6 +220,24 @@ rules: branch: master required-packages: - k8s.io/code-generator + - source: + branch: release-1.15 + dir: staging/src/k8s.io/sample-controller + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: code-generator + branch: release-1.15 + - repository: component-base + branch: release-1.15 + required-packages: + - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build . 
@@ -156,6 +262,26 @@ rules: branch: master required-packages: - k8s.io/code-generator + - source: + branch: release-1.15 + dir: staging/src/k8s.io/apiextensions-apiserver + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: apiserver + branch: release-1.15 + - repository: code-generator + branch: release-1.15 + - repository: component-base + branch: release-1.15 + required-packages: + - k8s.io/code-generator - destination: metrics library: true branches: @@ -172,6 +298,20 @@ rules: branch: master - repository: code-generator branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/metrics + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: code-generator + branch: release-1.15 - destination: cli-runtime library: true branches: @@ -186,6 +326,18 @@ rules: branch: master - repository: client-go branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/cli-runtime + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: api + branch: release-1.15 + - repository: apimachinery + branch: release-1.15 + - repository: client-go + branch: release-12.0 - destination: sample-cli-plugin library: false branches: @@ -204,6 +356,22 @@ rules: branch: master - repository: component-base branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/sample-cli-plugin + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: api + branch: release-1.15 + - repository: apimachinery + branch: release-1.15 + - repository: cli-runtime + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: component-base + branch: release-1.15 - destination: kube-proxy library: true branches: @@ -216,6 +384,16 @@ rules: branch: master - repository: component-base branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/kube-proxy + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: component-base + branch: release-1.15 - destination: kubelet library: true branches: @@ -230,6 +408,18 @@ rules: branch: master - repository: component-base branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/kubelet + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 + - repository: component-base + branch: release-1.15 - destination: kube-scheduler library: true branches: @@ -244,6 +434,18 @@ rules: branch: master - repository: component-base branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/kube-scheduler + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: apiserver + branch: release-1.15 + - repository: component-base + branch: release-1.15 - destination: kube-controller-manager library: true branches: @@ -258,6 +460,18 @@ rules: branch: master - repository: component-base branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/kube-controller-manager + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: apiserver + branch: release-1.15 + - repository: component-base + 
branch: release-1.15 - destination: cluster-bootstrap library: true branches: @@ -270,6 +484,16 @@ rules: branch: master - repository: api branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/cluster-bootstrap + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: apimachinery + branch: release-1.15 + - repository: api + branch: release-1.15 - destination: cloud-provider library: true branches: @@ -284,6 +508,18 @@ rules: branch: master - repository: client-go branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/cloud-provider + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: api + branch: release-1.15 + - repository: apimachinery + branch: release-1.15 + - repository: client-go + branch: release-12.0 - destination: csi-translation-lib library: true branches: @@ -298,6 +534,18 @@ rules: branch: master - repository: cloud-provider branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/csi-translation-lib + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: api + branch: release-1.15 + - repository: apimachinery + branch: release-1.15 + - repository: cloud-provider + branch: release-1.15 - destination: legacy-cloud-providers library: true branches: @@ -316,6 +564,22 @@ rules: branch: master - repository: csi-translation-lib branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/legacy-cloud-providers + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: api + branch: release-1.15 + - repository: apimachinery + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: cloud-provider + branch: release-1.15 + - repository: csi-translation-lib + branch: release-1.15 - destination: node-api library: true branches: @@ -332,6 +596,20 @@ rules: branch: master - repository: code-generator branch: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/node-api + name: release-1.15 + go: 1.12.5 + dependencies: + - repository: api + branch: release-1.15 + - repository: apimachinery + branch: release-1.15 + - repository: client-go + branch: release-12.0 + - repository: code-generator + branch: release-1.15 - destination: cri-api library: true branches: @@ -339,3 +617,8 @@ rules: branch: master dir: staging/src/k8s.io/cri-api name: master + - source: + branch: release-1.15 + dir: staging/src/k8s.io/cri-api + name: release-1.15 + go: 1.12.5 From 62ffdde50d8fb8c50bdc6d9df7f2c712bd2acd2b Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Mon, 13 May 2019 16:40:37 +0530 Subject: [PATCH 130/194] Remove rules for release-1.11 branch --- staging/publishing/rules-godeps.yaml | 118 --------------------------- 1 file changed, 118 deletions(-) diff --git a/staging/publishing/rules-godeps.yaml b/staging/publishing/rules-godeps.yaml index 798ce98e513..bb6d9ea8eeb 100644 --- a/staging/publishing/rules-godeps.yaml +++ b/staging/publishing/rules-godeps.yaml @@ -7,11 +7,6 @@ recursive-delete-patterns: rules: - destination: code-generator branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/code-generator - name: release-1.11 - go: 1.10.2 - source: branch: release-1.12 dir: staging/src/k8s.io/code-generator @@ -30,11 +25,6 @@ rules: - destination: apimachinery library: true branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/apimachinery - name: release-1.11 - go: 1.10.2 - source: branch: release-1.12 dir: staging/src/k8s.io/apimachinery @@ -53,14 +43,6 @@ rules: - destination: api library: true branches: 
- - source: - branch: release-1.11 - dir: staging/src/k8s.io/api - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - source: branch: release-1.12 dir: staging/src/k8s.io/api @@ -88,16 +70,6 @@ rules: - destination: client-go library: true branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/client-go - name: release-8.0 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - source: branch: release-1.12 dir: staging/src/k8s.io/client-go @@ -146,18 +118,6 @@ rules: - destination: apiserver library: true branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/apiserver - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - - repository: client-go - branch: release-8.0 - source: branch: release-1.12 dir: staging/src/k8s.io/apiserver @@ -198,20 +158,6 @@ rules: branch: release-1.14 - destination: kube-aggregator branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/kube-aggregator - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - - repository: client-go - branch: release-8.0 - - repository: apiserver - branch: release-1.11 - source: branch: release-1.12 dir: staging/src/k8s.io/kube-aggregator @@ -258,24 +204,6 @@ rules: branch: release-1.14 - destination: sample-apiserver branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/sample-apiserver - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - - repository: client-go - branch: release-8.0 - - repository: apiserver - branch: release-1.11 - - repository: code-generator - branch: release-1.11 - required-packages: - - k8s.io/code-generator - source: branch: release-1.12 dir: staging/src/k8s.io/sample-apiserver @@ -342,22 +270,6 @@ rules: go build . - destination: sample-controller branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/sample-controller - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - - repository: client-go - branch: release-8.0 - - repository: code-generator - branch: release-1.11 - required-packages: - - k8s.io/code-generator - source: branch: release-1.12 dir: staging/src/k8s.io/sample-controller @@ -419,24 +331,6 @@ rules: go build . 
- destination: apiextensions-apiserver branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/apiextensions-apiserver - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - - repository: client-go - branch: release-8.0 - - repository: apiserver - branch: release-1.11 - - repository: code-generator - branch: release-1.11 - required-packages: - - k8s.io/code-generator - source: branch: release-1.12 dir: staging/src/k8s.io/apiextensions-apiserver @@ -496,18 +390,6 @@ rules: - destination: metrics library: true branches: - - source: - branch: release-1.11 - dir: staging/src/k8s.io/metrics - name: release-1.11 - go: 1.10.2 - dependencies: - - repository: apimachinery - branch: release-1.11 - - repository: api - branch: release-1.11 - - repository: client-go - branch: release-8.0 - source: branch: release-1.12 dir: staging/src/k8s.io/metrics From 867fa61f0cfcbf1875d58d50afc56f71a5031256 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 13 May 2019 08:41:02 -0400 Subject: [PATCH 131/194] github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 bump dependency to version preferred by our version of prometheus --- go.mod | 6 +--- go.sum | 4 +-- .../src/k8s.io/apiextensions-apiserver/go.mod | 1 - .../src/k8s.io/apiextensions-apiserver/go.sum | 4 +-- staging/src/k8s.io/apiserver/go.mod | 1 - staging/src/k8s.io/apiserver/go.sum | 4 +-- staging/src/k8s.io/component-base/go.mod | 1 - staging/src/k8s.io/component-base/go.sum | 4 +-- staging/src/k8s.io/kube-aggregator/go.mod | 1 - staging/src/k8s.io/kube-aggregator/go.sum | 4 +-- .../src/k8s.io/kube-controller-manager/go.mod | 1 - .../src/k8s.io/kube-controller-manager/go.sum | 2 +- staging/src/k8s.io/kube-proxy/go.mod | 1 - staging/src/k8s.io/kube-proxy/go.sum | 2 +- staging/src/k8s.io/kube-scheduler/go.mod | 1 - staging/src/k8s.io/kube-scheduler/go.sum | 2 +- .../src/k8s.io/legacy-cloud-providers/go.mod | 1 - .../src/k8s.io/legacy-cloud-providers/go.sum | 4 +-- staging/src/k8s.io/sample-apiserver/go.mod | 1 - staging/src/k8s.io/sample-apiserver/go.sum | 4 +-- .../beorn7/perks/quantile/stream.go | 34 ++++++++++++++++--- vendor/modules.txt | 2 +- 22 files changed, 48 insertions(+), 37 deletions(-) diff --git a/go.mod b/go.mod index f7e2599dd65..acd4a7d9249 100644 --- a/go.mod +++ b/go.mod @@ -218,7 +218,7 @@ replace ( github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.16.26 github.com/bazelbuild/bazel-gazelle => github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e github.com/bazelbuild/buildtools => github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a + github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/blang/semver => github.com/blang/semver v3.5.0+incompatible github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1 github.com/cespare/prettybench => github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c @@ -355,7 +355,6 @@ replace ( github.com/pborman/uuid => github.com/pborman/uuid v1.2.0 github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0 github.com/peterbourgon/diskv => github.com/peterbourgon/diskv v2.0.1+incompatible - github.com/philhofer/fwd => github.com/philhofer/fwd v1.0.0 github.com/pkg/errors => github.com/pkg/errors v0.8.0 github.com/pkg/sftp => github.com/pkg/sftp v0.0.0-20160930220758-4d0e916071f6 github.com/pmezard/go-difflib 
=> github.com/pmezard/go-difflib v1.0.0 @@ -366,7 +365,6 @@ replace ( github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a github.com/quobyte/api => github.com/quobyte/api v0.1.2 - github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.0.0-20160922212217-09693a8743ba github.com/remyoudompheng/bigfft => github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446 github.com/robfig/cron => github.com/robfig/cron v0.0.0-20170309132418-df38d32658d8 github.com/rubiojr/go-vhd => github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c @@ -389,9 +387,7 @@ replace ( github.com/stretchr/objx => github.com/stretchr/objx v0.1.1 github.com/stretchr/testify => github.com/stretchr/testify v1.2.2 github.com/syndtr/gocapability => github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4 - github.com/tinylib/msgp => github.com/tinylib/msgp v1.1.0 github.com/tmc/grpc-websocket-proxy => github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 - github.com/ugorji/go => github.com/ugorji/go v0.0.0-20171019201919-bdcc60b419d1 github.com/urfave/negroni => github.com/urfave/negroni v1.0.0 github.com/vishvananda/netlink => github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e github.com/vishvananda/netns => github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 diff --git a/go.sum b/go.sum index 23afc8f834d..35416ab20c2 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e h1:k7E/Rd github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e/go.mod h1:uHBSeeATKpVazAACZBDPL/Nk/UhQDDsJWDlqYJo8/Us= github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e h1:VuTBHPJNCQ88Okm9ld5SyLCvU50soWJYQYjQFdcDxew= github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index 8c30b0a10ed..1072d701460 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -38,7 +38,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 91060c9d90e..73a51fdeec6 100644 --- 
a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -12,8 +12,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index 892c1db7052..58dad118855 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -70,7 +70,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 34c20ee5e7e..3db1319429a 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -10,8 +10,8 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVk github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index cb4544f0a18..d72e130ad12 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -17,7 +17,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks 
v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index e81e5e716f1..70cb47ea1ff 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -1,5 +1,5 @@ -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index 7432f760afa..d5cfc6d1e94 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -27,7 +27,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index bb76f7010cb..c7f2831571b 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -10,8 +10,8 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVk github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index 7254db761ae..82ffa43b55d 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks 
v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 2190fea5588..52c9218ed97 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -1,4 +1,4 @@ -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index 2bbe571516e..339b8eb0efb 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 2190fea5588..52c9218ed97 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -1,4 +1,4 @@ -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index ffcfe6f63e4..3f59fcfc37e 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 2190fea5588..52c9218ed97 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -1,4 +1,4 @@ -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod index f94355e0e9e..fd4e0953084 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.mod +++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod @@ -34,7 +34,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum index aa186ef2d71..1f0689dd625 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.sum +++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum @@ -8,8 +8,8 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20181220005116-f8e99590 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20181220005116-f8e995905100/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/aws/aws-sdk-go v1.16.26 h1:GWkl3rkRO/JGRTWoLLIqwf7AWC4/W/1hMOUZqmX0js4= github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda h1:NyywMz59neOoVRFDz+ccfKWxn784fiHMDnZSy6T+JXY= diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index c63ade6ac40..aafff806994 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -17,7 +17,6 @@ require ( ) replace ( - github.com/beorn7/perks => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index 024c7628207..6f93a29faa0 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -10,8 +10,8 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVk github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks 
v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index f4cabd66956..d7d14f8eb63 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream { // is guaranteed to be within (Quantile±Epsilon). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + ƒ := func(s *stream, r float64) float64 { var m = math.MaxFloat64 var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) } if f < m { m = f @@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream { return newStream(ƒ) } +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + // Stream computes quantiles for a stream of float64s. It is not thread-safe by // design. Take care when using across multiple goroutines. 
type Stream struct { diff --git a/vendor/modules.txt b/vendor/modules.txt index 12ec4d50cc1..9ed0853bec4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -127,7 +127,7 @@ github.com/bazelbuild/buildtools/file github.com/bazelbuild/buildtools/lang github.com/bazelbuild/buildtools/tables github.com/bazelbuild/buildtools/wspace -# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 => github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a +# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 => github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.0+incompatible => github.com/blang/semver v3.5.0+incompatible github.com/blang/semver From ac2d38ffd8110a3df5a27ab1220963e534958ee6 Mon Sep 17 00:00:00 2001 From: Brandon Mabey Date: Mon, 13 May 2019 09:49:15 -0400 Subject: [PATCH 132/194] Fix error injection surface in FakeRuntimeService --- .../pkg/apis/testing/fake_runtime_service.go | 69 ++++++++++++++++++- 1 file changed, 66 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go index 95e16b56de7..4d9190a84e5 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go @@ -126,6 +126,7 @@ func (r *FakeRuntimeService) popError(f string) error { return nil } err, errs := errs[0], errs[1:] + r.Errors[f] = errs return err } @@ -144,6 +145,9 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResp defer r.Unlock() r.Called = append(r.Called, "Version") + if err := r.popError("Version"); err != nil { + return nil, err + } return &runtimeapi.VersionResponse{ Version: FakeVersion, @@ -158,6 +162,9 @@ func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { defer r.Unlock() r.Called = append(r.Called, "Status") + if err := r.popError("Status"); err != nil { + return nil, err + } return r.FakeStatus, nil } @@ -167,6 +174,9 @@ func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, defer r.Unlock() r.Called = append(r.Called, "RunPodSandbox") + if err := r.popError("RunPodSandbox"); err != nil { + return "", err + } // PodSandboxID should be randomized for real container runtime, but here just use // fixed name from BuildSandboxName() for easily making fake sandboxes. @@ -196,6 +206,9 @@ func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error { defer r.Unlock() r.Called = append(r.Called, "StopPodSandbox") + if err := r.popError("StopPodSandbox"); err != nil { + return err + } if s, ok := r.Sandboxes[podSandboxID]; ok { s.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY @@ -211,6 +224,9 @@ func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error { defer r.Unlock() r.Called = append(r.Called, "RemovePodSandbox") + if err := r.popError("RemovePodSandbox"); err != nil { + return err + } // Remove the pod sandbox delete(r.Sandboxes, podSandboxID) @@ -223,6 +239,9 @@ func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi. 
defer r.Unlock() r.Called = append(r.Called, "PodSandboxStatus") + if err := r.popError("PodSandboxStatus"); err != nil { + return nil, err + } s, ok := r.Sandboxes[podSandboxID] if !ok { @@ -238,6 +257,9 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) defer r.Unlock() r.Called = append(r.Called, "ListPodSandbox") + if err := r.popError("ListPodSandbox"); err != nil { + return nil, err + } result := make([]*runtimeapi.PodSandbox, 0) for id, s := range r.Sandboxes { @@ -272,6 +294,10 @@ func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runti defer r.Unlock() r.Called = append(r.Called, "PortForward") + if err := r.popError("PortForward"); err != nil { + return nil, err + } + return &runtimeapi.PortForwardResponse{}, nil } @@ -280,6 +306,9 @@ func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtim defer r.Unlock() r.Called = append(r.Called, "CreateContainer") + if err := r.popError("CreateContainer"); err != nil { + return "", err + } // ContainerID should be randomized for real container runtime, but here just use // fixed BuildContainerName() for easily making fake containers. @@ -309,6 +338,9 @@ func (r *FakeRuntimeService) StartContainer(containerID string) error { defer r.Unlock() r.Called = append(r.Called, "StartContainer") + if err := r.popError("StartContainer"); err != nil { + return err + } c, ok := r.Containers[containerID] if !ok { @@ -327,6 +359,9 @@ func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) er defer r.Unlock() r.Called = append(r.Called, "StopContainer") + if err := r.popError("StopContainer"); err != nil { + return err + } c, ok := r.Containers[containerID] if !ok { @@ -347,6 +382,9 @@ func (r *FakeRuntimeService) RemoveContainer(containerID string) error { defer r.Unlock() r.Called = append(r.Called, "RemoveContainer") + if err := r.popError("RemoveContainer"); err != nil { + return err + } // Remove the container delete(r.Containers, containerID) @@ -359,6 +397,9 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) defer r.Unlock() r.Called = append(r.Called, "ListContainers") + if err := r.popError("ListContainers"); err != nil { + return nil, err + } result := make([]*runtimeapi.Container, 0) for _, s := range r.Containers { @@ -398,6 +439,9 @@ func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.Co defer r.Unlock() r.Called = append(r.Called, "ContainerStatus") + if err := r.popError("ContainerStatus"); err != nil { + return nil, err + } c, ok := r.Containers[containerID] if !ok { @@ -413,7 +457,7 @@ func (r *FakeRuntimeService) UpdateContainerResources(string, *runtimeapi.LinuxC defer r.Unlock() r.Called = append(r.Called, "UpdateContainerResources") - return nil + return r.popError("UpdateContainerResources") } func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { @@ -421,7 +465,8 @@ func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout defer r.Unlock() r.Called = append(r.Called, "ExecSync") - return nil, nil, nil + err = r.popError("ExecSync") + return } func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { @@ -429,6 +474,10 @@ func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResp defer r.Unlock() r.Called = append(r.Called, "Exec") + if err := r.popError("Exec"); err != nil { + return nil, err + } + return 
&runtimeapi.ExecResponse{}, nil } @@ -437,11 +486,19 @@ func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi. defer r.Unlock() r.Called = append(r.Called, "Attach") + if err := r.popError("Attach"); err != nil { + return nil, err + } + return &runtimeapi.AttachResponse{}, nil } func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *runtimeapi.RuntimeConfig) error { - return nil + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "UpdateRuntimeConfig") + return r.popError("UpdateRuntimeConfig") } func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*runtimeapi.ContainerStats) { @@ -459,6 +516,9 @@ func (r *FakeRuntimeService) ContainerStats(containerID string) (*runtimeapi.Con defer r.Unlock() r.Called = append(r.Called, "ContainerStats") + if err := r.popError("ContainerStats"); err != nil { + return nil, err + } s, found := r.FakeContainerStats[containerID] if !found { @@ -472,6 +532,9 @@ func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStat defer r.Unlock() r.Called = append(r.Called, "ListContainerStats") + if err := r.popError("ListContainerStats"); err != nil { + return nil, err + } var result []*runtimeapi.ContainerStats for _, c := range r.Containers { From 60e5717f4f9e346692bc895ce6e3566f434dc7ff Mon Sep 17 00:00:00 2001 From: Marian Lobur Date: Mon, 13 May 2019 16:27:25 +0200 Subject: [PATCH 133/194] Bump image of event-exporter. Image has a new base image that have some security issue fixes. --- cluster/addons/fluentd-gcp/event-exporter.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cluster/addons/fluentd-gcp/event-exporter.yaml b/cluster/addons/fluentd-gcp/event-exporter.yaml index 918742e0124..5554602d863 100644 --- a/cluster/addons/fluentd-gcp/event-exporter.yaml +++ b/cluster/addons/fluentd-gcp/event-exporter.yaml @@ -29,11 +29,11 @@ subjects: apiVersion: apps/v1 kind: Deployment metadata: - name: event-exporter-v0.2.4 + name: event-exporter-v0.2.5 namespace: kube-system labels: k8s-app: event-exporter - version: v0.2.4 + version: v0.2.5 kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: @@ -41,17 +41,17 @@ spec: selector: matchLabels: k8s-app: event-exporter - version: v0.2.4 + version: v0.2.5 template: metadata: labels: k8s-app: event-exporter - version: v0.2.4 + version: v0.2.5 spec: serviceAccountName: event-exporter-sa containers: - name: event-exporter - image: k8s.gcr.io/event-exporter:v0.2.4 + image: k8s.gcr.io/event-exporter:v0.2.5 command: - /event-exporter - -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }} From 91716989b697956d5cede5483f2ec9d5b2acb871 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 7 May 2019 16:09:16 -0500 Subject: [PATCH 134/194] pkg/proxy: add sig-network-approvers/sig-network-reviewers to OWNERS files This PR also adds m1093782566 (Jun Du) to sig-network-reviewers in recognition of his contributions to the proxy. 
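For reference, the wider error-injection surface added to FakeRuntimeService in PATCH 132 above can be exercised from a test roughly as in the sketch below. This is a minimal, hypothetical usage sketch and not part of the series: the critest import alias, the test package name, and the direct assignment to the fake's exported Errors map are assumptions inferred from the diff (the fake may also expose a dedicated injection helper); what the diff does establish is that each method pops one queued error per call via popError.

    package fakecri_test

    import (
    	"errors"
    	"testing"

    	critest "k8s.io/cri-api/pkg/apis/testing"
    )

    // Queue an error for a CRI method by name and verify the fake returns it
    // exactly once; the key matches the name each fake method appends to Called.
    func TestInjectedVersionError(t *testing.T) {
    	fake := critest.NewFakeRuntimeService()

    	// Assumption: Errors is the exported map[string][]error read by popError.
    	fake.Errors = map[string][]error{
    		"Version": {errors.New("injected CRI failure")},
    	}

    	// First call returns the injected error.
    	if _, err := fake.Version("0.1.0"); err == nil {
    		t.Fatal("expected the injected error on the first Version call")
    	}
    	// popError removed the queued error, so the next call succeeds.
    	if _, err := fake.Version("0.1.0"); err != nil {
    		t.Fatalf("unexpected error on second Version call: %v", err)
    	}
    }
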
--- OWNERS_ALIASES | 1 + pkg/proxy/OWNERS | 14 ++------------ pkg/proxy/config/OWNERS | 3 +-- pkg/proxy/healthcheck/OWNERS | 4 ---- pkg/proxy/iptables/OWNERS | 5 +---- pkg/proxy/ipvs/OWNERS | 5 ++--- pkg/proxy/userspace/OWNERS | 9 ++------- 7 files changed, 9 insertions(+), 32 deletions(-) delete mode 100644 pkg/proxy/healthcheck/OWNERS diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 81b85babbf8..fba827fe6b9 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -226,6 +226,7 @@ aliases: - thockin - rramkumar1 - cmluciano + - m1093782566 sig-apps-approvers: - kow3ns - janetkuo diff --git a/pkg/proxy/OWNERS b/pkg/proxy/OWNERS index 22480e625aa..c0d7663ba60 100644 --- a/pkg/proxy/OWNERS +++ b/pkg/proxy/OWNERS @@ -1,18 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- thockin -- matchstick +- sig-network-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- brendandburns -- vishh -- justinsb -- freehan -- dcbw -- m1093782566 -- danwinship +- sig-network-reviewers labels: - sig/network diff --git a/pkg/proxy/config/OWNERS b/pkg/proxy/config/OWNERS index c7e36e636c8..1a00bfbfb6d 100644 --- a/pkg/proxy/config/OWNERS +++ b/pkg/proxy/config/OWNERS @@ -1,10 +1,9 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin +- sig-network-reviewers - lavalamp - smarterclayton - brendandburns -- freehan labels: - sig/network diff --git a/pkg/proxy/healthcheck/OWNERS b/pkg/proxy/healthcheck/OWNERS deleted file mode 100644 index 6e357e0e915..00000000000 --- a/pkg/proxy/healthcheck/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- m1093782566 diff --git a/pkg/proxy/iptables/OWNERS b/pkg/proxy/iptables/OWNERS index ce99e97466e..5cda5bc17c8 100644 --- a/pkg/proxy/iptables/OWNERS +++ b/pkg/proxy/iptables/OWNERS @@ -1,11 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin +- sig-network-reviewers - smarterclayton - justinsb -- freehan -- dcbw -- danwinship labels: - sig/network diff --git a/pkg/proxy/ipvs/OWNERS b/pkg/proxy/ipvs/OWNERS index c49cd694eba..d1fb225a6c8 100644 --- a/pkg/proxy/ipvs/OWNERS +++ b/pkg/proxy/ipvs/OWNERS @@ -1,12 +1,11 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin +- sig-network-reviewers - brendandburns -- m1093782566 - Lion-Wei approvers: -- thockin +- sig-network-approvers - brendandburns - m1093782566 labels: diff --git a/pkg/proxy/userspace/OWNERS b/pkg/proxy/userspace/OWNERS index efdc686a265..7fb99c84d84 100644 --- a/pkg/proxy/userspace/OWNERS +++ b/pkg/proxy/userspace/OWNERS @@ -1,15 +1,10 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- thockin -- dcbw -- danwinship +- sig-network-approvers reviewers: -- thockin +- sig-network-reviewers - lavalamp - smarterclayton -- freehan -- dcbw -- danwinship labels: - sig/network From fe3c9c8b6fa01dfd1000d3a8aee9c6b1b3567446 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kevin=20Wiesm=C3=BCller?= Date: Mon, 13 May 2019 18:32:32 +0200 Subject: [PATCH 135/194] add test to make sure managedFields can be reset --- .../integration/apiserver/apply/apply_test.go | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index 9ead9155ed1..037b300188d 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -830,3 +830,59 @@ func TestApplyConvertsManagedFieldsVersion(t *testing.T) { t.Fatalf("expected:\n%v\nbut 
got:\n%v", expected, actual) } } + +// TestClearManagedFields verifies it's possible to clear the managedFields +func TestClearManagedFields(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + Namespace("default"). + Resource("configmaps"). + Name("test-cm"). + Param("fieldManager", "apply_test"). + Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to create object using Apply patch: %v", err) + } + + _, err = client.CoreV1().RESTClient().Patch(types.MergePatchType). + Namespace("default"). + Resource("configmaps"). + Name("test-cm"). + Body([]byte(`{"metadata":{"managedFields": [{}]}}`)).Do().Get() + if err != nil { + t.Fatalf("Failed to patch object: %v", err) + } + + object, err := client.CoreV1().RESTClient().Get().Namespace("default").Resource("configmaps").Name("test-cm").Do().Get() + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + accessor, err := meta.Accessor(object) + if err != nil { + t.Fatalf("Failed to get meta accessor: %v", err) + } + + if managedFields := accessor.GetManagedFields(); len(managedFields) != 0 { + t.Fatalf("Failed to clear managedFields, got: %v", managedFields) + } +} From 79b46f47f638e143df024f9bef10264eb7d2dc03 Mon Sep 17 00:00:00 2001 From: stgleb Date: Mon, 13 May 2019 20:33:37 +0300 Subject: [PATCH 136/194] Fix typo in IPVS acronym --- cmd/kubeadm/app/preflight/checks.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 93c223f667a..0586e68b566 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -1008,7 +1008,7 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.JoinConfigura func RunOptionalJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.ClusterConfiguration, ignorePreflightErrors sets.String) error { checks := []Checker{} - // Check if IVPS kube-proxy mode is supported + // Check if IPVS kube-proxy mode is supported if cfg.ComponentConfigs.KubeProxy != nil && cfg.ComponentConfigs.KubeProxy.Mode == ipvsutil.IPVSProxyMode { checks = append(checks, IPVSProxierCheck{exec: execer}) } From 9418affa4df2ca15d7ba67507b74c443a8c281d3 Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Tue, 14 May 2019 03:11:19 +0800 Subject: [PATCH 137/194] add comment on rev history length limit --- pkg/controller/deployment/sync.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index a92904dd3d5..b0a6e7e3f14 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -125,6 +125,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deploym } const ( + // limit revision history length to 100 element (~2000 chars) maxRevHistoryLengthInChars = 2000 ) From ee215ba705500c864f207668a0ba1309e0880cfb Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 13 May 2019 10:58:54 -0400 Subject: [PATCH 138/194] Graceful custom resource storage teardown --- .../pkg/apiserver/BUILD | 3 + .../pkg/apiserver/apiserver.go | 1 + 
.../pkg/apiserver/customresource_handler.go | 88 ++++++++---- .../test/integration/BUILD | 1 + .../test/integration/change_test.go | 125 ++++++++++++++++++ 5 files changed, 195 insertions(+), 23 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index d624136ac96..60686c95c91 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -53,7 +53,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", @@ -68,6 +70,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go index 1a1496ec8f6..9883471c647 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go @@ -187,6 +187,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) c.ExtraConfig.AuthResolverWrapper, c.ExtraConfig.MasterCount, s.GenericAPIServer.Authorizer, + c.GenericConfig.RequestTimeout, ) if err != nil { return nil, err diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index a06070c5724..0cfbe187295 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -52,6 +52,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer/versioning" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/handlers" @@ -62,6 +64,7 @@ import ( "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + genericfilters "k8s.io/apiserver/pkg/server/filters" "k8s.io/apiserver/pkg/storage/storagebackend" utilfeature 
"k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/webhook" @@ -100,6 +103,9 @@ type crdHandler struct { // so that we can do create on update. authorizer authorizer.Authorizer + + // request timeout we should delay storage teardown for + requestTimeout time.Duration } // crdInfo stores enough information to serve the storage for the custom resource @@ -123,6 +129,8 @@ type crdInfo struct { // storageVersion is the CRD version used when storing the object in etcd. storageVersion string + + waitGroup *utilwaitgroup.SafeWaitGroup } // crdStorageMap goes from customresourcedefinition to its storage @@ -139,7 +147,8 @@ func NewCustomResourceDefinitionHandler( serviceResolver webhook.ServiceResolver, authResolverWrapper webhook.AuthenticationInfoResolverWrapper, masterCount int, - authorizer authorizer.Authorizer) (*crdHandler, error) { + authorizer authorizer.Authorizer, + requestTimeout time.Duration) (*crdHandler, error) { ret := &crdHandler{ versionDiscoveryHandler: versionDiscoveryHandler, groupDiscoveryHandler: groupDiscoveryHandler, @@ -151,6 +160,7 @@ func NewCustomResourceDefinitionHandler( establishingController: establishingController, masterCount: masterCount, authorizer: authorizer, + requestTimeout: requestTimeout, } crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: ret.updateCustomResourceDefinition, @@ -169,6 +179,11 @@ func NewCustomResourceDefinitionHandler( return ret, nil } +// watches are expected to handle storage disruption gracefully, +// both on the server-side (by terminating the watch connection) +// and on the client side (by restarting the watch) +var longRunningFilter = genericfilters.BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString()) + func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { ctx := req.Context() requestInfo, ok := apirequest.RequestInfoFrom(ctx) @@ -238,7 +253,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { supportedTypes = append(supportedTypes, string(types.ApplyPatchType)) } - var handler http.HandlerFunc + var handlerFunc http.HandlerFunc subresources, err := apiextensions.GetSubresourcesForVersion(crd, requestInfo.APIVersion) if err != nil { utilruntime.HandleError(err) @@ -247,18 +262,19 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } switch { case subresource == "status" && subresources != nil && subresources.Status != nil: - handler = r.serveStatus(w, req, requestInfo, crdInfo, terminating, supportedTypes) + handlerFunc = r.serveStatus(w, req, requestInfo, crdInfo, terminating, supportedTypes) case subresource == "scale" && subresources != nil && subresources.Scale != nil: - handler = r.serveScale(w, req, requestInfo, crdInfo, terminating, supportedTypes) + handlerFunc = r.serveScale(w, req, requestInfo, crdInfo, terminating, supportedTypes) case len(subresource) == 0: - handler = r.serveResource(w, req, requestInfo, crdInfo, terminating, supportedTypes) + handlerFunc = r.serveResource(w, req, requestInfo, crdInfo, terminating, supportedTypes) default: http.Error(w, "the server could not find the requested resource", http.StatusNotFound) } - if handler != nil { - handler = metrics.InstrumentHandlerFunc(verb, requestInfo.APIGroup, requestInfo.APIVersion, resource, subresource, scope, metrics.APIServerComponent, handler) - handler(w, req) + if handlerFunc != nil { + handlerFunc = metrics.InstrumentHandlerFunc(verb, requestInfo.APIGroup, requestInfo.APIVersion, resource, subresource, scope, 
metrics.APIServerComponent, handlerFunc) + handler := genericfilters.WithWaitGroup(handlerFunc, longRunningFilter, crdInfo.waitGroup) + handler.ServeHTTP(w, req) return } } @@ -365,18 +381,18 @@ func (r *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) klog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) - // Copy because we cannot write to storageMap without a race - // as it is used without locking elsewhere. - storageMap2 := storageMap.clone() - if oldInfo, ok := storageMap2[types.UID(oldCRD.UID)]; ok { - for _, storage := range oldInfo.storages { - // destroy only the main storage. Those for the subresources share cacher and etcd clients. - storage.CustomResource.DestroyFunc() - } - delete(storageMap2, types.UID(oldCRD.UID)) - } + if oldInfo, ok := storageMap[types.UID(oldCRD.UID)]; ok { + // Copy because we cannot write to storageMap without a race + // as it is used without locking elsewhere. + storageMap2 := storageMap.clone() - r.customStorage.Store(storageMap2) + // Remove from the CRD info map and store the map + delete(storageMap2, types.UID(oldCRD.UID)) + r.customStorage.Store(storageMap2) + + // Tear down the old storage + go r.tearDown(oldInfo) + } } // removeDeadStorage removes REST storage that isn't being used @@ -390,6 +406,7 @@ func (r *crdHandler) removeDeadStorage() { r.customStorageLock.Lock() defer r.customStorageLock.Unlock() + oldInfos := []*crdInfo{} storageMap := r.customStorage.Load().(crdStorageMap) // Copy because we cannot write to storageMap without a race // as it is used without locking elsewhere @@ -404,14 +421,38 @@ func (r *crdHandler) removeDeadStorage() { } if !found { klog.V(4).Infof("Removing dead CRD storage for %s/%s", s.spec.Group, s.spec.Names.Kind) - for _, storage := range s.storages { - // destroy only the main storage. Those for the subresources share cacher and etcd clients. - storage.CustomResource.DestroyFunc() - } + oldInfos = append(oldInfos, s) delete(storageMap2, uid) } } r.customStorage.Store(storageMap2) + + for _, s := range oldInfos { + go r.tearDown(s) + } +} + +// Wait up to a minute for requests to drain, then tear down storage +func (r *crdHandler) tearDown(oldInfo *crdInfo) { + requestsDrained := make(chan struct{}) + go func() { + defer close(requestsDrained) + // Allow time for in-flight requests with a handle to the old info to register themselves + time.Sleep(time.Second) + // Wait for in-flight requests to drain + oldInfo.waitGroup.Wait() + }() + + select { + case <-time.After(r.requestTimeout * 2): + klog.Warningf("timeout waiting for requests to drain for %s/%s, tearing down storage", oldInfo.spec.Group, oldInfo.spec.Names.Kind) + case <-requestsDrained: + } + + for _, storage := range oldInfo.storages { + // destroy only the main storage. Those for the subresources share cacher and etcd clients. 
+ storage.CustomResource.DestroyFunc() + } } // GetCustomResourceListerCollectionDeleter returns the ListerCollectionDeleter of @@ -622,6 +663,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResource scaleRequestScopes: scaleScopes, statusRequestScopes: statusScopes, storageVersion: storageVersion, + waitGroup: &utilwaitgroup.SafeWaitGroup{}, } // Copy because we cannot write to storageMap without a race diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index 546aa4920fe..eebb1c7511b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -11,6 +11,7 @@ go_test( srcs = [ "apply_test.go", "basic_test.go", + "change_test.go", "finalization_test.go", "objectmeta_test.go", "registration_test.go", diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go new file mode 100644 index 00000000000..554a1c80691 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "sync" + "testing" + "time" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apiextensions-apiserver/test/integration/fixtures" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" +) + +func TestChangeCRD(t *testing.T) { + tearDown, config, _, err := fixtures.StartDefaultServer(t) + if err != nil { + t.Fatal(err) + } + defer tearDown() + config.QPS = 1000 + config.Burst = 1000 + apiExtensionsClient, err := clientset.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + + noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) + noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionsClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + ns := "default" + noxuNamespacedResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1beta1") + + stopChan := make(chan struct{}) + + wg := &sync.WaitGroup{} + + // Set up loop to modify CRD in the background + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-stopChan: + return + default: + } + + noxuDefinitionToUpdate, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(noxuDefinitionToUpdate.Spec.Versions) == 1 { + v2 := noxuDefinitionToUpdate.Spec.Versions[0] + v2.Name = "v2" + v2.Served = true + v2.Storage = false + noxuDefinitionToUpdate.Spec.Versions = append(noxuDefinitionToUpdate.Spec.Versions, v2) + } else { + noxuDefinitionToUpdate.Spec.Versions = noxuDefinitionToUpdate.Spec.Versions[0:1] + } + if _, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(noxuDefinitionToUpdate); err != nil && !apierrors.IsConflict(err) { + t.Fatal(err) + } + time.Sleep(10 * time.Millisecond) + } + }() + + // Set up 100 loops creating and reading custom resources + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + noxuInstanceToCreate := fixtures.NewNoxuInstance(ns, fmt.Sprintf("foo-%d", i)) + if _, err := noxuNamespacedResourceClient.Create(noxuInstanceToCreate, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + for { + select { + case <-stopChan: + return + default: + if _, err := noxuNamespacedResourceClient.Get(noxuInstanceToCreate.GetName(), metav1.GetOptions{}); err != nil { + t.Fatal(err) + } + } + time.Sleep(10 * time.Millisecond) + } + }(i) + } + + // Let all the established get request loops soak + time.Sleep(5 * time.Second) + + // Tear down + close(stopChan) + + // Let loops drain + wg.Wait() +} From 66086c32cfbb01ce70bdfcd8c57ca07e98f33bf5 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Thu, 9 May 2019 11:35:36 -0700 Subject: [PATCH 139/194] Bump cluster-proportional-vertical-autoscaler to 0.7.1 --- .../calico-node-vertical-autoscaler-deployment.yaml | 2 +- .../typha-vertical-autoscaler-deployment.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml index 9d07ad2c375..fc6fab3dd0f 100644 --- 
a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml @@ -21,7 +21,7 @@ spec: spec: priorityClassName: system-cluster-critical containers: - - image: k8s.gcr.io/cpvpa-amd64:v0.6.0 + - image: k8s.gcr.io/cpvpa-amd64:v0.7.1 name: autoscaler command: - /cpvpa diff --git a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml index 8fecf5860df..0e4f22355a1 100644 --- a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml @@ -21,7 +21,7 @@ spec: spec: priorityClassName: system-cluster-critical containers: - - image: k8s.gcr.io/cpvpa-amd64:v0.6.0 + - image: k8s.gcr.io/cpvpa-amd64:v0.7.1 name: autoscaler command: - /cpvpa From d014591a11198e3eec905cb3d64141ead83ae359 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 10 May 2019 19:48:55 +0200 Subject: [PATCH 140/194] apiextensions: forbid false x-kubernetes-preserve-unknown-fields --- .../pkg/apis/apiextensions/deepcopy.go | 10 ++++++ .../apis/apiextensions/types_jsonschema.go | 3 +- .../apis/apiextensions/v1beta1/deepcopy.go | 10 ++++++ .../apiextensions/v1beta1/generated.proto | 1 + .../apiextensions/v1beta1/types_jsonschema.go | 3 +- .../pkg/apis/apiextensions/validation/BUILD | 1 + .../apiextensions/validation/validation.go | 4 +++ .../validation/validation_test.go | 36 +++++++++++++++++++ .../pkg/apiserver/schema/convert.go | 18 +++++++--- .../pkg/apiserver/schema/structural.go | 2 ++ .../pkg/apiserver/validation/validation.go | 4 +-- 11 files changed, 83 insertions(+), 9 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go index 37b4d1df9fe..51fb72df3cf 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go @@ -258,5 +258,15 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return out } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index e0cba964731..78223934628 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -61,7 +61,8 @@ type JSONSchemaProps struct { // in the validation schema. This affects fields recursively, // but switches back to normal pruning behaviour if nested // properties or additionalProperties are specified in the schema. - XPreserveUnknownFields bool + // This can either be true or undefined. False is forbidden. 
+ XPreserveUnknownFields *bool // x-kubernetes-embedded-resource defines that the value is an // embedded Kubernetes runtime.Object, with TypeMeta and diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go index f6a114e2b39..f67f4418125 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go @@ -234,5 +234,15 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return out } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index a0c23a44f56..8961120d4cb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -449,6 +449,7 @@ message JSONSchemaProps { // in the validation schema. This affects fields recursively, // but switches back to normal pruning behaviour if nested // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. optional bool xKubernetesPreserveUnknownFields = 38; // x-kubernetes-embedded-resource defines that the value is an diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 84f26e600af..da5e857f110 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -61,7 +61,8 @@ type JSONSchemaProps struct { // in the validation schema. This affects fields recursively, // but switches back to normal pruning behaviour if nested // properties or additionalProperties are specified in the schema. - XPreserveUnknownFields bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + // This can either be true or undefined. False is forbidden. 
+ XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` // x-kubernetes-embedded-resource defines that the value is an // embedded Kubernetes runtime.Object, with TypeMeta and diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD index 5f974a41f5e..319042d44d5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD @@ -34,6 +34,7 @@ go_test( "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index e91de0eaa88..2950ffaae27 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -673,6 +673,10 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch } } + if schema.XPreserveUnknownFields != nil && *schema.XPreserveUnknownFields == false { + allErrs = append(allErrs, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), *schema.XPreserveUnknownFields, "must be true or undefined")) + } + return allErrs } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go index 33387c6af3a..babd1c309bc 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go @@ -22,6 +22,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/pointer" ) type validationMatch struct { @@ -935,6 +936,41 @@ func TestValidateCustomResourceDefinition(t *testing.T) { invalid("spec", "versions"), }, }, + { + name: "x-kubernetes-preserve-unknown-field: false", + resource: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, + Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Version: "version", + Validation: &apiextensions.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensions.JSONSchemaProps{ + XPreserveUnknownFields: pointer.BoolPtr(false), + }, + }, + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.NamespaceScoped, + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "Plural", + ListKind: "PluralList", + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{"version"}, + }, + }, + errors: []validationMatch{ + invalid("spec", "validation", "openAPIV3Schema", 
"x-kubernetes-preserve-unknown-fields"), + }, + }, } for _, tc := range tests { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go index 2ed71b2618c..388fb0a47e1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go @@ -241,11 +241,19 @@ func newExtensions(s *apiextensions.JSONSchemaProps) (*Extensions, error) { return nil, nil } - return &Extensions{ - XPreserveUnknownFields: s.XPreserveUnknownFields, - XEmbeddedResource: s.XEmbeddedResource, - XIntOrString: s.XIntOrString, - }, nil + ret := &Extensions{ + XEmbeddedResource: s.XEmbeddedResource, + XIntOrString: s.XIntOrString, + } + + if s.XPreserveUnknownFields != nil { + if !*s.XPreserveUnknownFields { + return nil, fmt.Errorf("'x-kubernetes-preserve-unknown-fields' must be true or undefined") + } + ret.XPreserveUnknownFields = true + } + + return ret, nil } // validateUnsupportedFields checks that those fields rejected by validation are actually unset. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go index 996336c7dc7..a060644a7cd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go @@ -66,6 +66,8 @@ type Extensions struct { // in the validation schema. This affects fields recursively, // but switches back to normal pruning behaviour if nested // properties or additionalProperties are specified in the schema. + // False means that the pruning behaviour is inherited from the parent. + // False does not mean to activate pruning. XPreserveUnknownFields bool // x-kubernetes-embedded-resource defines that the value is an diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go index 00a8ac93c1b..039c37b1246 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go @@ -195,8 +195,8 @@ func ConvertJSONSchemaPropsWithPostProcess(in *apiextensions.JSONSchemaProps, ou } } - if in.XPreserveUnknownFields { - out.VendorExtensible.AddExtension("x-kubernetes-preserve-unknown-fields", true) + if in.XPreserveUnknownFields != nil { + out.VendorExtensible.AddExtension("x-kubernetes-preserve-unknown-fields", *in.XPreserveUnknownFields) } if in.XEmbeddedResource { out.VendorExtensible.AddExtension("x-kubernetes-embedded-resource", true) From 69c50a70f64fb7e239f5270b9abcdc21687bd665 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Fri, 10 May 2019 19:53:59 +0200 Subject: [PATCH 141/194] Update generated files --- api/openapi-spec/swagger.json | 2 +- .../apiextensions/v1beta1/generated.pb.go | 389 +++++++++--------- .../v1beta1/zz_generated.conversion.go | 4 +- 3 files changed, 200 insertions(+), 195 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 08d6edc1c84..93373c59d42 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -16822,7 +16822,7 @@ "type": "boolean" }, "x-kubernetes-preserve-unknown-fields": { - "description": "x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema.", + "description": "x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.", "type": "boolean" } }, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index c928a97d2a6..0be4a2ea828 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -1431,16 +1431,18 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0 } i++ - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x2 - i++ - if m.XPreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.XPreserveUnknownFields != nil { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x2 + i++ + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - i++ dAtA[i] = 0xb8 i++ dAtA[i] = 0x2 @@ -2110,7 +2112,9 @@ func (m *JSONSchemaProps) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } n += 3 - n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } n += 3 n += 3 return n @@ -2511,7 +2515,7 @@ func (this *JSONSchemaProps) String() string { `ExternalDocs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalDocs), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, `Example:` + strings.Replace(fmt.Sprintf("%v", this.Example), "JSON", "JSON", 1) + `,`, `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, - `XPreserveUnknownFields:` + fmt.Sprintf("%v", this.XPreserveUnknownFields) + `,`, + `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, `}`, @@ -6658,7 +6662,8 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { break } } - m.XPreserveUnknownFields = bool(v != 0) + b := bool(v != 0) + m.XPreserveUnknownFields = &b case 39: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) @@ -7463,184 +7468,184 @@ func init() { var fileDescriptorGenerated = []byte{ // 2884 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, - 0x15, 0xf7, 0xac, 0xb4, 0xd2, 0xaa, 
0x25, 0x59, 0x52, 0xdb, 0x56, 0xc6, 0x8a, 0xb3, 0x2b, 0x6f, - 0x48, 0x10, 0xc1, 0x5e, 0x25, 0x26, 0x21, 0x21, 0x55, 0x1c, 0xb4, 0x92, 0x92, 0x52, 0x62, 0x7d, - 0xd0, 0x6b, 0x27, 0x86, 0x7c, 0xb6, 0x76, 0x5a, 0xab, 0xb1, 0xe6, 0xcb, 0xdd, 0x33, 0x2b, 0xa9, - 0x02, 0x14, 0x1f, 0x95, 0x82, 0xa2, 0x80, 0x50, 0x24, 0x17, 0x0a, 0x38, 0x04, 0x8a, 0x0b, 0x07, - 0x38, 0xc0, 0x0d, 0xfe, 0x80, 0x1c, 0x53, 0x9c, 0x72, 0xa0, 0xb6, 0xf0, 0xe6, 0x2f, 0xa0, 0x8a, - 0x2a, 0xaa, 0x74, 0xa2, 0xfa, 0x63, 0x7a, 0x66, 0x67, 0x77, 0x6d, 0x57, 0xbc, 0x1b, 0x73, 0xd3, - 0xbc, 0xaf, 0xdf, 0xeb, 0xd7, 0xaf, 0x5f, 0xbf, 0x7e, 0x2b, 0xb0, 0x77, 0xf0, 0x1c, 0xab, 0xd8, - 0xfe, 0xf2, 0x41, 0xb4, 0x4b, 0xa8, 0x47, 0x42, 0xc2, 0x96, 0x9b, 0xc4, 0xb3, 0x7c, 0xba, 0xac, - 0x18, 0x38, 0xb0, 0xc9, 0x51, 0x48, 0x3c, 0x66, 0xfb, 0x1e, 0xbb, 0x8c, 0x03, 0x9b, 0x11, 0xda, - 0x24, 0x74, 0x39, 0x38, 0x68, 0x70, 0x1e, 0xeb, 0x14, 0x58, 0x6e, 0x3e, 0xb5, 0x4b, 0x42, 0xfc, - 0xd4, 0x72, 0x83, 0x78, 0x84, 0xe2, 0x90, 0x58, 0x95, 0x80, 0xfa, 0xa1, 0x0f, 0xbf, 0x2e, 0xcd, - 0x55, 0x3a, 0xa4, 0xdf, 0xd2, 0xe6, 0x2a, 0xc1, 0x41, 0x83, 0xf3, 0x58, 0xa7, 0x40, 0x45, 0x99, - 0x5b, 0xb8, 0xdc, 0xb0, 0xc3, 0xfd, 0x68, 0xb7, 0x52, 0xf7, 0xdd, 0xe5, 0x86, 0xdf, 0xf0, 0x97, - 0x85, 0xd5, 0xdd, 0x68, 0x4f, 0x7c, 0x89, 0x0f, 0xf1, 0x97, 0x44, 0x5b, 0x78, 0x3a, 0x71, 0xde, - 0xc5, 0xf5, 0x7d, 0xdb, 0x23, 0xf4, 0x38, 0xf1, 0xd8, 0x25, 0x21, 0x5e, 0x6e, 0x76, 0xf9, 0xb8, - 0xb0, 0xdc, 0x4f, 0x8b, 0x46, 0x5e, 0x68, 0xbb, 0xa4, 0x4b, 0xe1, 0xab, 0x77, 0x53, 0x60, 0xf5, - 0x7d, 0xe2, 0xe2, 0xac, 0x5e, 0xf9, 0xc4, 0x00, 0x73, 0xab, 0xbe, 0xd7, 0x24, 0x94, 0xaf, 0x12, - 0x91, 0x5b, 0x11, 0x61, 0x21, 0xac, 0x82, 0x91, 0xc8, 0xb6, 0x4c, 0x63, 0xd1, 0x58, 0x9a, 0xa8, - 0x3e, 0xf9, 0x51, 0xab, 0x74, 0xaa, 0xdd, 0x2a, 0x8d, 0x5c, 0xdf, 0x58, 0x3b, 0x69, 0x95, 0x2e, - 0xf6, 0x43, 0x0a, 0x8f, 0x03, 0xc2, 0x2a, 0xd7, 0x37, 0xd6, 0x10, 0x57, 0x86, 0x2f, 0x82, 0x39, - 0x8b, 0x30, 0x9b, 0x12, 0x6b, 0x65, 0x67, 0xe3, 0x15, 0x69, 0xdf, 0xcc, 0x09, 0x8b, 0xe7, 0x95, - 0xc5, 0xb9, 0xb5, 0xac, 0x00, 0xea, 0xd6, 0x81, 0x37, 0xc0, 0xb8, 0xbf, 0x7b, 0x93, 0xd4, 0x43, - 0x66, 0x8e, 0x2c, 0x8e, 0x2c, 0x4d, 0x5e, 0xb9, 0x5c, 0x49, 0x76, 0x50, 0xbb, 0x20, 0xb6, 0x4d, - 0x2d, 0xb6, 0x82, 0xf0, 0xe1, 0x7a, 0xbc, 0x73, 0xd5, 0x19, 0x85, 0x36, 0xbe, 0x2d, 0xad, 0xa0, - 0xd8, 0x5c, 0xf9, 0xf7, 0x39, 0x00, 0xd3, 0x8b, 0x67, 0x81, 0xef, 0x31, 0x32, 0x90, 0xd5, 0x33, - 0x30, 0x5b, 0x17, 0x96, 0x43, 0x62, 0x29, 0x5c, 0x33, 0xf7, 0x59, 0xbc, 0x37, 0x15, 0xfe, 0xec, - 0x6a, 0xc6, 0x1c, 0xea, 0x02, 0x80, 0xd7, 0xc0, 0x18, 0x25, 0x2c, 0x72, 0x42, 0x73, 0x64, 0xd1, - 0x58, 0x9a, 0xbc, 0x72, 0xa9, 0x2f, 0x94, 0xc8, 0x6f, 0x9e, 0x7c, 0x95, 0xe6, 0x53, 0x95, 0x5a, - 0x88, 0xc3, 0x88, 0x55, 0x4f, 0x2b, 0xa4, 0x31, 0x24, 0x6c, 0x20, 0x65, 0xab, 0xfc, 0xe3, 0x1c, - 0x98, 0x4d, 0x47, 0xa9, 0x69, 0x93, 0x43, 0x78, 0x08, 0xc6, 0xa9, 0x4c, 0x16, 0x11, 0xa7, 0xc9, - 0x2b, 0x3b, 0x95, 0xfb, 0x3a, 0x56, 0x95, 0xae, 0x24, 0xac, 0x4e, 0xf2, 0x3d, 0x53, 0x1f, 0x28, - 0x46, 0x83, 0xef, 0x80, 0x02, 0x55, 0x1b, 0x25, 0xb2, 0x69, 0xf2, 0xca, 0x37, 0x06, 0x88, 0x2c, - 0x0d, 0x57, 0xa7, 0xda, 0xad, 0x52, 0x21, 0xfe, 0x42, 0x1a, 0xb0, 0xfc, 0x7e, 0x0e, 0x14, 0x57, - 0x23, 0x16, 0xfa, 0x2e, 0x22, 0xcc, 0x8f, 0x68, 0x9d, 0xac, 0xfa, 0x4e, 0xe4, 0x7a, 0x6b, 0x64, - 0xcf, 0xf6, 0xec, 0x90, 0x67, 0xeb, 0x22, 0x18, 0xf5, 0xb0, 0x4b, 0x54, 0xf6, 0x4c, 0xa9, 0x98, - 0x8e, 0x6e, 0x61, 0x97, 0x20, 0xc1, 0xe1, 0x12, 0x3c, 0x59, 0xd4, 0x59, 0xd0, 0x12, 0xd7, 0x8e, - 0x03, 0x82, 0x04, 0x07, 0x3e, 0x0e, 0xc6, 0xf6, 0x7c, 0xea, 
0x62, 0xb9, 0x8f, 0x13, 0xc9, 0xce, - 0xbc, 0x20, 0xa8, 0x48, 0x71, 0xe1, 0x33, 0x60, 0xd2, 0x22, 0xac, 0x4e, 0xed, 0x80, 0x43, 0x9b, - 0xa3, 0x42, 0xf8, 0x8c, 0x12, 0x9e, 0x5c, 0x4b, 0x58, 0x28, 0x2d, 0x07, 0x2f, 0x81, 0x42, 0x40, - 0x6d, 0x9f, 0xda, 0xe1, 0xb1, 0x99, 0x5f, 0x34, 0x96, 0xf2, 0xd5, 0x59, 0xa5, 0x53, 0xd8, 0x51, - 0x74, 0xa4, 0x25, 0xe0, 0x22, 0x28, 0xbc, 0x54, 0xdb, 0xde, 0xda, 0xc1, 0xe1, 0xbe, 0x39, 0x26, - 0x10, 0x46, 0xb9, 0x34, 0x2a, 0xdc, 0x54, 0xd4, 0xf2, 0x3f, 0x73, 0xc0, 0xcc, 0x46, 0x25, 0x0e, - 0x29, 0x7c, 0x01, 0x14, 0x58, 0xc8, 0x2b, 0x4e, 0xe3, 0x58, 0xc5, 0xe4, 0x89, 0x18, 0xac, 0xa6, - 0xe8, 0x27, 0xad, 0xd2, 0x7c, 0xa2, 0x11, 0x53, 0x45, 0x3c, 0xb4, 0x2e, 0xfc, 0xad, 0x01, 0xce, - 0x1c, 0x92, 0xdd, 0x7d, 0xdf, 0x3f, 0x58, 0x75, 0x6c, 0xe2, 0x85, 0xab, 0xbe, 0xb7, 0x67, 0x37, - 0x54, 0x0e, 0xa0, 0xfb, 0xcc, 0x81, 0x57, 0xbb, 0x2d, 0x57, 0x1f, 0x6a, 0xb7, 0x4a, 0x67, 0x7a, - 0x30, 0x50, 0x2f, 0x3f, 0xe0, 0x0d, 0x60, 0xd6, 0x33, 0x87, 0x44, 0x15, 0x30, 0x59, 0xb6, 0x26, - 0xaa, 0x17, 0xda, 0xad, 0x92, 0xb9, 0xda, 0x47, 0x06, 0xf5, 0xd5, 0x2e, 0xff, 0x70, 0x24, 0x1b, - 0xde, 0x54, 0xba, 0xbd, 0x0d, 0x0a, 0xfc, 0x18, 0x5b, 0x38, 0xc4, 0xea, 0x20, 0x3e, 0x79, 0x6f, - 0x87, 0x5e, 0xd6, 0x8c, 0x4d, 0x12, 0xe2, 0x2a, 0x54, 0x1b, 0x02, 0x12, 0x1a, 0xd2, 0x56, 0xe1, - 0x77, 0xc0, 0x28, 0x0b, 0x48, 0x5d, 0x05, 0xfa, 0xb5, 0xfb, 0x3d, 0x6c, 0x7d, 0x16, 0x52, 0x0b, - 0x48, 0x3d, 0x39, 0x0b, 0xfc, 0x0b, 0x09, 0x58, 0xf8, 0xae, 0x01, 0xc6, 0x98, 0x28, 0x50, 0xaa, - 0xa8, 0xbd, 0x31, 0x2c, 0x0f, 0x32, 0x55, 0x50, 0x7e, 0x23, 0x05, 0x5e, 0xfe, 0x4f, 0x0e, 0x5c, - 0xec, 0xa7, 0xba, 0xea, 0x7b, 0x96, 0xdc, 0x8e, 0x0d, 0x75, 0xb6, 0x65, 0xa6, 0x3f, 0x93, 0x3e, - 0xdb, 0x27, 0xad, 0xd2, 0x63, 0x77, 0x35, 0x90, 0x2a, 0x02, 0x5f, 0xd3, 0xeb, 0x96, 0x85, 0xe2, - 0x62, 0xa7, 0x63, 0x27, 0xad, 0xd2, 0x8c, 0x56, 0xeb, 0xf4, 0x15, 0x36, 0x01, 0x74, 0x30, 0x0b, - 0xaf, 0x51, 0xec, 0x31, 0x69, 0xd6, 0x76, 0x89, 0x0a, 0xdf, 0x13, 0xf7, 0x96, 0x1e, 0x5c, 0xa3, - 0xba, 0xa0, 0x20, 0xe1, 0xd5, 0x2e, 0x6b, 0xa8, 0x07, 0x02, 0xaf, 0x5b, 0x94, 0x60, 0xa6, 0x4b, - 0x51, 0xea, 0x46, 0xe1, 0x54, 0xa4, 0xb8, 0xf0, 0x4b, 0x60, 0xdc, 0x25, 0x8c, 0xe1, 0x06, 0x11, - 0xf5, 0x67, 0x22, 0xb9, 0xa2, 0x37, 0x25, 0x19, 0xc5, 0x7c, 0xde, 0x9f, 0x5c, 0xe8, 0x17, 0xb5, - 0xab, 0x36, 0x0b, 0xe1, 0xeb, 0x5d, 0x07, 0xa0, 0x72, 0x6f, 0x2b, 0xe4, 0xda, 0x22, 0xfd, 0x75, - 0xf1, 0x8b, 0x29, 0xa9, 0xe4, 0xff, 0x36, 0xc8, 0xdb, 0x21, 0x71, 0xe3, 0xbb, 0xfb, 0xd5, 0x21, - 0xe5, 0x5e, 0x75, 0x5a, 0xf9, 0x90, 0xdf, 0xe0, 0x68, 0x48, 0x82, 0x96, 0xff, 0x90, 0x03, 0x8f, - 0xf4, 0x53, 0xe1, 0x17, 0x0a, 0xe3, 0x11, 0x0f, 0x9c, 0x88, 0x62, 0x47, 0x65, 0x9c, 0x8e, 0xf8, - 0x8e, 0xa0, 0x22, 0xc5, 0xe5, 0x25, 0x9f, 0xd9, 0x5e, 0x23, 0x72, 0x30, 0x55, 0xe9, 0xa4, 0x57, - 0x5d, 0x53, 0x74, 0xa4, 0x25, 0x60, 0x05, 0x00, 0xb6, 0xef, 0xd3, 0x50, 0x60, 0xa8, 0xea, 0x75, - 0x9a, 0x17, 0x88, 0x9a, 0xa6, 0xa2, 0x94, 0x04, 0xbf, 0xd1, 0x0e, 0x6c, 0xcf, 0x52, 0xbb, 0xae, - 0x4f, 0xf1, 0xcb, 0xb6, 0x67, 0x21, 0xc1, 0xe1, 0xf8, 0x8e, 0xcd, 0x42, 0x4e, 0x51, 0x5b, 0xde, - 0x11, 0x75, 0x21, 0xa9, 0x25, 0x38, 0x7e, 0x9d, 0x57, 0x7d, 0x9f, 0xda, 0x84, 0x99, 0x63, 0x09, - 0xfe, 0xaa, 0xa6, 0xa2, 0x94, 0x44, 0xf9, 0xd7, 0x85, 0xfe, 0x49, 0xc2, 0x4b, 0x09, 0x7c, 0x14, - 0xe4, 0x1b, 0xd4, 0x8f, 0x02, 0x15, 0x25, 0x1d, 0xed, 0x17, 0x39, 0x11, 0x49, 0x1e, 0xcf, 0xca, - 0x66, 0x47, 0x9b, 0xaa, 0xb3, 0x32, 0x6e, 0x4e, 0x63, 0x3e, 0xfc, 0xbe, 0x01, 0xf2, 0x9e, 0x0a, - 0x0e, 0x4f, 0xb9, 0xd7, 0x87, 0x94, 0x17, 0x22, 0xbc, 0x89, 0xbb, 0x32, 0xf2, 0x12, 
0x19, 0x3e, - 0x0d, 0xf2, 0xac, 0xee, 0x07, 0x44, 0x45, 0xbd, 0x18, 0x0b, 0xd5, 0x38, 0xf1, 0xa4, 0x55, 0x9a, - 0x8e, 0xcd, 0x09, 0x02, 0x92, 0xc2, 0xf0, 0x47, 0x06, 0x00, 0x4d, 0xec, 0xd8, 0x16, 0x16, 0x2d, - 0x43, 0x5e, 0xb8, 0x3f, 0xd8, 0xb4, 0x7e, 0x45, 0x9b, 0x97, 0x9b, 0x96, 0x7c, 0xa3, 0x14, 0x34, - 0x7c, 0xcf, 0x00, 0x53, 0x2c, 0xda, 0xa5, 0x4a, 0x8b, 0x89, 0xe6, 0x62, 0xf2, 0xca, 0x37, 0x07, - 0xea, 0x4b, 0x2d, 0x05, 0x50, 0x9d, 0x6d, 0xb7, 0x4a, 0x53, 0x69, 0x0a, 0xea, 0x70, 0x00, 0xfe, - 0xd4, 0x00, 0x85, 0x66, 0x7c, 0x67, 0x8f, 0x8b, 0x03, 0xff, 0xe6, 0x90, 0x36, 0x56, 0x65, 0x54, - 0x72, 0x0a, 0x74, 0x1f, 0xa0, 0x3d, 0x80, 0x7f, 0x33, 0x80, 0x89, 0x2d, 0x59, 0xe0, 0xb1, 0xb3, - 0x43, 0x6d, 0x2f, 0x24, 0x54, 0xf6, 0x9b, 0xcc, 0x2c, 0x08, 0xf7, 0x06, 0x7b, 0x17, 0x66, 0x7b, - 0xd9, 0xea, 0xa2, 0xf2, 0xce, 0x5c, 0xe9, 0xe3, 0x06, 0xea, 0xeb, 0xa0, 0x48, 0xb4, 0xa4, 0xa5, - 0x31, 0x27, 0x86, 0x90, 0x68, 0x49, 0x2f, 0xa5, 0xaa, 0x43, 0xd2, 0x41, 0xa5, 0xa0, 0xcb, 0xef, - 0x8d, 0x64, 0x9b, 0xf6, 0xec, 0xa5, 0x0f, 0x3f, 0x90, 0xce, 0xca, 0xa5, 0x30, 0xd3, 0x10, 0xc1, - 0x7d, 0x7b, 0x48, 0x7b, 0xaf, 0x6f, 0xed, 0xa4, 0xf1, 0xd2, 0x24, 0x86, 0x52, 0x7e, 0xc0, 0x5f, - 0x19, 0x60, 0x1a, 0xd7, 0xeb, 0x24, 0x08, 0x89, 0x25, 0x6b, 0x71, 0xee, 0x73, 0x28, 0x37, 0xe7, - 0x94, 0x57, 0xd3, 0x2b, 0x69, 0x68, 0xd4, 0xe9, 0x09, 0x7c, 0x1e, 0x9c, 0x66, 0xa1, 0x4f, 0x89, - 0x95, 0xe9, 0x72, 0x61, 0xbb, 0x55, 0x3a, 0x5d, 0xeb, 0xe0, 0xa0, 0x8c, 0x64, 0xf9, 0xd3, 0x51, - 0x50, 0xba, 0xcb, 0xc9, 0xb8, 0x87, 0x77, 0xd4, 0xe3, 0x60, 0x4c, 0x2c, 0xd7, 0x12, 0x51, 0x29, - 0xa4, 0x3a, 0x37, 0x41, 0x45, 0x8a, 0xcb, 0xeb, 0x3a, 0xc7, 0xe7, 0xdd, 0xc6, 0x88, 0x10, 0xd4, - 0x75, 0xbd, 0x26, 0xc9, 0x28, 0xe6, 0xc3, 0x77, 0xc0, 0x98, 0x9c, 0x93, 0x88, 0xa2, 0x3a, 0xc4, - 0xc2, 0x08, 0x84, 0x9f, 0x02, 0x0a, 0x29, 0xc8, 0xee, 0x82, 0x98, 0x7f, 0xd0, 0x05, 0xf1, 0x8e, - 0x15, 0x68, 0xec, 0xff, 0xbc, 0x02, 0x95, 0xff, 0x6b, 0x64, 0xcf, 0x7d, 0x6a, 0xa9, 0xb5, 0x3a, - 0x76, 0x08, 0x5c, 0x03, 0xb3, 0xfc, 0x91, 0x81, 0x48, 0xe0, 0xd8, 0x75, 0xcc, 0xc4, 0x1b, 0x57, - 0x26, 0x9c, 0x1e, 0xbb, 0xd4, 0x32, 0x7c, 0xd4, 0xa5, 0x01, 0x5f, 0x02, 0x50, 0x36, 0xde, 0x1d, - 0x76, 0x64, 0x0f, 0xa1, 0x5b, 0xe8, 0x5a, 0x97, 0x04, 0xea, 0xa1, 0x05, 0x57, 0xc1, 0x9c, 0x83, - 0x77, 0x89, 0x53, 0x23, 0x0e, 0xa9, 0x87, 0x3e, 0x15, 0xa6, 0xe4, 0x14, 0xe0, 0x5c, 0xbb, 0x55, - 0x9a, 0xbb, 0x9a, 0x65, 0xa2, 0x6e, 0xf9, 0xf2, 0xc5, 0xec, 0xf1, 0x4a, 0x2f, 0x5c, 0x3e, 0x67, - 0x3e, 0xcc, 0x81, 0x85, 0xfe, 0x99, 0x01, 0x7f, 0x90, 0xbc, 0xba, 0x64, 0x53, 0xfd, 0xe6, 0xb0, - 0xb2, 0x50, 0x3d, 0xbb, 0x40, 0xf7, 0x93, 0x0b, 0x7e, 0x97, 0x77, 0x38, 0xd8, 0x89, 0xe7, 0x3c, - 0x6f, 0x0c, 0xcd, 0x05, 0x0e, 0x52, 0x9d, 0x90, 0xcd, 0x13, 0x76, 0x44, 0xaf, 0x84, 0x1d, 0x52, - 0xfe, 0xa3, 0x91, 0x7d, 0x78, 0x27, 0x27, 0x18, 0xfe, 0xcc, 0x00, 0x33, 0x7e, 0x40, 0xbc, 0x95, - 0x9d, 0x8d, 0x57, 0xbe, 0x22, 0x4f, 0xb2, 0x0a, 0xd5, 0xd6, 0x7d, 0xfa, 0xf9, 0x52, 0x6d, 0x7b, - 0x4b, 0x1a, 0xdc, 0xa1, 0x7e, 0xc0, 0xaa, 0x67, 0xda, 0xad, 0xd2, 0xcc, 0x76, 0x27, 0x14, 0xca, - 0x62, 0x97, 0x5d, 0x70, 0x6e, 0xfd, 0x28, 0x24, 0xd4, 0xc3, 0xce, 0x9a, 0x5f, 0x8f, 0x5c, 0xe2, - 0x85, 0xd2, 0xd1, 0xcc, 0x90, 0xc8, 0xb8, 0xc7, 0x21, 0xd1, 0x23, 0x60, 0x24, 0xa2, 0x8e, 0xca, - 0xe2, 0x49, 0x3d, 0x04, 0x45, 0x57, 0x11, 0xa7, 0x97, 0x2f, 0x82, 0x51, 0xee, 0x27, 0x3c, 0x0f, - 0x46, 0x28, 0x3e, 0x14, 0x56, 0xa7, 0xaa, 0xe3, 0x5c, 0x04, 0xe1, 0x43, 0xc4, 0x69, 0xe5, 0x7f, - 0x17, 0xc1, 0x4c, 0x66, 0x2d, 0x70, 0x01, 0xe4, 0xf4, 0x64, 0x15, 0x28, 0xa3, 0xb9, 0x8d, 0x35, - 0x94, 0xb3, 
0x2d, 0xf8, 0xac, 0x2e, 0xbe, 0x12, 0xb4, 0xa4, 0xeb, 0xb9, 0xa0, 0xf2, 0x96, 0x36, - 0x31, 0xc7, 0x1d, 0x89, 0x0b, 0x27, 0xf7, 0x81, 0xec, 0xa9, 0x53, 0x22, 0x7d, 0x20, 0x7b, 0x88, - 0xd3, 0x3e, 0xeb, 0x84, 0x2c, 0x1e, 0xd1, 0xe5, 0xef, 0x61, 0x44, 0x37, 0x76, 0xc7, 0x11, 0xdd, - 0xa3, 0x20, 0x1f, 0xda, 0xa1, 0x43, 0xcc, 0xf1, 0xce, 0x97, 0xc7, 0x35, 0x4e, 0x44, 0x92, 0x07, - 0x6f, 0x82, 0x71, 0x8b, 0xec, 0xe1, 0xc8, 0x09, 0xcd, 0x82, 0x48, 0xa1, 0xd5, 0x01, 0xa4, 0x90, - 0x9c, 0x9f, 0xae, 0x49, 0xbb, 0x28, 0x06, 0x80, 0x8f, 0x81, 0x71, 0x17, 0x1f, 0xd9, 0x6e, 0xe4, - 0x8a, 0x9e, 0xcc, 0x90, 0x62, 0x9b, 0x92, 0x84, 0x62, 0x1e, 0xaf, 0x8c, 0xe4, 0xa8, 0xee, 0x44, - 0xcc, 0x6e, 0x12, 0xc5, 0x34, 0x81, 0xb8, 0x3d, 0x75, 0x65, 0x5c, 0xcf, 0xf0, 0x51, 0x97, 0x86, - 0x00, 0xb3, 0x3d, 0xa1, 0x3c, 0x99, 0x02, 0x93, 0x24, 0x14, 0xf3, 0x3a, 0xc1, 0x94, 0xfc, 0x54, - 0x3f, 0x30, 0xa5, 0xdc, 0xa5, 0x01, 0xbf, 0x0c, 0x26, 0x5c, 0x7c, 0x74, 0x95, 0x78, 0x8d, 0x70, - 0xdf, 0x9c, 0x5e, 0x34, 0x96, 0x46, 0xaa, 0xd3, 0xed, 0x56, 0x69, 0x62, 0x33, 0x26, 0xa2, 0x84, - 0x2f, 0x84, 0x6d, 0x4f, 0x09, 0x9f, 0x4e, 0x09, 0xc7, 0x44, 0x94, 0xf0, 0x79, 0x07, 0x11, 0xe0, - 0x90, 0x1f, 0x2e, 0x73, 0xa6, 0xf3, 0x65, 0xb8, 0x23, 0xc9, 0x28, 0xe6, 0xc3, 0x25, 0x50, 0x70, - 0xf1, 0x91, 0x78, 0xc5, 0x9b, 0xb3, 0xc2, 0xac, 0x98, 0x25, 0x6f, 0x2a, 0x1a, 0xd2, 0x5c, 0x21, - 0x69, 0x7b, 0x52, 0x72, 0x2e, 0x25, 0xa9, 0x68, 0x48, 0x73, 0x79, 0x12, 0x47, 0x9e, 0x7d, 0x2b, - 0x22, 0x52, 0x18, 0x8a, 0xc8, 0xe8, 0x24, 0xbe, 0x9e, 0xb0, 0x50, 0x5a, 0x8e, 0xbf, 0xa2, 0xdd, - 0xc8, 0x09, 0xed, 0xc0, 0x21, 0xdb, 0x7b, 0xe6, 0x19, 0x11, 0x7f, 0xd1, 0x27, 0x6f, 0x6a, 0x2a, - 0x4a, 0x49, 0x40, 0x02, 0x46, 0x89, 0x17, 0xb9, 0xe6, 0x59, 0x71, 0xb1, 0x0f, 0x24, 0x05, 0xf5, - 0xc9, 0x59, 0xf7, 0x22, 0x17, 0x09, 0xf3, 0xf0, 0x59, 0x30, 0xed, 0xe2, 0x23, 0x5e, 0x0e, 0x08, - 0x0d, 0xf9, 0xfb, 0xfe, 0x9c, 0x58, 0xfc, 0x1c, 0xef, 0x38, 0x37, 0xd3, 0x0c, 0xd4, 0x29, 0x27, - 0x14, 0x6d, 0x2f, 0xa5, 0x38, 0x9f, 0x52, 0x4c, 0x33, 0x50, 0xa7, 0x1c, 0x8f, 0x34, 0x25, 0xb7, - 0x22, 0x9b, 0x12, 0xcb, 0x7c, 0x48, 0x34, 0xa9, 0x6a, 0xbe, 0x2f, 0x69, 0x48, 0x73, 0x61, 0x33, - 0x1e, 0xf7, 0x98, 0xe2, 0x18, 0x5e, 0x1f, 0x6c, 0x25, 0xdf, 0xa6, 0x2b, 0x94, 0xe2, 0x63, 0x79, - 0xd3, 0xa4, 0x07, 0x3d, 0x90, 0x81, 0x3c, 0x76, 0x9c, 0xed, 0x3d, 0xf3, 0xbc, 0x88, 0xfd, 0xa0, - 0x6f, 0x10, 0x5d, 0x75, 0x56, 0x38, 0x08, 0x92, 0x58, 0x1c, 0xd4, 0xf7, 0x78, 0x6a, 0x2c, 0x0c, - 0x17, 0x74, 0x9b, 0x83, 0x20, 0x89, 0x25, 0x56, 0xea, 0x1d, 0x6f, 0xef, 0x99, 0x0f, 0x0f, 0x79, - 0xa5, 0x1c, 0x04, 0x49, 0x2c, 0x68, 0x83, 0x11, 0xcf, 0x0f, 0xcd, 0x0b, 0x43, 0xb9, 0x9e, 0xc5, - 0x85, 0xb3, 0xe5, 0x87, 0x88, 0x63, 0xc0, 0x5f, 0x1a, 0x00, 0x04, 0x49, 0x8a, 0x3e, 0x32, 0x90, - 0x29, 0x42, 0x06, 0xb2, 0x92, 0xe4, 0xf6, 0xba, 0x17, 0xd2, 0xe3, 0xe4, 0x1d, 0x99, 0x3a, 0x03, - 0x29, 0x2f, 0xe0, 0xef, 0x0c, 0x70, 0x36, 0xdd, 0x26, 0x6b, 0xf7, 0x8a, 0x22, 0x22, 0xd7, 0x06, - 0x9d, 0xe6, 0x55, 0xdf, 0x77, 0xaa, 0x66, 0xbb, 0x55, 0x3a, 0xbb, 0xd2, 0x03, 0x15, 0xf5, 0xf4, - 0x05, 0xfe, 0xc9, 0x00, 0x73, 0xaa, 0x8a, 0xa6, 0x3c, 0x2c, 0x89, 0x00, 0x92, 0x41, 0x07, 0x30, - 0x8b, 0x23, 0xe3, 0xa8, 0x7f, 0x97, 0xee, 0xe2, 0xa3, 0x6e, 0xd7, 0xe0, 0x5f, 0x0d, 0x30, 0x65, - 0x91, 0x80, 0x78, 0x16, 0xf1, 0xea, 0xdc, 0xd7, 0xc5, 0x81, 0x8c, 0x0d, 0xb2, 0xbe, 0xae, 0xa5, - 0x20, 0xa4, 0x9b, 0x15, 0xe5, 0xe6, 0x54, 0x9a, 0x75, 0xd2, 0x2a, 0xcd, 0x27, 0xaa, 0x69, 0x0e, - 0xea, 0xf0, 0x12, 0xbe, 0x6f, 0x80, 0x99, 0x64, 0x03, 0xe4, 0x95, 0x72, 0x71, 0x88, 0x79, 0x20, - 0xda, 0xd7, 0x95, 0x4e, 0x40, 0x94, 
0xf5, 0x00, 0xfe, 0xd9, 0xe0, 0x9d, 0x5a, 0xfc, 0xee, 0x63, - 0x66, 0x59, 0xc4, 0xf2, 0xad, 0x81, 0xc7, 0x52, 0x23, 0xc8, 0x50, 0x5e, 0x4a, 0x5a, 0x41, 0xcd, - 0x39, 0x69, 0x95, 0xce, 0xa5, 0x23, 0xa9, 0x19, 0x28, 0xed, 0x21, 0xfc, 0x89, 0x01, 0xa6, 0x48, - 0xd2, 0x71, 0x33, 0xf3, 0xd1, 0x81, 0x04, 0xb1, 0x67, 0x13, 0x2f, 0x5f, 0xea, 0x29, 0x16, 0x43, - 0x1d, 0xd8, 0xbc, 0x83, 0x24, 0x47, 0xd8, 0x0d, 0x1c, 0x62, 0x7e, 0x61, 0xc0, 0x1d, 0xe4, 0xba, - 0xb4, 0x8b, 0x62, 0x00, 0x78, 0x09, 0x14, 0xbc, 0xc8, 0x71, 0xf0, 0xae, 0x43, 0xcc, 0xc7, 0x44, - 0x2f, 0xa2, 0xa7, 0x98, 0x5b, 0x8a, 0x8e, 0xb4, 0x04, 0xbc, 0x09, 0x16, 0x8f, 0x5e, 0xd6, 0xff, - 0xd1, 0xb3, 0x43, 0x89, 0xc0, 0xbf, 0xee, 0x1d, 0x78, 0xfe, 0xa1, 0xf7, 0x82, 0x4d, 0x1c, 0x8b, - 0x99, 0x8f, 0x0b, 0x2b, 0xf1, 0x04, 0x7b, 0xfe, 0x46, 0x4f, 0x29, 0x74, 0x57, 0x3b, 0xf0, 0x35, - 0xf0, 0x70, 0x4a, 0x66, 0xdd, 0xdd, 0x25, 0x96, 0x45, 0xac, 0xf8, 0xf1, 0x66, 0x7e, 0x51, 0xc0, - 0xe8, 0x43, 0x7e, 0x23, 0x2b, 0x80, 0xee, 0xa4, 0x0d, 0xaf, 0x82, 0xf9, 0x14, 0x7b, 0xc3, 0x0b, - 0xb7, 0x69, 0x2d, 0xa4, 0xb6, 0xd7, 0x30, 0x97, 0x84, 0xdd, 0xb3, 0xf1, 0xa9, 0xbc, 0x91, 0xe2, - 0xa1, 0x3e, 0x3a, 0x0b, 0xfc, 0xf9, 0x98, 0x29, 0x3f, 0x70, 0x16, 0x8c, 0x1c, 0x10, 0xf5, 0x2b, - 0x39, 0xe2, 0x7f, 0x42, 0x0b, 0xe4, 0x9b, 0xd8, 0x89, 0xe2, 0x17, 0xf0, 0x80, 0xaf, 0x2e, 0x24, - 0x8d, 0x3f, 0x9f, 0x7b, 0xce, 0x58, 0xf8, 0xc0, 0x00, 0xf3, 0xbd, 0xab, 0xe2, 0x03, 0x75, 0xeb, - 0x37, 0x06, 0x98, 0xeb, 0x2a, 0x80, 0x3d, 0x3c, 0xba, 0xd5, 0xe9, 0xd1, 0x6b, 0x83, 0xae, 0x64, - 0x72, 0xd7, 0x44, 0xfb, 0x96, 0x76, 0xef, 0xe7, 0x06, 0x98, 0xcd, 0xd6, 0x94, 0x07, 0x19, 0xaf, - 0xf2, 0x07, 0x39, 0x30, 0xdf, 0xbb, 0xeb, 0x84, 0x54, 0x3f, 0xaf, 0x87, 0x33, 0xa6, 0xe8, 0x35, - 0xd2, 0x7c, 0xd7, 0x00, 0x93, 0x37, 0xb5, 0x5c, 0xfc, 0x2b, 0xea, 0xc0, 0x07, 0x24, 0x71, 0x11, - 0x4f, 0x18, 0x0c, 0xa5, 0x71, 0xcb, 0x7f, 0x31, 0xc0, 0xb9, 0x9e, 0xb7, 0x13, 0x7f, 0xc7, 0x63, - 0xc7, 0xf1, 0x0f, 0xe5, 0x9c, 0x2b, 0x35, 0x44, 0x5e, 0x11, 0x54, 0xa4, 0xb8, 0xa9, 0xe8, 0xe5, - 0x3e, 0xaf, 0xe8, 0x95, 0xff, 0x6e, 0x80, 0x0b, 0x77, 0xca, 0xc4, 0x07, 0xb2, 0xa5, 0x4b, 0xa0, - 0xa0, 0x3a, 0xcb, 0x63, 0xb1, 0x9d, 0xea, 0x31, 0xa5, 0x8a, 0x86, 0xf8, 0xc7, 0x21, 0xf9, 0x57, - 0xf9, 0x43, 0x03, 0xcc, 0xd6, 0x08, 0x6d, 0xda, 0x75, 0x82, 0xc8, 0x1e, 0xa1, 0xc4, 0xab, 0x13, - 0xb8, 0x0c, 0x26, 0xc4, 0xcf, 0x97, 0x01, 0xae, 0xc7, 0xb3, 0xfd, 0x39, 0x15, 0xf2, 0x89, 0xad, - 0x98, 0x81, 0x12, 0x19, 0xfd, 0x3b, 0x40, 0xae, 0xef, 0xef, 0x00, 0x17, 0xc0, 0x68, 0x90, 0x4c, - 0x49, 0x0b, 0x9c, 0x2b, 0x06, 0xa3, 0x82, 0x2a, 0xb8, 0x3e, 0x0d, 0xc5, 0xe8, 0x27, 0xaf, 0xb8, - 0x3e, 0x0d, 0x91, 0xa0, 0x96, 0xff, 0x61, 0x80, 0x5e, 0xff, 0xe2, 0x03, 0x9b, 0x60, 0x9c, 0x49, - 0xd7, 0x55, 0x68, 0xb7, 0xef, 0x33, 0xb4, 0xd9, 0x40, 0xc8, 0xbb, 0x35, 0xa6, 0xc6, 0x60, 0x3c, - 0xba, 0x75, 0x5c, 0x8d, 0x3c, 0x4b, 0x4d, 0x3d, 0xa7, 0x64, 0x74, 0x57, 0x57, 0x24, 0x0d, 0x69, - 0x2e, 0x3c, 0x2f, 0xe7, 0x73, 0xa9, 0xa1, 0x57, 0x3c, 0x9b, 0xab, 0x5e, 0xfe, 0xe8, 0x76, 0xf1, - 0xd4, 0xc7, 0xb7, 0x8b, 0xa7, 0x3e, 0xb9, 0x5d, 0x3c, 0xf5, 0xbd, 0x76, 0xd1, 0xf8, 0xa8, 0x5d, - 0x34, 0x3e, 0x6e, 0x17, 0x8d, 0x4f, 0xda, 0x45, 0xe3, 0x5f, 0xed, 0xa2, 0xf1, 0x8b, 0x4f, 0x8b, - 0xa7, 0xbe, 0x35, 0xae, 0x5c, 0xfb, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0x57, 0xd1, 0xa5, - 0x78, 0x2b, 0x00, 0x00, + 0x15, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb2, 0xa4, 0xb6, 0xad, 0x8c, 0x15, 0x67, 0x57, 0xde, + 0x90, 0x20, 0x82, 0xbd, 0x4a, 0x4c, 0x42, 0x42, 0xaa, 0x38, 0x68, 0x25, 0x25, 0xa5, 0xc4, 0xfa, + 0xa0, 0xd7, 0x4e, 0x0c, 0xf9, 0x6c, 
0xcd, 0xf4, 0xae, 0xc6, 0x9a, 0x2f, 0x4f, 0xcf, 0xac, 0xa4, + 0x0a, 0x50, 0x7c, 0x54, 0x0a, 0x8a, 0x02, 0x42, 0x91, 0x5c, 0x28, 0xe0, 0x10, 0x28, 0x2e, 0x1c, + 0xe0, 0x00, 0x37, 0xf8, 0x03, 0x72, 0x4c, 0x71, 0xca, 0x81, 0xda, 0xc2, 0x9b, 0x33, 0x37, 0xaa, + 0xa8, 0xd2, 0x89, 0xea, 0x8f, 0xe9, 0x99, 0x9d, 0xdd, 0xb5, 0x5d, 0xf1, 0x6e, 0xcc, 0x4d, 0xf3, + 0xbe, 0x7e, 0xaf, 0x5f, 0xbf, 0x7e, 0xfd, 0xfa, 0xad, 0x40, 0xe3, 0xe0, 0x39, 0x5a, 0xb5, 0xbc, + 0x95, 0x83, 0x68, 0x8f, 0x04, 0x2e, 0x09, 0x09, 0x5d, 0x69, 0x11, 0xd7, 0xf4, 0x82, 0x15, 0xc9, + 0xc0, 0xbe, 0x45, 0x8e, 0x42, 0xe2, 0x52, 0xcb, 0x73, 0xe9, 0x65, 0xec, 0x5b, 0x94, 0x04, 0x2d, + 0x12, 0xac, 0xf8, 0x07, 0x4d, 0xc6, 0xa3, 0xdd, 0x02, 0x2b, 0xad, 0xa7, 0xf6, 0x48, 0x88, 0x9f, + 0x5a, 0x69, 0x12, 0x97, 0x04, 0x38, 0x24, 0x66, 0xd5, 0x0f, 0xbc, 0xd0, 0x83, 0x5f, 0x17, 0xe6, + 0xaa, 0x5d, 0xd2, 0x6f, 0x29, 0x73, 0x55, 0xff, 0xa0, 0xc9, 0x78, 0xb4, 0x5b, 0xa0, 0x2a, 0xcd, + 0x2d, 0x5e, 0x6e, 0x5a, 0xe1, 0x7e, 0xb4, 0x57, 0x35, 0x3c, 0x67, 0xa5, 0xe9, 0x35, 0xbd, 0x15, + 0x6e, 0x75, 0x2f, 0x6a, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0x4f, 0x27, 0xce, 0x3b, + 0xd8, 0xd8, 0xb7, 0x5c, 0x12, 0x1c, 0x27, 0x1e, 0x3b, 0x24, 0xc4, 0x2b, 0xad, 0x1e, 0x1f, 0x17, + 0x57, 0x06, 0x69, 0x05, 0x91, 0x1b, 0x5a, 0x0e, 0xe9, 0x51, 0xf8, 0xea, 0xdd, 0x14, 0xa8, 0xb1, + 0x4f, 0x1c, 0x9c, 0xd5, 0xab, 0x9c, 0x68, 0x60, 0x7e, 0xcd, 0x73, 0x5b, 0x24, 0x60, 0xab, 0x44, + 0xe4, 0x56, 0x44, 0x68, 0x08, 0x6b, 0x20, 0x1f, 0x59, 0xa6, 0xae, 0x2d, 0x69, 0xcb, 0x93, 0xb5, + 0x27, 0x3f, 0x6a, 0x97, 0x4f, 0x75, 0xda, 0xe5, 0xfc, 0xf5, 0xcd, 0xf5, 0x93, 0x76, 0xf9, 0xe2, + 0x20, 0xa4, 0xf0, 0xd8, 0x27, 0xb4, 0x7a, 0x7d, 0x73, 0x1d, 0x31, 0x65, 0xf8, 0x22, 0x98, 0x37, + 0x09, 0xb5, 0x02, 0x62, 0xae, 0xee, 0x6e, 0xbe, 0x22, 0xec, 0xeb, 0x39, 0x6e, 0xf1, 0xbc, 0xb4, + 0x38, 0xbf, 0x9e, 0x15, 0x40, 0xbd, 0x3a, 0xf0, 0x06, 0x98, 0xf0, 0xf6, 0x6e, 0x12, 0x23, 0xa4, + 0x7a, 0x7e, 0x29, 0xbf, 0x3c, 0x75, 0xe5, 0x72, 0x35, 0xd9, 0x41, 0xe5, 0x02, 0xdf, 0x36, 0xb9, + 0xd8, 0x2a, 0xc2, 0x87, 0x1b, 0xf1, 0xce, 0xd5, 0x66, 0x25, 0xda, 0xc4, 0x8e, 0xb0, 0x82, 0x62, + 0x73, 0x95, 0xdf, 0xe7, 0x00, 0x4c, 0x2f, 0x9e, 0xfa, 0x9e, 0x4b, 0xc9, 0x50, 0x56, 0x4f, 0xc1, + 0x9c, 0xc1, 0x2d, 0x87, 0xc4, 0x94, 0xb8, 0x7a, 0xee, 0xb3, 0x78, 0xaf, 0x4b, 0xfc, 0xb9, 0xb5, + 0x8c, 0x39, 0xd4, 0x03, 0x00, 0xaf, 0x81, 0xf1, 0x80, 0xd0, 0xc8, 0x0e, 0xf5, 0xfc, 0x92, 0xb6, + 0x3c, 0x75, 0xe5, 0xd2, 0x40, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, 0xaa, 0xad, 0xa7, 0xaa, 0xf5, 0x10, + 0x87, 0x11, 0xad, 0x9d, 0x96, 0x48, 0xe3, 0x88, 0xdb, 0x40, 0xd2, 0x56, 0xe5, 0xc7, 0x39, 0x30, + 0x97, 0x8e, 0x52, 0xcb, 0x22, 0x87, 0xf0, 0x10, 0x4c, 0x04, 0x22, 0x59, 0x78, 0x9c, 0xa6, 0xae, + 0xec, 0x56, 0xef, 0xeb, 0x58, 0x55, 0x7b, 0x92, 0xb0, 0x36, 0xc5, 0xf6, 0x4c, 0x7e, 0xa0, 0x18, + 0x0d, 0xbe, 0x03, 0x8a, 0x81, 0xdc, 0x28, 0x9e, 0x4d, 0x53, 0x57, 0xbe, 0x31, 0x44, 0x64, 0x61, + 0xb8, 0x36, 0xdd, 0x69, 0x97, 0x8b, 0xf1, 0x17, 0x52, 0x80, 0x95, 0xf7, 0x73, 0xa0, 0xb4, 0x16, + 0xd1, 0xd0, 0x73, 0x10, 0xa1, 0x5e, 0x14, 0x18, 0x64, 0xcd, 0xb3, 0x23, 0xc7, 0x5d, 0x27, 0x0d, + 0xcb, 0xb5, 0x42, 0x96, 0xad, 0x4b, 0x60, 0xcc, 0xc5, 0x0e, 0x91, 0xd9, 0x33, 0x2d, 0x63, 0x3a, + 0xb6, 0x8d, 0x1d, 0x82, 0x38, 0x87, 0x49, 0xb0, 0x64, 0x91, 0x67, 0x41, 0x49, 0x5c, 0x3b, 0xf6, + 0x09, 0xe2, 0x1c, 0xf8, 0x38, 0x18, 0x6f, 0x78, 0x81, 0x83, 0xc5, 0x3e, 0x4e, 0x26, 0x3b, 0xf3, + 0x02, 0xa7, 0x22, 0xc9, 0x85, 0xcf, 0x80, 0x29, 0x93, 0x50, 0x23, 0xb0, 0x7c, 0x06, 0xad, 0x8f, + 0x71, 0xe1, 0x33, 0x52, 0x78, 0x6a, 0x3d, 0x61, 0xa1, 0xb4, 
0x1c, 0xbc, 0x04, 0x8a, 0x7e, 0x60, + 0x79, 0x81, 0x15, 0x1e, 0xeb, 0x85, 0x25, 0x6d, 0xb9, 0x50, 0x9b, 0x93, 0x3a, 0xc5, 0x5d, 0x49, + 0x47, 0x4a, 0x02, 0x2e, 0x81, 0xe2, 0x4b, 0xf5, 0x9d, 0xed, 0x5d, 0x1c, 0xee, 0xeb, 0xe3, 0x1c, + 0x61, 0x8c, 0x49, 0xa3, 0xe2, 0x4d, 0x49, 0xad, 0xfc, 0x33, 0x07, 0xf4, 0x6c, 0x54, 0xe2, 0x90, + 0xc2, 0x17, 0x40, 0x91, 0x86, 0xac, 0xe2, 0x34, 0x8f, 0x65, 0x4c, 0x9e, 0x88, 0xc1, 0xea, 0x92, + 0x7e, 0xd2, 0x2e, 0x2f, 0x24, 0x1a, 0x31, 0x95, 0xc7, 0x43, 0xe9, 0xc2, 0xdf, 0x6a, 0xe0, 0xcc, + 0x21, 0xd9, 0xdb, 0xf7, 0xbc, 0x83, 0x35, 0xdb, 0x22, 0x6e, 0xb8, 0xe6, 0xb9, 0x0d, 0xab, 0x29, + 0x73, 0x00, 0xdd, 0x67, 0x0e, 0xbc, 0xda, 0x6b, 0xb9, 0xf6, 0x50, 0xa7, 0x5d, 0x3e, 0xd3, 0x87, + 0x81, 0xfa, 0xf9, 0x01, 0x6f, 0x00, 0xdd, 0xc8, 0x1c, 0x12, 0x59, 0xc0, 0x44, 0xd9, 0x9a, 0xac, + 0x5d, 0xe8, 0xb4, 0xcb, 0xfa, 0xda, 0x00, 0x19, 0x34, 0x50, 0xbb, 0xf2, 0xc3, 0x7c, 0x36, 0xbc, + 0xa9, 0x74, 0x7b, 0x1b, 0x14, 0xd9, 0x31, 0x36, 0x71, 0x88, 0xe5, 0x41, 0x7c, 0xf2, 0xde, 0x0e, + 0xbd, 0xa8, 0x19, 0x5b, 0x24, 0xc4, 0x35, 0x28, 0x37, 0x04, 0x24, 0x34, 0xa4, 0xac, 0xc2, 0xef, + 0x80, 0x31, 0xea, 0x13, 0x43, 0x06, 0xfa, 0xb5, 0xfb, 0x3d, 0x6c, 0x03, 0x16, 0x52, 0xf7, 0x89, + 0x91, 0x9c, 0x05, 0xf6, 0x85, 0x38, 0x2c, 0x7c, 0x57, 0x03, 0xe3, 0x94, 0x17, 0x28, 0x59, 0xd4, + 0xde, 0x18, 0x95, 0x07, 0x99, 0x2a, 0x28, 0xbe, 0x91, 0x04, 0xaf, 0xfc, 0x27, 0x07, 0x2e, 0x0e, + 0x52, 0x5d, 0xf3, 0x5c, 0x53, 0x6c, 0xc7, 0xa6, 0x3c, 0xdb, 0x22, 0xd3, 0x9f, 0x49, 0x9f, 0xed, + 0x93, 0x76, 0xf9, 0xb1, 0xbb, 0x1a, 0x48, 0x15, 0x81, 0xaf, 0xa9, 0x75, 0x8b, 0x42, 0x71, 0xb1, + 0xdb, 0xb1, 0x93, 0x76, 0x79, 0x56, 0xa9, 0x75, 0xfb, 0x0a, 0x5b, 0x00, 0xda, 0x98, 0x86, 0xd7, + 0x02, 0xec, 0x52, 0x61, 0xd6, 0x72, 0x88, 0x0c, 0xdf, 0x13, 0xf7, 0x96, 0x1e, 0x4c, 0xa3, 0xb6, + 0x28, 0x21, 0xe1, 0xd5, 0x1e, 0x6b, 0xa8, 0x0f, 0x02, 0xab, 0x5b, 0x01, 0xc1, 0x54, 0x95, 0xa2, + 0xd4, 0x8d, 0xc2, 0xa8, 0x48, 0x72, 0xe1, 0x97, 0xc0, 0x84, 0x43, 0x28, 0xc5, 0x4d, 0xc2, 0xeb, + 0xcf, 0x64, 0x72, 0x45, 0x6f, 0x09, 0x32, 0x8a, 0xf9, 0xac, 0x3f, 0xb9, 0x30, 0x28, 0x6a, 0x57, + 0x2d, 0x1a, 0xc2, 0xd7, 0x7b, 0x0e, 0x40, 0xf5, 0xde, 0x56, 0xc8, 0xb4, 0x79, 0xfa, 0xab, 0xe2, + 0x17, 0x53, 0x52, 0xc9, 0xff, 0x6d, 0x50, 0xb0, 0x42, 0xe2, 0xc4, 0x77, 0xf7, 0xab, 0x23, 0xca, + 0xbd, 0xda, 0x8c, 0xf4, 0xa1, 0xb0, 0xc9, 0xd0, 0x90, 0x00, 0xad, 0xfc, 0x21, 0x07, 0x1e, 0x19, + 0xa4, 0xc2, 0x2e, 0x14, 0xca, 0x22, 0xee, 0xdb, 0x51, 0x80, 0x6d, 0x99, 0x71, 0x2a, 0xe2, 0xbb, + 0x9c, 0x8a, 0x24, 0x97, 0x95, 0x7c, 0x6a, 0xb9, 0xcd, 0xc8, 0xc6, 0x81, 0x4c, 0x27, 0xb5, 0xea, + 0xba, 0xa4, 0x23, 0x25, 0x01, 0xab, 0x00, 0xd0, 0x7d, 0x2f, 0x08, 0x39, 0x86, 0xac, 0x5e, 0xa7, + 0x59, 0x81, 0xa8, 0x2b, 0x2a, 0x4a, 0x49, 0xb0, 0x1b, 0xed, 0xc0, 0x72, 0x4d, 0xb9, 0xeb, 0xea, + 0x14, 0xbf, 0x6c, 0xb9, 0x26, 0xe2, 0x1c, 0x86, 0x6f, 0x5b, 0x34, 0x64, 0x14, 0xb9, 0xe5, 0x5d, + 0x51, 0xe7, 0x92, 0x4a, 0x82, 0xe1, 0x1b, 0xac, 0xea, 0x7b, 0x81, 0x45, 0xa8, 0x3e, 0x9e, 0xe0, + 0xaf, 0x29, 0x2a, 0x4a, 0x49, 0x54, 0x7e, 0x5d, 0x1c, 0x9c, 0x24, 0xac, 0x94, 0xc0, 0x47, 0x41, + 0xa1, 0x19, 0x78, 0x91, 0x2f, 0xa3, 0xa4, 0xa2, 0xfd, 0x22, 0x23, 0x22, 0xc1, 0x63, 0x59, 0xd9, + 0xea, 0x6a, 0x53, 0x55, 0x56, 0xc6, 0xcd, 0x69, 0xcc, 0x87, 0xdf, 0xd7, 0x40, 0xc1, 0x95, 0xc1, + 0x61, 0x29, 0xf7, 0xfa, 0x88, 0xf2, 0x82, 0x87, 0x37, 0x71, 0x57, 0x44, 0x5e, 0x20, 0xc3, 0xa7, + 0x41, 0x81, 0x1a, 0x9e, 0x4f, 0x64, 0xd4, 0x4b, 0xb1, 0x50, 0x9d, 0x11, 0x4f, 0xda, 0xe5, 0x99, + 0xd8, 0x1c, 0x27, 0x20, 0x21, 0x0c, 0x7f, 0xa4, 0x01, 0xd0, 0xc2, 0xb6, 0x65, 0x62, 
0xde, 0x32, + 0x14, 0xb8, 0xfb, 0xc3, 0x4d, 0xeb, 0x57, 0x94, 0x79, 0xb1, 0x69, 0xc9, 0x37, 0x4a, 0x41, 0xc3, + 0xf7, 0x34, 0x30, 0x4d, 0xa3, 0xbd, 0x40, 0x6a, 0x51, 0xde, 0x5c, 0x4c, 0x5d, 0xf9, 0xe6, 0x50, + 0x7d, 0xa9, 0xa7, 0x00, 0x6a, 0x73, 0x9d, 0x76, 0x79, 0x3a, 0x4d, 0x41, 0x5d, 0x0e, 0xc0, 0x9f, + 0x6a, 0xa0, 0xd8, 0x8a, 0xef, 0xec, 0x09, 0x7e, 0xe0, 0xdf, 0x1c, 0xd1, 0xc6, 0xca, 0x8c, 0x4a, + 0x4e, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x6f, 0x1a, 0xd0, 0xb1, 0x29, 0x0a, 0x3c, 0xb6, 0x77, + 0x03, 0xcb, 0x0d, 0x49, 0x20, 0xfa, 0x4d, 0xaa, 0x17, 0xb9, 0x7b, 0xc3, 0xbd, 0x0b, 0xb3, 0xbd, + 0x6c, 0x6d, 0x49, 0x7a, 0xa7, 0xaf, 0x0e, 0x70, 0x03, 0x0d, 0x74, 0x90, 0x27, 0x5a, 0xd2, 0xd2, + 0xe8, 0x93, 0x23, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, 0xa0, 0x52, 0xd0, 0x95, 0xf7, + 0xf2, 0xd9, 0xa6, 0x3d, 0x7b, 0xe9, 0xc3, 0x0f, 0x84, 0xb3, 0x62, 0x29, 0x54, 0xd7, 0x78, 0x70, + 0xdf, 0x1e, 0xd1, 0xde, 0xab, 0x5b, 0x3b, 0x69, 0xbc, 0x14, 0x89, 0xa2, 0x94, 0x1f, 0xf0, 0x57, + 0x1a, 0x98, 0xc1, 0x86, 0x41, 0xfc, 0x90, 0x98, 0xa2, 0x16, 0xe7, 0x3e, 0x87, 0x72, 0x73, 0x4e, + 0x7a, 0x35, 0xb3, 0x9a, 0x86, 0x46, 0xdd, 0x9e, 0xc0, 0xe7, 0xc1, 0x69, 0x1a, 0x7a, 0x01, 0x31, + 0x33, 0x5d, 0x2e, 0xec, 0xb4, 0xcb, 0xa7, 0xeb, 0x5d, 0x1c, 0x94, 0x91, 0xac, 0x7c, 0x3a, 0x06, + 0xca, 0x77, 0x39, 0x19, 0xf7, 0xf0, 0x8e, 0x7a, 0x1c, 0x8c, 0xf3, 0xe5, 0x9a, 0x3c, 0x2a, 0xc5, + 0x54, 0xe7, 0xc6, 0xa9, 0x48, 0x72, 0x59, 0x5d, 0x67, 0xf8, 0xac, 0xdb, 0xc8, 0x73, 0x41, 0x55, + 0xd7, 0xeb, 0x82, 0x8c, 0x62, 0x3e, 0x7c, 0x07, 0x8c, 0x8b, 0x39, 0x09, 0x2f, 0xaa, 0x23, 0x2c, + 0x8c, 0x80, 0xfb, 0xc9, 0xa1, 0x90, 0x84, 0xec, 0x2d, 0x88, 0x85, 0x07, 0x5d, 0x10, 0xef, 0x58, + 0x81, 0xc6, 0xff, 0xcf, 0x2b, 0x50, 0xe5, 0xbf, 0x5a, 0xf6, 0xdc, 0xa7, 0x96, 0x5a, 0x37, 0xb0, + 0x4d, 0xe0, 0x3a, 0x98, 0x63, 0x8f, 0x0c, 0x44, 0x7c, 0xdb, 0x32, 0x30, 0xe5, 0x6f, 0x5c, 0x91, + 0x70, 0x6a, 0xec, 0x52, 0xcf, 0xf0, 0x51, 0x8f, 0x06, 0x7c, 0x09, 0x40, 0xd1, 0x78, 0x77, 0xd9, + 0x11, 0x3d, 0x84, 0x6a, 0xa1, 0xeb, 0x3d, 0x12, 0xa8, 0x8f, 0x16, 0x5c, 0x03, 0xf3, 0x36, 0xde, + 0x23, 0x76, 0x9d, 0xd8, 0xc4, 0x08, 0xbd, 0x80, 0x9b, 0x12, 0x53, 0x80, 0x73, 0x9d, 0x76, 0x79, + 0xfe, 0x6a, 0x96, 0x89, 0x7a, 0xe5, 0x2b, 0x17, 0xb3, 0xc7, 0x2b, 0xbd, 0x70, 0xf1, 0x9c, 0xf9, + 0x30, 0x07, 0x16, 0x07, 0x67, 0x06, 0xfc, 0x41, 0xf2, 0xea, 0x12, 0x4d, 0xf5, 0x9b, 0xa3, 0xca, + 0x42, 0xf9, 0xec, 0x02, 0xbd, 0x4f, 0x2e, 0xf8, 0x5d, 0xd6, 0xe1, 0x60, 0x3b, 0x9e, 0xf3, 0xbc, + 0x31, 0x32, 0x17, 0x18, 0x48, 0x6d, 0x52, 0x34, 0x4f, 0xd8, 0xe6, 0xbd, 0x12, 0xb6, 0x49, 0xe5, + 0x8f, 0x5a, 0xf6, 0xe1, 0x9d, 0x9c, 0x60, 0xf8, 0x33, 0x0d, 0xcc, 0x7a, 0x3e, 0x71, 0x57, 0x77, + 0x37, 0x5f, 0xf9, 0x8a, 0x38, 0xc9, 0x32, 0x54, 0xdb, 0xf7, 0xe9, 0xe7, 0x4b, 0xf5, 0x9d, 0x6d, + 0x61, 0x70, 0x37, 0xf0, 0x7c, 0x5a, 0x3b, 0xd3, 0x69, 0x97, 0x67, 0x77, 0xba, 0xa1, 0x50, 0x16, + 0xbb, 0xe2, 0x80, 0x73, 0x1b, 0x47, 0x21, 0x09, 0x5c, 0x6c, 0xaf, 0x7b, 0x46, 0xe4, 0x10, 0x37, + 0x14, 0x8e, 0x66, 0x86, 0x44, 0xda, 0x3d, 0x0e, 0x89, 0x1e, 0x01, 0xf9, 0x28, 0xb0, 0x65, 0x16, + 0x4f, 0xa9, 0x21, 0x28, 0xba, 0x8a, 0x18, 0xbd, 0x72, 0x11, 0x8c, 0x31, 0x3f, 0xe1, 0x79, 0x90, + 0x0f, 0xf0, 0x21, 0xb7, 0x3a, 0x5d, 0x9b, 0x60, 0x22, 0x08, 0x1f, 0x22, 0x46, 0xab, 0xfc, 0xbb, + 0x04, 0x66, 0x33, 0x6b, 0x81, 0x8b, 0x20, 0xa7, 0x26, 0xab, 0x40, 0x1a, 0xcd, 0x6d, 0xae, 0xa3, + 0x9c, 0x65, 0xc2, 0x67, 0x55, 0xf1, 0x15, 0xa0, 0x65, 0x55, 0xcf, 0x39, 0x95, 0xb5, 0xb4, 0x89, + 0x39, 0xe6, 0x48, 0x5c, 0x38, 0x99, 0x0f, 0xa4, 0x21, 0x4f, 0x89, 0xf0, 0x81, 0x34, 0x10, 0xa3, + 0x7d, 0xd6, 
0x09, 0x59, 0x3c, 0xa2, 0x2b, 0xdc, 0xc3, 0x88, 0x6e, 0xfc, 0x8e, 0x23, 0xba, 0x47, + 0x41, 0x21, 0xb4, 0x42, 0x9b, 0xe8, 0x13, 0xdd, 0x2f, 0x8f, 0x6b, 0x8c, 0x88, 0x04, 0x0f, 0xde, + 0x04, 0x13, 0x26, 0x69, 0xe0, 0xc8, 0x0e, 0xf5, 0x22, 0x4f, 0xa1, 0xb5, 0x21, 0xa4, 0x90, 0x98, + 0x9f, 0xae, 0x0b, 0xbb, 0x28, 0x06, 0x80, 0x8f, 0x81, 0x09, 0x07, 0x1f, 0x59, 0x4e, 0xe4, 0xf0, + 0x9e, 0x4c, 0x13, 0x62, 0x5b, 0x82, 0x84, 0x62, 0x1e, 0xab, 0x8c, 0xe4, 0xc8, 0xb0, 0x23, 0x6a, + 0xb5, 0x88, 0x64, 0xea, 0x80, 0xdf, 0x9e, 0xaa, 0x32, 0x6e, 0x64, 0xf8, 0xa8, 0x47, 0x83, 0x83, + 0x59, 0x2e, 0x57, 0x9e, 0x4a, 0x81, 0x09, 0x12, 0x8a, 0x79, 0xdd, 0x60, 0x52, 0x7e, 0x7a, 0x10, + 0x98, 0x54, 0xee, 0xd1, 0x80, 0x5f, 0x06, 0x93, 0x0e, 0x3e, 0xba, 0x4a, 0xdc, 0x66, 0xb8, 0xaf, + 0xcf, 0x2c, 0x69, 0xcb, 0xf9, 0xda, 0x4c, 0xa7, 0x5d, 0x9e, 0xdc, 0x8a, 0x89, 0x28, 0xe1, 0x73, + 0x61, 0xcb, 0x95, 0xc2, 0xa7, 0x53, 0xc2, 0x31, 0x11, 0x25, 0x7c, 0xd6, 0x41, 0xf8, 0x38, 0x64, + 0x87, 0x4b, 0x9f, 0xed, 0x7e, 0x19, 0xee, 0x0a, 0x32, 0x8a, 0xf9, 0x70, 0x19, 0x14, 0x1d, 0x7c, + 0xc4, 0x5f, 0xf1, 0xfa, 0x1c, 0x37, 0xcb, 0x67, 0xc9, 0x5b, 0x92, 0x86, 0x14, 0x97, 0x4b, 0x5a, + 0xae, 0x90, 0x9c, 0x4f, 0x49, 0x4a, 0x1a, 0x52, 0x5c, 0x96, 0xc4, 0x91, 0x6b, 0xdd, 0x8a, 0x88, + 0x10, 0x86, 0x3c, 0x32, 0x2a, 0x89, 0xaf, 0x27, 0x2c, 0x94, 0x96, 0x63, 0xaf, 0x68, 0x27, 0xb2, + 0x43, 0xcb, 0xb7, 0xc9, 0x4e, 0x43, 0x3f, 0xc3, 0xe3, 0xcf, 0xfb, 0xe4, 0x2d, 0x45, 0x45, 0x29, + 0x09, 0x48, 0xc0, 0x18, 0x71, 0x23, 0x47, 0x3f, 0xcb, 0x2f, 0xf6, 0xa1, 0xa4, 0xa0, 0x3a, 0x39, + 0x1b, 0x6e, 0xe4, 0x20, 0x6e, 0x1e, 0x3e, 0x0b, 0x66, 0x1c, 0x7c, 0xc4, 0xca, 0x01, 0x09, 0x42, + 0xf6, 0xbe, 0x3f, 0xc7, 0x17, 0x3f, 0xcf, 0x3a, 0xce, 0xad, 0x34, 0x03, 0x75, 0xcb, 0x71, 0x45, + 0xcb, 0x4d, 0x29, 0x2e, 0xa4, 0x14, 0xd3, 0x0c, 0xd4, 0x2d, 0xc7, 0x22, 0x1d, 0x90, 0x5b, 0x91, + 0x15, 0x10, 0x53, 0x7f, 0x88, 0x37, 0xa9, 0x72, 0xbe, 0x2f, 0x68, 0x48, 0x71, 0x61, 0x2b, 0x1e, + 0xf7, 0xe8, 0xfc, 0x18, 0x5e, 0x1f, 0x6e, 0x25, 0xdf, 0x09, 0x56, 0x83, 0x00, 0x1f, 0x8b, 0x9b, + 0x26, 0x3d, 0xe8, 0x81, 0x14, 0x14, 0xb0, 0x6d, 0xef, 0x34, 0xf4, 0xf3, 0x3c, 0xf6, 0xc3, 0xbe, + 0x41, 0x54, 0xd5, 0x59, 0x65, 0x20, 0x48, 0x60, 0x31, 0x50, 0xcf, 0x65, 0xa9, 0xb1, 0x38, 0x5a, + 0xd0, 0x1d, 0x06, 0x82, 0x04, 0x16, 0x5f, 0xa9, 0x7b, 0xbc, 0xd3, 0xd0, 0x1f, 0x1e, 0xf1, 0x4a, + 0x19, 0x08, 0x12, 0x58, 0xd0, 0x02, 0x79, 0xd7, 0x0b, 0xf5, 0x0b, 0x23, 0xb9, 0x9e, 0xf9, 0x85, + 0xb3, 0xed, 0x85, 0x88, 0x61, 0xc0, 0x5f, 0x6a, 0x00, 0xf8, 0x49, 0x8a, 0x3e, 0x32, 0x94, 0x29, + 0x42, 0x06, 0xb2, 0x9a, 0xe4, 0xf6, 0x86, 0x1b, 0x06, 0xc7, 0xc9, 0x3b, 0x32, 0x75, 0x06, 0x52, + 0x5e, 0xc0, 0xdf, 0x69, 0xe0, 0x6c, 0xba, 0x4d, 0x56, 0xee, 0x95, 0x78, 0x44, 0xae, 0x0d, 0x3b, + 0xcd, 0x6b, 0x9e, 0x67, 0xd7, 0xf4, 0x4e, 0xbb, 0x7c, 0x76, 0xb5, 0x0f, 0x2a, 0xea, 0xeb, 0x0b, + 0xfc, 0x93, 0x06, 0xe6, 0x65, 0x15, 0x4d, 0x79, 0x58, 0xe6, 0x01, 0x24, 0xc3, 0x0e, 0x60, 0x16, + 0x47, 0xc4, 0x51, 0xfd, 0x2e, 0xdd, 0xc3, 0x47, 0xbd, 0xae, 0xc1, 0xbf, 0x6a, 0x60, 0xda, 0x24, + 0x3e, 0x71, 0x4d, 0xe2, 0x1a, 0xcc, 0xd7, 0xa5, 0xa1, 0x8c, 0x0d, 0xb2, 0xbe, 0xae, 0xa7, 0x20, + 0x84, 0x9b, 0x55, 0xe9, 0xe6, 0x74, 0x9a, 0x75, 0xd2, 0x2e, 0x2f, 0x24, 0xaa, 0x69, 0x0e, 0xea, + 0xf2, 0x12, 0xbe, 0xaf, 0x81, 0xd9, 0x64, 0x03, 0xc4, 0x95, 0x72, 0x71, 0x84, 0x79, 0xc0, 0xdb, + 0xd7, 0xd5, 0x6e, 0x40, 0x94, 0xf5, 0x00, 0xfe, 0x59, 0x63, 0x9d, 0x5a, 0xfc, 0xee, 0xa3, 0x7a, + 0x85, 0xc7, 0xf2, 0xad, 0xa1, 0xc7, 0x52, 0x21, 0x88, 0x50, 0x5e, 0x4a, 0x5a, 0x41, 0xc5, 0x39, + 0x69, 0x97, 0xcf, 0xa5, 0x23, 0xa9, 
0x18, 0x28, 0xed, 0x21, 0xfc, 0x89, 0x06, 0xa6, 0x49, 0xd2, + 0x71, 0x53, 0xfd, 0xd1, 0xa1, 0x04, 0xb1, 0x6f, 0x13, 0x2f, 0x5e, 0xea, 0x29, 0x16, 0x45, 0x5d, + 0xd8, 0xac, 0x83, 0x24, 0x47, 0xd8, 0xf1, 0x6d, 0xa2, 0x7f, 0x61, 0xc8, 0x1d, 0xe4, 0x86, 0xb0, + 0x8b, 0x62, 0x00, 0x78, 0x09, 0x14, 0xdd, 0xc8, 0xb6, 0xf1, 0x9e, 0x4d, 0xf4, 0xc7, 0x78, 0x2f, + 0xa2, 0xa6, 0x98, 0xdb, 0x92, 0x8e, 0x94, 0x04, 0x6c, 0x80, 0xa5, 0xa3, 0x97, 0xd5, 0x7f, 0xf4, + 0xec, 0x06, 0x84, 0xe3, 0x5f, 0x77, 0x0f, 0x5c, 0xef, 0xd0, 0x7d, 0xc1, 0x22, 0xb6, 0x49, 0xf5, + 0xc7, 0xb9, 0x95, 0xc5, 0x4e, 0xbb, 0xbc, 0x70, 0xa3, 0xaf, 0x04, 0xba, 0xab, 0x0d, 0xf8, 0x1a, + 0x78, 0x38, 0x25, 0xb3, 0xe1, 0xec, 0x11, 0xd3, 0x24, 0x66, 0xfc, 0x70, 0xd3, 0xbf, 0xc8, 0x21, + 0xd4, 0x01, 0xbf, 0x91, 0x15, 0x40, 0x77, 0xd2, 0x86, 0x57, 0xc1, 0x42, 0x8a, 0xbd, 0xe9, 0x86, + 0x3b, 0x41, 0x3d, 0x0c, 0x2c, 0xb7, 0xa9, 0x2f, 0x73, 0xbb, 0x67, 0xe3, 0x13, 0x79, 0x23, 0xc5, + 0x43, 0x03, 0x74, 0x16, 0xd9, 0xd3, 0x31, 0x53, 0x7a, 0xe0, 0x1c, 0xc8, 0x1f, 0x10, 0xf9, 0x0b, + 0x39, 0x62, 0x7f, 0x42, 0x13, 0x14, 0x5a, 0xd8, 0x8e, 0xe2, 0xd7, 0xef, 0x90, 0xaf, 0x2d, 0x24, + 0x8c, 0x3f, 0x9f, 0x7b, 0x4e, 0x5b, 0xfc, 0x40, 0x03, 0x0b, 0xfd, 0x2b, 0xe2, 0x03, 0x75, 0xeb, + 0x37, 0x1a, 0x98, 0xef, 0x29, 0x7e, 0x7d, 0x3c, 0xba, 0xd5, 0xed, 0xd1, 0x6b, 0xc3, 0xae, 0x62, + 0x62, 0xd7, 0x78, 0xeb, 0x96, 0x76, 0xef, 0xe7, 0x1a, 0x98, 0xcb, 0xd6, 0x93, 0x07, 0x19, 0xaf, + 0xca, 0x07, 0x39, 0xb0, 0xd0, 0xbf, 0xe3, 0x84, 0x81, 0x7a, 0x5a, 0x8f, 0x66, 0x44, 0xd1, 0x6f, + 0x9c, 0xf9, 0xae, 0x06, 0xa6, 0x6e, 0x2a, 0xb9, 0xf8, 0x17, 0xd4, 0xa1, 0x0f, 0x47, 0xe2, 0x02, + 0x9e, 0x30, 0x28, 0x4a, 0xe3, 0x56, 0xfe, 0xa2, 0x81, 0x73, 0x7d, 0x6f, 0x26, 0xf6, 0x86, 0xc7, + 0xb6, 0xed, 0x1d, 0x8a, 0x19, 0x57, 0x6a, 0x80, 0xbc, 0xca, 0xa9, 0x48, 0x72, 0x53, 0xd1, 0xcb, + 0x7d, 0x5e, 0xd1, 0xab, 0xfc, 0x5d, 0x03, 0x17, 0xee, 0x94, 0x89, 0x0f, 0x64, 0x4b, 0x97, 0x41, + 0x51, 0x76, 0x95, 0xc7, 0x7c, 0x3b, 0xe5, 0x43, 0x4a, 0x16, 0x0d, 0xfe, 0x4f, 0x43, 0xe2, 0xaf, + 0xca, 0x87, 0x1a, 0x98, 0xab, 0x93, 0xa0, 0x65, 0x19, 0x04, 0x91, 0x06, 0x09, 0x88, 0x6b, 0x10, + 0xb8, 0x02, 0x26, 0xf9, 0x4f, 0x97, 0x3e, 0x36, 0xe2, 0xb9, 0xfe, 0xbc, 0x0c, 0xf9, 0xe4, 0x76, + 0xcc, 0x40, 0x89, 0x8c, 0xfa, 0x0d, 0x20, 0x37, 0xf0, 0x37, 0x80, 0x0b, 0x60, 0xcc, 0x4f, 0x26, + 0xa4, 0x45, 0xc6, 0xe5, 0x43, 0x51, 0x4e, 0xe5, 0x5c, 0x2f, 0x08, 0xf9, 0xd8, 0xa7, 0x20, 0xb9, + 0x5e, 0x10, 0x22, 0x4e, 0xad, 0xfc, 0x43, 0x03, 0xfd, 0xfe, 0xbd, 0x07, 0xb6, 0xc0, 0x04, 0x15, + 0xae, 0xcb, 0xd0, 0xee, 0xdc, 0x67, 0x68, 0xb3, 0x81, 0x10, 0xf7, 0x6a, 0x4c, 0x8d, 0xc1, 0x58, + 0x74, 0x0d, 0x5c, 0x8b, 0x5c, 0x53, 0x4e, 0x3c, 0xa7, 0x45, 0x74, 0xd7, 0x56, 0x05, 0x0d, 0x29, + 0x2e, 0x3c, 0x2f, 0x66, 0x73, 0xa9, 0x81, 0x57, 0x3c, 0x97, 0xab, 0x5d, 0xfe, 0xe8, 0x76, 0xe9, + 0xd4, 0xc7, 0xb7, 0x4b, 0xa7, 0x3e, 0xb9, 0x5d, 0x3a, 0xf5, 0xbd, 0x4e, 0x49, 0xfb, 0xa8, 0x53, + 0xd2, 0x3e, 0xee, 0x94, 0xb4, 0x4f, 0x3a, 0x25, 0xed, 0x5f, 0x9d, 0x92, 0xf6, 0x8b, 0x4f, 0x4b, + 0xa7, 0xbe, 0x35, 0x21, 0x5d, 0xfb, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe1, 0x05, 0x6b, 0x7f, + 0x74, 0x2b, 0x00, 0x00, } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 00a558a7fc9..13ce5e288e6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ 
b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go
@@ -938,7 +938,7 @@ func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JS
 out.Example = nil
 }
 out.Nullable = in.Nullable
- out.XPreserveUnknownFields = in.XPreserveUnknownFields
+ out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields))
 out.XEmbeddedResource = in.XEmbeddedResource
 out.XIntOrString = in.XIntOrString
 return nil
@@ -1123,7 +1123,7 @@ func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *ap
 } else {
 out.Example = nil
 }
- out.XPreserveUnknownFields = in.XPreserveUnknownFields
+ out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields))
 out.XEmbeddedResource = in.XEmbeddedResource
 out.XIntOrString = in.XIntOrString
 return nil

From 2001feead2625803557e3d789ec0ae66e77966f8 Mon Sep 17 00:00:00 2001
From: Ted Yu
Date: Tue, 14 May 2019 05:48:21 +0800
Subject: [PATCH 142/194] Move the array of plugin names to inside the last if
 block in VolumePluginMgr#FindPluginBySpec

---
 pkg/volume/plugins.go | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go
index 2fc776b50b2..d64a11f17aa 100644
--- a/pkg/volume/plugins.go
+++ b/pkg/volume/plugins.go
@@ -647,19 +647,16 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
 return nil, fmt.Errorf("Could not find plugin because volume spec is nil")
 }
- matchedPluginNames := []string{}
 matches := []VolumePlugin{}
- for k, v := range pm.plugins {
+ for _, v := range pm.plugins {
 if v.CanSupport(spec) {
- matchedPluginNames = append(matchedPluginNames, k)
 matches = append(matches, v)
 }
 }
 pm.refreshProbedPlugins()
- for pluginName, plugin := range pm.probedPlugins {
+ for _, plugin := range pm.probedPlugins {
 if plugin.CanSupport(spec) {
- matchedPluginNames = append(matchedPluginNames, pluginName)
 matches = append(matches, plugin)
 }
 }
@@ -668,6 +665,10 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
 return nil, fmt.Errorf("no volume plugin matched")
 }
 if len(matches) > 1 {
+ matchedPluginNames := []string{}
+ for _, plugin := range matches {
+ matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+ }
 return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
 }
 return matches[0], nil
@@ -684,11 +685,9 @@ func (pm *VolumePluginMgr) IsPluginMigratableBySpec(spec *Spec) (bool, error) {
 return false, fmt.Errorf("could not find if plugin is migratable because volume spec is nil")
 }
- matchedPluginNames := []string{}
 matches := []VolumePlugin{}
- for k, v := range pm.plugins {
+ for _, v := range pm.plugins {
 if v.CanSupport(spec) {
- matchedPluginNames = append(matchedPluginNames, k)
 matches = append(matches, v)
 }
 }
@@ -698,6 +697,10 @@ func (pm *VolumePluginMgr) IsPluginMigratableBySpec(spec *Spec) (bool, error) {
 return false, nil
 }
 if len(matches) > 1 {
+ matchedPluginNames := []string{}
+ for _, plugin := range matches {
+ matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+ }
 return false, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
 }
@@ -711,16 +714,13 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
 defer pm.mutex.Unlock()
 // Once we can get rid of legacy names we can reduce this to a map lookup.
- matchedPluginNames := []string{}
 matches := []VolumePlugin{}
 if v, found := pm.plugins[name]; found {
- matchedPluginNames = append(matchedPluginNames, name)
 matches = append(matches, v)
 }
 pm.refreshProbedPlugins()
 if plugin, found := pm.probedPlugins[name]; found {
- matchedPluginNames = append(matchedPluginNames, name)
 matches = append(matches, plugin)
 }
@@ -728,6 +728,10 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
 return nil, fmt.Errorf("no volume plugin matched")
 }
 if len(matches) > 1 {
+ matchedPluginNames := []string{}
+ for _, plugin := range matches {
+ matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+ }
 return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
 }
 return matches[0], nil

From f25efd12e63f1d7db5f29fe28831ad0126200c0b Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Mon, 13 May 2019 14:13:00 -0400
Subject: [PATCH 143/194] PartialObjectMetadataList should nest values, not
 pointers for Items

Typo during setting up PartialObjectMetadataList, it should be a slice of
`PartialObjectMetadata`, not a slice of `*PartialObjectMetadata`.
---
 .../tableconvertor/tableconvertor_test.go | 2 +-
 .../pkg/apis/meta/v1/generated.pb.go | 350 +++++++++---------
 .../apimachinery/pkg/apis/meta/v1/types.go | 2 +-
 .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 8 +-
 .../pkg/apis/meta/v1beta1/generated.pb.go | 44 +--
 .../pkg/apis/meta/v1beta1/types.go | 2 +-
 .../meta/v1beta1/zz_generated.deepcopy.go | 8 +-
 .../apiserver/pkg/endpoints/apiserver_test.go | 2 +-
 .../pkg/endpoints/handlers/response.go | 4 +-
 9 files changed, 207 insertions(+), 215 deletions(-)

diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go
index 092aae18e16..1b808c70be0 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go
@@ -123,7 +123,7 @@ func Test_convertor_ConvertToTable(t *testing.T) {
 },
 args: args{
 obj: &metav1beta1.PartialObjectMetadataList{
- Items: []*metav1beta1.PartialObjectMetadata{
+ Items: []metav1beta1.PartialObjectMetadata{
 {ObjectMeta: metav1.ObjectMeta{Name: "blah", CreationTimestamp: metav1.NewTime(time.Unix(1, 0))}},
 {ObjectMeta: metav1.ObjectMeta{Name: "blah-2", CreationTimestamp: metav1.NewTime(time.Unix(2, 0))}},
 },
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index c1152943bd5..a8603197e0e 100644
--- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -3057,7 +3057,7 @@ func (this *PartialObjectMetadataList) String() string {
 }
 s := strings.Join([]string{`&PartialObjectMetadataList{`,
 `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`,
+ `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + `,`,
 `}`,
 }, "")
 return s
@@ -7766,7 +7766,7 @@ func (m *PartialObjectMetadataList)
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &PartialObjectMetadata{}) + m.Items = append(m.Items, PartialObjectMetadata{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9521,182 +9521,182 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2825 bytes of a gzipped FileDescriptorProto + // 2823 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0xcf, 0x6f, 0x23, 0x57, 0x39, 0x63, 0xc7, 0x8e, 0xfd, 0x39, 0xde, 0x4d, 0xde, 0xee, 0x82, 0x1b, 0x44, 0x9c, 0x4e, 0x51, 0xb5, 0x85, 0xad, 0xd3, 0xdd, 0xd2, 0x6a, 0xd9, 0xd2, 0x42, 0x9c, 0x1f, 0xdb, 0xd0, 0x4d, 0x13, 0xbd, 0xec, 0x2e, 0x62, 0x59, 0xa1, 0x4e, 0x3c, 0x2f, 0xce, 0x90, 0xf1, 0x8c, 0xfb, 0xde, 0x38, 0xbb, 0x86, 0x03, 0x3d, 0x80, 0x00, 0x09, 0xaa, 0x1e, 0x39, 0xa1, 0x56, 0xf0, 0x17, 0x70, 0xe2, - 0xc4, 0xa9, 0x12, 0xbd, 0x20, 0x55, 0xe2, 0x40, 0x0f, 0xc8, 0x6a, 0x03, 0x12, 0xdc, 0x38, 0x71, - 0xc9, 0x09, 0xbd, 0x5f, 0x33, 0x6f, 0xec, 0x78, 0x33, 0x66, 0x4b, 0xc5, 0xc9, 0x33, 0xdf, 0xcf, - 0xf7, 0xbe, 0xf7, 0xbd, 0xef, 0xd7, 0x18, 0xb6, 0x0e, 0xaf, 0xb3, 0x86, 0x17, 0x2e, 0x1f, 0xf6, - 0xf6, 0x08, 0x0d, 0x48, 0x44, 0xd8, 0xf2, 0x11, 0x09, 0xdc, 0x90, 0x2e, 0x2b, 0x84, 0xd3, 0xf5, - 0x3a, 0x4e, 0xeb, 0xc0, 0x0b, 0x08, 0xed, 0x2f, 0x77, 0x0f, 0xdb, 0x1c, 0xc0, 0x96, 0x3b, 0x24, - 0x72, 0x96, 0x8f, 0xae, 0x2e, 0xb7, 0x49, 0x40, 0xa8, 0x13, 0x11, 0xb7, 0xd1, 0xa5, 0x61, 0x14, - 0xa2, 0x2f, 0x49, 0xae, 0x86, 0xc9, 0xd5, 0xe8, 0x1e, 0xb6, 0x39, 0x80, 0x35, 0x38, 0x57, 0xe3, - 0xe8, 0xea, 0xc2, 0xb3, 0x6d, 0x2f, 0x3a, 0xe8, 0xed, 0x35, 0x5a, 0x61, 0x67, 0xb9, 0x1d, 0xb6, - 0xc3, 0x65, 0xc1, 0xbc, 0xd7, 0xdb, 0x17, 0x6f, 0xe2, 0x45, 0x3c, 0x49, 0xa1, 0x0b, 0x63, 0x97, - 0x42, 0x7b, 0x41, 0xe4, 0x75, 0xc8, 0xf0, 0x2a, 0x16, 0x5e, 0x3c, 0x8b, 0x81, 0xb5, 0x0e, 0x48, - 0xc7, 0x19, 0xe6, 0xb3, 0xff, 0x98, 0x87, 0xd2, 0xca, 0xce, 0xe6, 0x4d, 0x1a, 0xf6, 0xba, 0x68, - 0x09, 0xa6, 0x03, 0xa7, 0x43, 0x6a, 0xd6, 0x92, 0x75, 0xb9, 0xdc, 0x9c, 0xfd, 0x60, 0x50, 0x9f, - 0x3a, 0x1e, 0xd4, 0xa7, 0x5f, 0x77, 0x3a, 0x04, 0x0b, 0x0c, 0xf2, 0xa1, 0x74, 0x44, 0x28, 0xf3, - 0xc2, 0x80, 0xd5, 0x72, 0x4b, 0xf9, 0xcb, 0x95, 0x6b, 0xaf, 0x34, 0xb2, 0xec, 0xbf, 0x21, 0x14, - 0xdc, 0x95, 0xac, 0x1b, 0x21, 0x5d, 0xf3, 0x58, 0x2b, 0x3c, 0x22, 0xb4, 0xdf, 0x9c, 0x53, 0x5a, - 0x4a, 0x0a, 0xc9, 0x70, 0xac, 0x01, 0xfd, 0xd8, 0x82, 0xb9, 0x2e, 0x25, 0xfb, 0x84, 0x52, 0xe2, - 0x2a, 0x7c, 0x2d, 0xbf, 0x64, 0x7d, 0x0a, 0x6a, 0x6b, 0x4a, 0xed, 0xdc, 0xce, 0x90, 0x7c, 0x3c, - 0xa2, 0x11, 0xfd, 0xc6, 0x82, 0x05, 0x46, 0xe8, 0x11, 0xa1, 0x2b, 0xae, 0x4b, 0x09, 0x63, 0xcd, - 0xfe, 0xaa, 0xef, 0x91, 0x20, 0x5a, 0xdd, 0x5c, 0xc3, 0xac, 0x36, 0x2d, 0xec, 0xf0, 0x8d, 0x6c, - 0x0b, 0xda, 0x1d, 0x27, 0xa7, 0x69, 0xab, 0x15, 0x2d, 0x8c, 0x25, 0x61, 0xf8, 0x11, 0xcb, 0xb0, - 0xf7, 0x61, 0x56, 0x1f, 0xe4, 0x2d, 0x8f, 0x45, 0xe8, 0x2e, 0x14, 0xdb, 0xfc, 0x85, 0xd5, 0x2c, - 0xb1, 0xc0, 0x46, 0xb6, 0x05, 0x6a, 0x19, 0xcd, 0x73, 0x6a, 0x3d, 0x45, 0xf1, 0xca, 0xb0, 0x92, - 0x66, 0xff, 0x7c, 0x1a, 0x2a, 0x2b, 0x3b, 0x9b, 0x98, 0xb0, 0xb0, 0x47, 0x5b, 0x24, 0x83, 0xd3, - 0x5c, 0x03, 0xe0, 0xbf, 0xac, 0xeb, 0xb4, 0x88, 0x5b, 0xcb, 0x2d, 0x59, 0x97, 0x4b, 0x4d, 0xa4, - 0xe8, 0xe0, 0xf5, 0x18, 0x83, 0x0d, 0x2a, 0x2e, 0xf5, 0xd0, 0x0b, 0x5c, 0x71, 0xda, 0x86, 0xd4, - 0xd7, 0xbc, 0xc0, 0xc5, 0x02, 0x83, 0x6e, 0x41, 0xe1, 0x88, 0xd0, 0x3d, 0x6e, 0x7f, 0xee, 0x10, - 0x5f, 0xc9, 0xb6, 0xbd, 0xbb, 0x9c, 0xa5, 0x59, 0x3e, 0x1e, 0xd4, 
0x0b, 0xe2, 0x11, 0x4b, 0x21, - 0xa8, 0x01, 0xc0, 0x0e, 0x42, 0x1a, 0x89, 0xe5, 0xd4, 0x0a, 0x4b, 0xf9, 0xcb, 0xe5, 0xe6, 0x39, - 0xbe, 0xbe, 0xdd, 0x18, 0x8a, 0x0d, 0x0a, 0x74, 0x1d, 0x66, 0x99, 0x17, 0xb4, 0x7b, 0xbe, 0x43, - 0x39, 0xa0, 0x56, 0x14, 0xeb, 0xbc, 0xa8, 0xd6, 0x39, 0xbb, 0x6b, 0xe0, 0x70, 0x8a, 0x92, 0x6b, - 0x6a, 0x39, 0x11, 0x69, 0x87, 0xd4, 0x23, 0xac, 0x36, 0x93, 0x68, 0x5a, 0x8d, 0xa1, 0xd8, 0xa0, - 0x40, 0x4f, 0x41, 0x41, 0x58, 0xbe, 0x56, 0x12, 0x2a, 0xaa, 0x4a, 0x45, 0x41, 0x1c, 0x0b, 0x96, - 0x38, 0xf4, 0x0c, 0xcc, 0xa8, 0x5b, 0x53, 0x2b, 0x0b, 0xb2, 0xf3, 0x8a, 0x6c, 0x46, 0xbb, 0xb5, - 0xc6, 0xa3, 0x6f, 0x01, 0x62, 0x51, 0x48, 0x9d, 0x36, 0x51, 0xa8, 0x57, 0x1d, 0x76, 0x50, 0x03, - 0xc1, 0xb5, 0xa0, 0xb8, 0xd0, 0xee, 0x08, 0x05, 0x3e, 0x85, 0xcb, 0xfe, 0x9d, 0x05, 0xe7, 0x0d, - 0x5f, 0x10, 0x7e, 0x77, 0x1d, 0x66, 0xdb, 0xc6, 0xad, 0x53, 0x7e, 0x11, 0x5b, 0xc6, 0xbc, 0x91, - 0x38, 0x45, 0x89, 0x08, 0x94, 0xa9, 0x92, 0xa4, 0xa3, 0xcb, 0xd5, 0xcc, 0x4e, 0xab, 0xd7, 0x90, - 0x68, 0x32, 0x80, 0x0c, 0x27, 0x92, 0xed, 0x7f, 0x58, 0xc2, 0x81, 0x75, 0xbc, 0x41, 0x97, 0x8d, - 0x98, 0x66, 0x89, 0xe3, 0x98, 0x1d, 0x13, 0x8f, 0xce, 0x08, 0x04, 0xb9, 0xff, 0x8b, 0x40, 0x70, - 0xa3, 0xf4, 0xab, 0x77, 0xeb, 0x53, 0x6f, 0xfd, 0x75, 0x69, 0xca, 0xee, 0x40, 0x75, 0x95, 0x12, - 0x27, 0x22, 0xdb, 0xdd, 0x48, 0x6c, 0xc0, 0x86, 0xa2, 0x4b, 0xfb, 0xb8, 0x17, 0xa8, 0x8d, 0x02, - 0xbf, 0xdf, 0x6b, 0x02, 0x82, 0x15, 0x86, 0x9f, 0xdf, 0xbe, 0x47, 0x7c, 0x77, 0xcb, 0x09, 0x9c, - 0x36, 0xa1, 0xea, 0x06, 0xc6, 0x56, 0xdd, 0x30, 0x70, 0x38, 0x45, 0x69, 0xff, 0x34, 0x0f, 0xd5, - 0x35, 0xe2, 0x93, 0x44, 0xdf, 0x06, 0xa0, 0x36, 0x75, 0x5a, 0x64, 0x87, 0x50, 0x2f, 0x74, 0x77, - 0x49, 0x2b, 0x0c, 0x5c, 0x26, 0x3c, 0x22, 0xdf, 0xfc, 0x1c, 0xf7, 0xb3, 0x9b, 0x23, 0x58, 0x7c, - 0x0a, 0x07, 0xf2, 0xa1, 0xda, 0xa5, 0xe2, 0xd9, 0x8b, 0x54, 0xee, 0xe1, 0x77, 0xfe, 0xf9, 0x6c, - 0xa6, 0xde, 0x31, 0x59, 0x9b, 0xf3, 0xc7, 0x83, 0x7a, 0x35, 0x05, 0xc2, 0x69, 0xe1, 0xe8, 0x9b, - 0x30, 0x17, 0xd2, 0xee, 0x81, 0x13, 0xac, 0x91, 0x2e, 0x09, 0x5c, 0x12, 0x44, 0x4c, 0x58, 0xa1, - 0xd4, 0xbc, 0xc8, 0x33, 0xc6, 0xf6, 0x10, 0x0e, 0x8f, 0x50, 0xa3, 0x7b, 0x30, 0xdf, 0xa5, 0x61, - 0xd7, 0x69, 0x3b, 0x5c, 0xe2, 0x4e, 0xe8, 0x7b, 0xad, 0xbe, 0x88, 0x53, 0xe5, 0xe6, 0x95, 0xe3, - 0x41, 0x7d, 0x7e, 0x67, 0x18, 0x79, 0x32, 0xa8, 0x5f, 0x10, 0xa6, 0xe3, 0x90, 0x04, 0x89, 0x47, - 0xc5, 0x18, 0x67, 0x58, 0x18, 0x77, 0x86, 0xf6, 0x26, 0x94, 0xd6, 0x7a, 0x54, 0x70, 0xa1, 0x97, - 0xa1, 0xe4, 0xaa, 0x67, 0x65, 0xf9, 0x27, 0x75, 0xca, 0xd5, 0x34, 0x27, 0x83, 0x7a, 0x95, 0x17, - 0x09, 0x0d, 0x0d, 0xc0, 0x31, 0x8b, 0x7d, 0x1f, 0xaa, 0xeb, 0x0f, 0xbb, 0x21, 0x8d, 0xf4, 0x99, - 0x3e, 0x0d, 0x45, 0x22, 0x00, 0x42, 0x5a, 0x29, 0xc9, 0x13, 0x92, 0x0c, 0x2b, 0x2c, 0x8f, 0x5b, - 0xe4, 0xa1, 0xd3, 0x8a, 0x54, 0xc0, 0x8f, 0xe3, 0xd6, 0x3a, 0x07, 0x62, 0x89, 0xb3, 0xdf, 0xb7, - 0xa0, 0x28, 0x3c, 0x8a, 0xa1, 0xdb, 0x90, 0xef, 0x38, 0x5d, 0x95, 0xac, 0x5e, 0xc8, 0x76, 0xb2, - 0x92, 0xb5, 0xb1, 0xe5, 0x74, 0xd7, 0x83, 0x88, 0xf6, 0x9b, 0x15, 0xa5, 0x24, 0xbf, 0xe5, 0x74, - 0x31, 0x17, 0xb7, 0xe0, 0x42, 0x49, 0x63, 0xd1, 0x1c, 0xe4, 0x0f, 0x49, 0x5f, 0x06, 0x24, 0xcc, - 0x1f, 0x51, 0x13, 0x0a, 0x47, 0x8e, 0xdf, 0x23, 0xca, 0x9f, 0xae, 0x4c, 0xa2, 0x15, 0x4b, 0xd6, - 0x1b, 0xb9, 0xeb, 0x96, 0xbd, 0x0d, 0x70, 0x93, 0xc4, 0x16, 0x5a, 0x81, 0xf3, 0x3a, 0xda, 0xa4, - 0x83, 0xe0, 0xe7, 0xd5, 0xf2, 0xce, 0xe3, 0x34, 0x1a, 0x0f, 0xd3, 0xdb, 0xf7, 0xa1, 0x2c, 0x02, - 0x25, 0xcf, 0x77, 0x49, 0x06, 0xb0, 0x1e, 0x91, 0x01, 0x74, 0xc2, 0xcc, 0x8d, 0x4b, 0x98, 
0x46, - 0x5c, 0xf0, 0xa1, 0x2a, 0x79, 0x75, 0x0e, 0xcf, 0xa4, 0xe1, 0x0a, 0x94, 0xf4, 0x32, 0x95, 0x96, - 0xb8, 0x76, 0xd3, 0x82, 0x70, 0x4c, 0x61, 0x68, 0x3b, 0x80, 0x54, 0xd0, 0xcf, 0xa6, 0xcc, 0x48, - 0x68, 0xb9, 0x47, 0x27, 0x34, 0x43, 0xd3, 0x8f, 0xa0, 0x36, 0xae, 0xe0, 0x7b, 0x8c, 0xb4, 0x94, - 0x7d, 0x29, 0xf6, 0xdb, 0x16, 0xcc, 0x99, 0x92, 0xb2, 0x1f, 0x5f, 0x76, 0x25, 0x67, 0x97, 0x46, - 0x86, 0x45, 0x7e, 0x6d, 0xc1, 0xc5, 0xd4, 0xd6, 0x26, 0x3a, 0xf1, 0x09, 0x16, 0x65, 0x3a, 0x47, - 0x7e, 0x02, 0xe7, 0x58, 0x86, 0xca, 0x66, 0xe0, 0x45, 0x9e, 0xe3, 0x7b, 0x3f, 0x20, 0xf4, 0xec, - 0x62, 0xd2, 0xfe, 0x83, 0x05, 0xb3, 0x06, 0x07, 0x43, 0xf7, 0x61, 0x86, 0xc7, 0x5d, 0x2f, 0x68, - 0xab, 0xd8, 0x91, 0xb1, 0x66, 0x30, 0x84, 0x24, 0xfb, 0xda, 0x91, 0x92, 0xb0, 0x16, 0x89, 0x76, - 0xa0, 0x48, 0x09, 0xeb, 0xf9, 0xd1, 0x64, 0x21, 0x62, 0x37, 0x72, 0xa2, 0x1e, 0x93, 0xb1, 0x19, - 0x0b, 0x7e, 0xac, 0xe4, 0xd8, 0x7f, 0xce, 0x41, 0xf5, 0x96, 0xb3, 0x47, 0xfc, 0x5d, 0xe2, 0x93, - 0x56, 0x14, 0x52, 0xf4, 0x43, 0xa8, 0x74, 0x9c, 0xa8, 0x75, 0x20, 0xa0, 0xba, 0x5c, 0x5f, 0xcb, - 0xa6, 0x28, 0x25, 0xa9, 0xb1, 0x95, 0x88, 0x91, 0x01, 0xf1, 0x82, 0xda, 0x58, 0xc5, 0xc0, 0x60, - 0x53, 0x9b, 0xe8, 0xb1, 0xc4, 0xfb, 0xfa, 0xc3, 0x2e, 0xaf, 0x25, 0x26, 0x6f, 0xed, 0x52, 0x4b, - 0xc0, 0xe4, 0xcd, 0x9e, 0x47, 0x49, 0x87, 0x04, 0x51, 0xd2, 0x63, 0x6d, 0x0d, 0xc9, 0xc7, 0x23, - 0x1a, 0x17, 0x5e, 0x81, 0xb9, 0xe1, 0xc5, 0x9f, 0x12, 0xaf, 0x2f, 0x9a, 0xf1, 0xba, 0x6c, 0x46, - 0xe0, 0xdf, 0x5a, 0x50, 0x1b, 0xb7, 0x10, 0xf4, 0x45, 0x43, 0x50, 0x92, 0x23, 0x5e, 0x23, 0x7d, - 0x29, 0x75, 0x1d, 0x4a, 0x61, 0x97, 0x77, 0xc5, 0x21, 0x55, 0x7e, 0xfe, 0x8c, 0xf6, 0xdd, 0x6d, - 0x05, 0x3f, 0x19, 0xd4, 0x2f, 0xa5, 0xc4, 0x6b, 0x04, 0x8e, 0x59, 0x79, 0x62, 0x16, 0xeb, 0xe1, - 0xc5, 0x42, 0x9c, 0x98, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xfb, 0xf7, 0x16, 0x4c, 0x8b, 0x2a, 0xf9, - 0x3e, 0x94, 0xb8, 0xfd, 0x5c, 0x27, 0x72, 0xc4, 0xba, 0x32, 0xf7, 0x67, 0x9c, 0x7b, 0x8b, 0x44, - 0x4e, 0x72, 0xbf, 0x34, 0x04, 0xc7, 0x12, 0x11, 0x86, 0x82, 0x17, 0x91, 0x8e, 0x3e, 0xc8, 0x67, - 0xc7, 0x8a, 0x56, 0xd3, 0x81, 0x06, 0x76, 0x1e, 0xac, 0x3f, 0x8c, 0x48, 0xc0, 0x0f, 0x23, 0x09, - 0x06, 0x9b, 0x5c, 0x06, 0x96, 0xa2, 0xec, 0x7f, 0x5b, 0x10, 0xab, 0xe2, 0xd7, 0x9d, 0x11, 0x7f, - 0xff, 0x96, 0x17, 0x1c, 0x2a, 0xb3, 0xc6, 0xcb, 0xd9, 0x55, 0x70, 0x1c, 0x53, 0x9c, 0x96, 0x10, - 0x73, 0x93, 0x25, 0x44, 0xae, 0xb0, 0x15, 0x06, 0x91, 0x17, 0xf4, 0x46, 0xe2, 0xcb, 0xaa, 0x82, - 0xe3, 0x98, 0x82, 0xf7, 0x38, 0x94, 0x74, 0x1c, 0x2f, 0xf0, 0x82, 0x36, 0xdf, 0xc4, 0x6a, 0xd8, - 0x0b, 0x22, 0x51, 0x80, 0xe5, 0x93, 0x1e, 0x07, 0x8f, 0x50, 0xe0, 0x53, 0xb8, 0xec, 0x3f, 0xe5, - 0xa1, 0xc2, 0xf7, 0xad, 0xb3, 0xfb, 0x4b, 0x50, 0xf5, 0x4d, 0x4f, 0x50, 0xfb, 0xbf, 0xa4, 0xc4, - 0xa6, 0xef, 0x36, 0x4e, 0xd3, 0x72, 0x66, 0x51, 0x32, 0xc7, 0xcc, 0xb9, 0x34, 0xf3, 0x86, 0x89, - 0xc4, 0x69, 0x5a, 0x1e, 0xb3, 0x1f, 0xf0, 0x3b, 0xa2, 0x8a, 0xd1, 0xf8, 0x98, 0xbe, 0xcd, 0x81, - 0x58, 0xe2, 0x4e, 0xb3, 0xf5, 0xf4, 0x84, 0xb6, 0xbe, 0x01, 0xe7, 0xb8, 0x53, 0x84, 0xbd, 0x48, - 0x57, 0xec, 0x05, 0x61, 0x39, 0x74, 0x3c, 0xa8, 0x9f, 0xbb, 0x9d, 0xc2, 0xe0, 0x21, 0x4a, 0xbe, - 0x46, 0xdf, 0xeb, 0x78, 0x51, 0x6d, 0x46, 0xb0, 0xc4, 0x6b, 0xbc, 0xc5, 0x81, 0x58, 0xe2, 0x52, - 0x87, 0x59, 0x3a, 0xf3, 0x30, 0xb7, 0xe0, 0x82, 0xe3, 0xfb, 0xe1, 0x03, 0xb1, 0xcd, 0x66, 0x18, - 0x1e, 0x76, 0x1c, 0x7a, 0xc8, 0x44, 0x9f, 0x5b, 0x6a, 0x7e, 0x41, 0x31, 0x5e, 0x58, 0x19, 0x25, - 0xc1, 0xa7, 0xf1, 0xd9, 0xff, 0xcc, 0x01, 0x92, 0x1d, 0x8b, 0x2b, 0x0b, 0x39, 0x19, 0x6c, 0x9e, - 0x81, 0x99, 0x8e, 
0xea, 0x78, 0xac, 0x74, 0xae, 0xd3, 0xcd, 0x8e, 0xc6, 0xa3, 0x2d, 0x28, 0xcb, - 0x4b, 0x9f, 0x38, 0xf2, 0xb2, 0x22, 0x2e, 0x6f, 0x6b, 0xc4, 0xc9, 0xa0, 0xbe, 0x90, 0x52, 0x13, - 0x63, 0x6e, 0xf7, 0xbb, 0x04, 0x27, 0x12, 0xd0, 0x35, 0x00, 0xa7, 0xeb, 0x99, 0xe3, 0xad, 0x72, - 0x32, 0x1e, 0x49, 0x1a, 0x55, 0x6c, 0x50, 0xa1, 0x57, 0x61, 0x9a, 0x1b, 0x5e, 0xcd, 0x3e, 0xbe, - 0x9c, 0x2d, 0x74, 0xf0, 0xa3, 0x6b, 0x96, 0x78, 0x3e, 0xe5, 0x4f, 0x58, 0x48, 0x40, 0xf7, 0xa0, - 0x28, 0xbc, 0x4c, 0x1e, 0xf2, 0x84, 0x35, 0xb0, 0x68, 0x88, 0x54, 0x01, 0x7f, 0x12, 0x3f, 0x61, - 0x25, 0xd1, 0x7e, 0x13, 0xca, 0x5b, 0x5e, 0x8b, 0x86, 0x5c, 0x1d, 0x37, 0x30, 0x4b, 0x35, 0x80, - 0xb1, 0x81, 0xb5, 0x2f, 0x69, 0x3c, 0x77, 0xa2, 0xc0, 0x09, 0x42, 0xd9, 0xe6, 0x15, 0x12, 0x27, - 0x7a, 0x9d, 0x03, 0xb1, 0xc4, 0xdd, 0xb8, 0xc8, 0x6b, 0x88, 0x9f, 0xbd, 0x57, 0x9f, 0x7a, 0xe7, - 0xbd, 0xfa, 0xd4, 0xbb, 0xef, 0xa9, 0x7a, 0xe2, 0xef, 0x15, 0x80, 0xed, 0xbd, 0xef, 0x93, 0x96, - 0x8c, 0x53, 0x67, 0x0f, 0xa7, 0x78, 0x5d, 0xa8, 0x66, 0xa2, 0x62, 0x90, 0x93, 0x1b, 0xaa, 0x0b, - 0x0d, 0x1c, 0x4e, 0x51, 0xa2, 0x65, 0x28, 0xc7, 0x03, 0x2b, 0x75, 0x6c, 0xf3, 0xda, 0x0d, 0xe2, - 0xa9, 0x16, 0x4e, 0x68, 0x52, 0x41, 0x73, 0xfa, 0xcc, 0xa0, 0xd9, 0x84, 0x7c, 0xcf, 0x73, 0xc5, - 0xa9, 0x94, 0x9b, 0xcf, 0xe9, 0xa4, 0x75, 0x67, 0x73, 0xed, 0x64, 0x50, 0x7f, 0x72, 0xdc, 0xb4, - 0x37, 0xea, 0x77, 0x09, 0x6b, 0xdc, 0xd9, 0x5c, 0xc3, 0x9c, 0xf9, 0xb4, 0x60, 0x50, 0x9c, 0x30, - 0x18, 0x5c, 0x03, 0x50, 0xbb, 0xe6, 0xdc, 0xf2, 0x56, 0xc7, 0xde, 0x79, 0x33, 0xc6, 0x60, 0x83, - 0x0a, 0x31, 0x98, 0x6f, 0x51, 0x22, 0x9d, 0xdd, 0xeb, 0x10, 0x16, 0x39, 0x1d, 0x39, 0xbe, 0x9a, - 0xcc, 0x55, 0x9f, 0x50, 0x6a, 0xe6, 0x57, 0x87, 0x85, 0xe1, 0x51, 0xf9, 0x28, 0x84, 0x79, 0x57, - 0x75, 0xd0, 0x89, 0xd2, 0xf2, 0xc4, 0x4a, 0x2f, 0x71, 0x85, 0x6b, 0xc3, 0x82, 0xf0, 0xa8, 0x6c, - 0xf4, 0x3d, 0x58, 0xd0, 0xc0, 0xd1, 0x31, 0x86, 0x18, 0xa8, 0xe5, 0x9b, 0x8b, 0xc7, 0x83, 0xfa, - 0xc2, 0xda, 0x58, 0x2a, 0xfc, 0x08, 0x09, 0xc8, 0x85, 0xa2, 0x2f, 0x2b, 0xc2, 0x8a, 0xc8, 0xe2, - 0x5f, 0xcf, 0xb6, 0x8b, 0xc4, 0xfb, 0x1b, 0x66, 0x25, 0x18, 0xb7, 0xe9, 0xaa, 0x08, 0x54, 0xb2, - 0xd1, 0x43, 0xa8, 0x38, 0x41, 0x10, 0x46, 0x8e, 0x1c, 0xac, 0xcc, 0x0a, 0x55, 0x2b, 0x13, 0xab, - 0x5a, 0x49, 0x64, 0x0c, 0x55, 0x9e, 0x06, 0x06, 0x9b, 0xaa, 0xd0, 0x03, 0x38, 0x1f, 0x3e, 0x08, - 0x08, 0xc5, 0x64, 0x9f, 0x50, 0x12, 0xb4, 0x08, 0xab, 0x55, 0x85, 0xf6, 0xaf, 0x66, 0xd4, 0x9e, - 0x62, 0x4e, 0x5c, 0x3a, 0x0d, 0x67, 0x78, 0x58, 0x0b, 0x6a, 0x00, 0xec, 0x7b, 0x81, 0xea, 0x1f, - 0x6a, 0xe7, 0x92, 0x09, 0xec, 0x46, 0x0c, 0xc5, 0x06, 0x05, 0x7a, 0x01, 0x2a, 0x2d, 0xbf, 0xc7, - 0x22, 0x22, 0x47, 0xbd, 0xe7, 0xc5, 0x0d, 0x8a, 0xf7, 0xb7, 0x9a, 0xa0, 0xb0, 0x49, 0x87, 0x0e, - 0x60, 0xd6, 0x33, 0x1a, 0x95, 0xda, 0x9c, 0xf0, 0xc5, 0x6b, 0x13, 0x77, 0x27, 0xac, 0x39, 0xc7, - 0x23, 0x91, 0x09, 0xc1, 0x29, 0xc9, 0xa8, 0x07, 0xd5, 0x8e, 0x99, 0x6a, 0x6a, 0xf3, 0xc2, 0x8e, - 0xd7, 0xb3, 0xa9, 0x1a, 0x4d, 0x86, 0x49, 0x3d, 0x92, 0xc2, 0xe1, 0xb4, 0x96, 0x85, 0xaf, 0x41, - 0xe5, 0xbf, 0x2c, 0xd7, 0x79, 0xb9, 0x3f, 0xec, 0x31, 0x13, 0x95, 0xfb, 0xef, 0xe7, 0xe0, 0x5c, - 0xfa, 0x9c, 0xe3, 0xb6, 0xd8, 0x1a, 0xfb, 0xc5, 0x40, 0x27, 0x83, 0xfc, 0xd8, 0x64, 0xa0, 0x62, - 0xee, 0xf4, 0xe3, 0xc4, 0xdc, 0x74, 0x3a, 0x2f, 0x64, 0x4a, 0xe7, 0x0d, 0x00, 0x5e, 0xee, 0xd0, - 0xd0, 0xf7, 0x09, 0x15, 0x21, 0xba, 0xa4, 0xbe, 0x09, 0xc4, 0x50, 0x6c, 0x50, 0xa0, 0x0d, 0x40, - 0x7b, 0x7e, 0xd8, 0x3a, 0x14, 0x26, 0xd0, 0xe1, 0x45, 0x04, 0xe7, 0x92, 0x9c, 0xab, 0x36, 0x47, - 0xb0, 0xf8, 0x14, 0x0e, 0xbb, 0x0f, 0x97, 
0x76, 0x1c, 0xca, 0x1d, 0x29, 0xb9, 0xca, 0xa2, 0x81, - 0x78, 0x63, 0xa4, 0x3d, 0x79, 0x6e, 0xd2, 0x90, 0x90, 0x6c, 0x3a, 0x81, 0x25, 0x2d, 0x8a, 0xfd, - 0x17, 0x0b, 0x9e, 0x38, 0x55, 0xf7, 0x67, 0xd0, 0x1e, 0xdd, 0x4f, 0xb7, 0x47, 0x2f, 0x65, 0x1c, - 0x23, 0x9f, 0xb6, 0x5a, 0xf9, 0x29, 0x29, 0xd5, 0x28, 0xcd, 0x40, 0x61, 0x87, 0x97, 0x9c, 0xf6, - 0x2f, 0x2d, 0x98, 0x15, 0x4f, 0x93, 0x8c, 0xdf, 0xeb, 0x50, 0xd8, 0x0f, 0xf5, 0x88, 0xad, 0x24, - 0xc5, 0x6f, 0x70, 0x00, 0x96, 0xf0, 0xc7, 0x98, 0xcf, 0xbf, 0x6d, 0x41, 0x7a, 0xf0, 0x8d, 0x5e, - 0x91, 0xfe, 0x6e, 0xc5, 0x93, 0xe9, 0x09, 0x7d, 0xfd, 0xe5, 0x71, 0x8d, 0xdd, 0x85, 0x4c, 0x53, - 0xce, 0x2b, 0x50, 0xc6, 0x61, 0x18, 0xed, 0x38, 0xd1, 0x01, 0xe3, 0x1b, 0xef, 0xf2, 0x07, 0x65, - 0x1b, 0xb1, 0x71, 0x81, 0xc1, 0x12, 0x6e, 0xff, 0xc2, 0x82, 0x27, 0xc6, 0x7e, 0x12, 0xe1, 0xd7, - 0xae, 0x15, 0xbf, 0xa9, 0x1d, 0xc5, 0x1e, 0x98, 0xd0, 0x61, 0x83, 0x8a, 0x77, 0x63, 0xa9, 0xef, - 0x28, 0xc3, 0xdd, 0x58, 0x4a, 0x1b, 0x4e, 0xd3, 0xda, 0xff, 0xca, 0x41, 0x51, 0x8e, 0x79, 0xfe, - 0xc7, 0xde, 0xfa, 0x34, 0x14, 0x99, 0xd0, 0xa3, 0x96, 0x17, 0x67, 0x72, 0xa9, 0x1d, 0x2b, 0xac, - 0xe8, 0x60, 0x08, 0x63, 0x4e, 0x5b, 0x47, 0xb8, 0xa4, 0x83, 0x91, 0x60, 0xac, 0xf1, 0xe8, 0x45, - 0x28, 0x52, 0xe2, 0xb0, 0xb8, 0x37, 0x5c, 0xd4, 0x22, 0xb1, 0x80, 0x9e, 0x0c, 0xea, 0xb3, 0x4a, - 0xb8, 0x78, 0xc7, 0x8a, 0x1a, 0xdd, 0x83, 0x19, 0x97, 0x44, 0x8e, 0xe7, 0xeb, 0x6e, 0xe1, 0xf9, - 0x49, 0xc6, 0x61, 0x6b, 0x92, 0xb5, 0x59, 0xe1, 0x6b, 0x52, 0x2f, 0x58, 0x0b, 0xe4, 0xd1, 0xb9, - 0x15, 0xba, 0xf2, 0x4b, 0x6a, 0x21, 0x89, 0xce, 0xab, 0xa1, 0x4b, 0xb0, 0xc0, 0xd8, 0xef, 0x58, - 0x50, 0x91, 0x92, 0x56, 0x9d, 0x1e, 0x23, 0xe8, 0x6a, 0xbc, 0x0b, 0x79, 0xdc, 0xba, 0x5e, 0x9c, - 0xe6, 0x1d, 0xd6, 0xc9, 0xa0, 0x5e, 0x16, 0x64, 0xa2, 0xdd, 0xd2, 0x1b, 0x30, 0x6c, 0x94, 0x3b, - 0xc3, 0x46, 0x4f, 0x41, 0x41, 0xdc, 0x1e, 0x65, 0xcc, 0xb8, 0x09, 0x11, 0x17, 0x0c, 0x4b, 0x9c, - 0xfd, 0x71, 0x0e, 0xaa, 0xa9, 0xcd, 0x65, 0xe8, 0x38, 0xe2, 0xd1, 0x6b, 0x2e, 0xc3, 0x38, 0x7f, - 0xfc, 0xf7, 0xef, 0xef, 0x40, 0xb1, 0xc5, 0xf7, 0xa7, 0xff, 0x80, 0x70, 0x75, 0x92, 0xa3, 0x10, - 0x96, 0x49, 0x3c, 0x49, 0xbc, 0x32, 0xac, 0x04, 0xa2, 0x9b, 0x30, 0x4f, 0x49, 0x44, 0xfb, 0x2b, - 0xfb, 0x11, 0xa1, 0xe6, 0x0c, 0xa0, 0x90, 0xd4, 0xe4, 0x78, 0x98, 0x00, 0x8f, 0xf2, 0xe8, 0x7c, - 0x5a, 0x7c, 0x8c, 0x7c, 0x6a, 0xef, 0xc1, 0xec, 0x6d, 0x67, 0xcf, 0x8f, 0xbf, 0x29, 0x62, 0xa8, - 0x7a, 0x41, 0xcb, 0xef, 0xb9, 0x44, 0x46, 0x62, 0x1d, 0xbd, 0xf4, 0xa5, 0xdd, 0x34, 0x91, 0x27, - 0x83, 0xfa, 0x85, 0x14, 0x40, 0x7e, 0x44, 0xc3, 0x69, 0x11, 0xb6, 0x0f, 0xd3, 0x9f, 0x61, 0x8f, - 0xfa, 0x5d, 0x28, 0x27, 0x5d, 0xc4, 0xa7, 0xac, 0xd2, 0x7e, 0x03, 0x4a, 0xdc, 0xe3, 0x75, 0xf7, - 0x7b, 0x46, 0x49, 0x94, 0x2e, 0x56, 0x72, 0x59, 0x8a, 0x15, 0xbb, 0x03, 0xd5, 0x3b, 0x5d, 0xf7, - 0x31, 0xbf, 0x2a, 0xe7, 0x32, 0x67, 0xad, 0x6b, 0x20, 0xff, 0xa9, 0xc1, 0x13, 0x84, 0xcc, 0xda, - 0x46, 0x82, 0x30, 0x13, 0xaf, 0xf1, 0x55, 0xe1, 0x27, 0x16, 0x80, 0x18, 0xfb, 0xac, 0x1f, 0x91, - 0x20, 0xe2, 0x76, 0xe0, 0x4e, 0x35, 0x6c, 0x07, 0x11, 0x19, 0x04, 0x06, 0xdd, 0x81, 0x62, 0x28, - 0xbd, 0x49, 0x8e, 0xf9, 0x27, 0x9c, 0x98, 0xc6, 0x17, 0x49, 0xfa, 0x13, 0x56, 0xc2, 0x9a, 0x97, - 0x3f, 0xf8, 0x64, 0x71, 0xea, 0xc3, 0x4f, 0x16, 0xa7, 0x3e, 0xfa, 0x64, 0x71, 0xea, 0xad, 0xe3, - 0x45, 0xeb, 0x83, 0xe3, 0x45, 0xeb, 0xc3, 0xe3, 0x45, 0xeb, 0xa3, 0xe3, 0x45, 0xeb, 0xe3, 0xe3, - 0x45, 0xeb, 0x9d, 0xbf, 0x2d, 0x4e, 0xdd, 0xcb, 0x1d, 0x5d, 0xfd, 0x4f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x83, 0xb7, 0x3e, 0x8b, 0x95, 0x26, 0x00, 0x00, + 0xc4, 0xa9, 
0x12, 0xbd, 0x20, 0x55, 0xe2, 0x52, 0x09, 0x64, 0xb5, 0x01, 0x09, 0x6e, 0x9c, 0xb8, + 0xe4, 0x84, 0xde, 0xaf, 0x99, 0x37, 0x76, 0xbc, 0x19, 0xb3, 0xa5, 0xe2, 0xe4, 0x99, 0xef, 0xe7, + 0x7b, 0xdf, 0xfb, 0xde, 0xf7, 0x6b, 0x0c, 0x5b, 0x87, 0xd7, 0x59, 0xc3, 0x0b, 0x97, 0x0f, 0x7b, + 0x7b, 0x84, 0x06, 0x24, 0x22, 0x6c, 0xf9, 0x88, 0x04, 0x6e, 0x48, 0x97, 0x15, 0xc2, 0xe9, 0x7a, + 0x1d, 0xa7, 0x75, 0xe0, 0x05, 0x84, 0xf6, 0x97, 0xbb, 0x87, 0x6d, 0x0e, 0x60, 0xcb, 0x1d, 0x12, + 0x39, 0xcb, 0x47, 0x57, 0x97, 0xdb, 0x24, 0x20, 0xd4, 0x89, 0x88, 0xdb, 0xe8, 0xd2, 0x30, 0x0a, + 0xd1, 0x97, 0x24, 0x57, 0xc3, 0xe4, 0x6a, 0x74, 0x0f, 0xdb, 0x1c, 0xc0, 0x1a, 0x9c, 0xab, 0x71, + 0x74, 0x75, 0xe1, 0xd9, 0xb6, 0x17, 0x1d, 0xf4, 0xf6, 0x1a, 0xad, 0xb0, 0xb3, 0xdc, 0x0e, 0xdb, + 0xe1, 0xb2, 0x60, 0xde, 0xeb, 0xed, 0x8b, 0x37, 0xf1, 0x22, 0x9e, 0xa4, 0xd0, 0x85, 0xb1, 0x4b, + 0xa1, 0xbd, 0x20, 0xf2, 0x3a, 0x64, 0x78, 0x15, 0x0b, 0x2f, 0x9e, 0xc5, 0xc0, 0x5a, 0x07, 0xa4, + 0xe3, 0x0c, 0xf3, 0xd9, 0x7f, 0xcc, 0x43, 0x69, 0x65, 0x67, 0xf3, 0x26, 0x0d, 0x7b, 0x5d, 0xb4, + 0x04, 0xd3, 0x81, 0xd3, 0x21, 0x35, 0x6b, 0xc9, 0xba, 0x5c, 0x6e, 0xce, 0x7e, 0x30, 0xa8, 0x4f, + 0x1d, 0x0f, 0xea, 0xd3, 0xaf, 0x3b, 0x1d, 0x82, 0x05, 0x06, 0xf9, 0x50, 0x3a, 0x22, 0x94, 0x79, + 0x61, 0xc0, 0x6a, 0xb9, 0xa5, 0xfc, 0xe5, 0xca, 0xb5, 0x57, 0x1a, 0x59, 0xf6, 0xdf, 0x10, 0x0a, + 0xee, 0x4a, 0xd6, 0x8d, 0x90, 0xae, 0x79, 0xac, 0x15, 0x1e, 0x11, 0xda, 0x6f, 0xce, 0x29, 0x2d, + 0x25, 0x85, 0x64, 0x38, 0xd6, 0x80, 0x7e, 0x6c, 0xc1, 0x5c, 0x97, 0x92, 0x7d, 0x42, 0x29, 0x71, + 0x15, 0xbe, 0x96, 0x5f, 0xb2, 0x3e, 0x05, 0xb5, 0x35, 0xa5, 0x76, 0x6e, 0x67, 0x48, 0x3e, 0x1e, + 0xd1, 0x88, 0x7e, 0x63, 0xc1, 0x02, 0x23, 0xf4, 0x88, 0xd0, 0x15, 0xd7, 0xa5, 0x84, 0xb1, 0x66, + 0x7f, 0xd5, 0xf7, 0x48, 0x10, 0xad, 0x6e, 0xae, 0x61, 0x56, 0x9b, 0x16, 0x76, 0xf8, 0x46, 0xb6, + 0x05, 0xed, 0x8e, 0x93, 0xd3, 0xb4, 0xd5, 0x8a, 0x16, 0xc6, 0x92, 0x30, 0xfc, 0x88, 0x65, 0xd8, + 0xfb, 0x30, 0xab, 0x0f, 0xf2, 0x96, 0xc7, 0x22, 0x74, 0x17, 0x8a, 0x6d, 0xfe, 0xc2, 0x6a, 0x96, + 0x58, 0x60, 0x23, 0xdb, 0x02, 0xb5, 0x8c, 0xe6, 0x39, 0xb5, 0x9e, 0xa2, 0x78, 0x65, 0x58, 0x49, + 0xb3, 0x7f, 0x3e, 0x0d, 0x95, 0x95, 0x9d, 0x4d, 0x4c, 0x58, 0xd8, 0xa3, 0x2d, 0x92, 0xc1, 0x69, + 0xae, 0x01, 0xf0, 0x5f, 0xd6, 0x75, 0x5a, 0xc4, 0xad, 0xe5, 0x96, 0xac, 0xcb, 0xa5, 0x26, 0x52, + 0x74, 0xf0, 0x7a, 0x8c, 0xc1, 0x06, 0x15, 0x97, 0x7a, 0xe8, 0x05, 0xae, 0x38, 0x6d, 0x43, 0xea, + 0x6b, 0x5e, 0xe0, 0x62, 0x81, 0x41, 0xb7, 0xa0, 0x70, 0x44, 0xe8, 0x1e, 0xb7, 0x3f, 0x77, 0x88, + 0xaf, 0x64, 0xdb, 0xde, 0x5d, 0xce, 0xd2, 0x2c, 0x1f, 0x0f, 0xea, 0x05, 0xf1, 0x88, 0xa5, 0x10, + 0xd4, 0x00, 0x60, 0x07, 0x21, 0x8d, 0xc4, 0x72, 0x6a, 0x85, 0xa5, 0xfc, 0xe5, 0x72, 0xf3, 0x1c, + 0x5f, 0xdf, 0x6e, 0x0c, 0xc5, 0x06, 0x05, 0xba, 0x0e, 0xb3, 0xcc, 0x0b, 0xda, 0x3d, 0xdf, 0xa1, + 0x1c, 0x50, 0x2b, 0x8a, 0x75, 0x5e, 0x54, 0xeb, 0x9c, 0xdd, 0x35, 0x70, 0x38, 0x45, 0xc9, 0x35, + 0xb5, 0x9c, 0x88, 0xb4, 0x43, 0xea, 0x11, 0x56, 0x9b, 0x49, 0x34, 0xad, 0xc6, 0x50, 0x6c, 0x50, + 0xa0, 0xa7, 0xa0, 0x20, 0x2c, 0x5f, 0x2b, 0x09, 0x15, 0x55, 0xa5, 0xa2, 0x20, 0x8e, 0x05, 0x4b, + 0x1c, 0x7a, 0x06, 0x66, 0xd4, 0xad, 0xa9, 0x95, 0x05, 0xd9, 0x79, 0x45, 0x36, 0xa3, 0xdd, 0x5a, + 0xe3, 0xd1, 0xb7, 0x00, 0xb1, 0x28, 0xa4, 0x4e, 0x9b, 0x28, 0xd4, 0xab, 0x0e, 0x3b, 0xa8, 0x81, + 0xe0, 0x5a, 0x50, 0x5c, 0x68, 0x77, 0x84, 0x02, 0x9f, 0xc2, 0x65, 0xff, 0xce, 0x82, 0xf3, 0x86, + 0x2f, 0x08, 0xbf, 0xbb, 0x0e, 0xb3, 0x6d, 0xe3, 0xd6, 0x29, 0xbf, 0x88, 0x2d, 0x63, 0xde, 0x48, + 0x9c, 0xa2, 0x44, 0x04, 0xca, 0x54, 
0x49, 0xd2, 0xd1, 0xe5, 0x6a, 0x66, 0xa7, 0xd5, 0x6b, 0x48, + 0x34, 0x19, 0x40, 0x86, 0x13, 0xc9, 0xf6, 0x3f, 0x2c, 0xe1, 0xc0, 0x3a, 0xde, 0xa0, 0xcb, 0x46, + 0x4c, 0xb3, 0xc4, 0x71, 0xcc, 0x8e, 0x89, 0x47, 0x67, 0x04, 0x82, 0xdc, 0xff, 0x45, 0x20, 0xb8, + 0x51, 0xfa, 0xd5, 0xbb, 0xf5, 0xa9, 0xb7, 0xfe, 0xba, 0x34, 0x65, 0x77, 0xa0, 0xba, 0x4a, 0x89, + 0x13, 0x91, 0xed, 0x6e, 0x24, 0x36, 0x60, 0x43, 0xd1, 0xa5, 0x7d, 0xdc, 0x0b, 0xd4, 0x46, 0x81, + 0xdf, 0xef, 0x35, 0x01, 0xc1, 0x0a, 0xc3, 0xcf, 0x6f, 0xdf, 0x23, 0xbe, 0xbb, 0xe5, 0x04, 0x4e, + 0x9b, 0x50, 0x75, 0x03, 0x63, 0xab, 0x6e, 0x18, 0x38, 0x9c, 0xa2, 0xb4, 0x7f, 0x9a, 0x87, 0xea, + 0x1a, 0xf1, 0x49, 0xa2, 0x6f, 0x03, 0x50, 0x9b, 0x3a, 0x2d, 0xb2, 0x43, 0xa8, 0x17, 0xba, 0xbb, + 0xa4, 0x15, 0x06, 0x2e, 0x13, 0x1e, 0x91, 0x6f, 0x7e, 0x8e, 0xfb, 0xd9, 0xcd, 0x11, 0x2c, 0x3e, + 0x85, 0x03, 0xf9, 0x50, 0xed, 0x52, 0xf1, 0xec, 0x45, 0x2a, 0xf7, 0xf0, 0x3b, 0xff, 0x7c, 0x36, + 0x53, 0xef, 0x98, 0xac, 0xcd, 0xf9, 0xe3, 0x41, 0xbd, 0x9a, 0x02, 0xe1, 0xb4, 0x70, 0xf4, 0x4d, + 0x98, 0x0b, 0x69, 0xf7, 0xc0, 0x09, 0xd6, 0x48, 0x97, 0x04, 0x2e, 0x09, 0x22, 0x26, 0xac, 0x50, + 0x6a, 0x5e, 0xe4, 0x19, 0x63, 0x7b, 0x08, 0x87, 0x47, 0xa8, 0xd1, 0x3d, 0x98, 0xef, 0xd2, 0xb0, + 0xeb, 0xb4, 0x1d, 0x2e, 0x71, 0x27, 0xf4, 0xbd, 0x56, 0x5f, 0xc4, 0xa9, 0x72, 0xf3, 0xca, 0xf1, + 0xa0, 0x3e, 0xbf, 0x33, 0x8c, 0x3c, 0x19, 0xd4, 0x2f, 0x08, 0xd3, 0x71, 0x48, 0x82, 0xc4, 0xa3, + 0x62, 0x8c, 0x33, 0x2c, 0x8c, 0x3b, 0x43, 0x7b, 0x13, 0x4a, 0x6b, 0x3d, 0x2a, 0xb8, 0xd0, 0xcb, + 0x50, 0x72, 0xd5, 0xb3, 0xb2, 0xfc, 0x93, 0x3a, 0xe5, 0x6a, 0x9a, 0x93, 0x41, 0xbd, 0xca, 0x8b, + 0x84, 0x86, 0x06, 0xe0, 0x98, 0xc5, 0xbe, 0x0f, 0xd5, 0xf5, 0x87, 0xdd, 0x90, 0x46, 0xfa, 0x4c, + 0x9f, 0x86, 0x22, 0x11, 0x00, 0x21, 0xad, 0x94, 0xe4, 0x09, 0x49, 0x86, 0x15, 0x96, 0xc7, 0x2d, + 0xf2, 0xd0, 0x69, 0x45, 0x2a, 0xe0, 0xc7, 0x71, 0x6b, 0x9d, 0x03, 0xb1, 0xc4, 0xd9, 0xef, 0x5b, + 0x50, 0x14, 0x1e, 0xc5, 0xd0, 0x6d, 0xc8, 0x77, 0x9c, 0xae, 0x4a, 0x56, 0x2f, 0x64, 0x3b, 0x59, + 0xc9, 0xda, 0xd8, 0x72, 0xba, 0xeb, 0x41, 0x44, 0xfb, 0xcd, 0x8a, 0x52, 0x92, 0xdf, 0x72, 0xba, + 0x98, 0x8b, 0x5b, 0x70, 0xa1, 0xa4, 0xb1, 0x68, 0x0e, 0xf2, 0x87, 0xa4, 0x2f, 0x03, 0x12, 0xe6, + 0x8f, 0xa8, 0x09, 0x85, 0x23, 0xc7, 0xef, 0x11, 0xe5, 0x4f, 0x57, 0x26, 0xd1, 0x8a, 0x25, 0xeb, + 0x8d, 0xdc, 0x75, 0xcb, 0xde, 0x06, 0xb8, 0x49, 0x62, 0x0b, 0xad, 0xc0, 0x79, 0x1d, 0x6d, 0xd2, + 0x41, 0xf0, 0xf3, 0x6a, 0x79, 0xe7, 0x71, 0x1a, 0x8d, 0x87, 0xe9, 0xed, 0xfb, 0x50, 0x16, 0x81, + 0x92, 0xe7, 0xbb, 0x24, 0x03, 0x58, 0x8f, 0xc8, 0x00, 0x3a, 0x61, 0xe6, 0xc6, 0x25, 0x4c, 0x23, + 0x2e, 0xf8, 0x50, 0x95, 0xbc, 0x3a, 0x87, 0x67, 0xd2, 0x70, 0x05, 0x4a, 0x7a, 0x99, 0x4a, 0x4b, + 0x5c, 0xbb, 0x69, 0x41, 0x38, 0xa6, 0x30, 0xb4, 0x1d, 0x40, 0x2a, 0xe8, 0x67, 0x53, 0x66, 0x24, + 0xb4, 0xdc, 0xa3, 0x13, 0x9a, 0xa1, 0xe9, 0x47, 0x50, 0x1b, 0x57, 0xf0, 0x3d, 0x46, 0x5a, 0xca, + 0xbe, 0x14, 0xfb, 0x6d, 0x0b, 0xe6, 0x4c, 0x49, 0xd9, 0x8f, 0x2f, 0xbb, 0x92, 0xb3, 0x4b, 0x23, + 0xc3, 0x22, 0xbf, 0xb6, 0xe0, 0x62, 0x6a, 0x6b, 0x13, 0x9d, 0xf8, 0x04, 0x8b, 0x32, 0x9d, 0x23, + 0x3f, 0x81, 0x73, 0x2c, 0x43, 0x65, 0x33, 0xf0, 0x22, 0xcf, 0xf1, 0xbd, 0x1f, 0x10, 0x7a, 0x76, + 0x31, 0x69, 0xff, 0xc1, 0x82, 0x59, 0x83, 0x83, 0xa1, 0xfb, 0x30, 0xc3, 0xe3, 0xae, 0x17, 0xb4, + 0x55, 0xec, 0xc8, 0x58, 0x33, 0x18, 0x42, 0x92, 0x7d, 0xed, 0x48, 0x49, 0x58, 0x8b, 0x44, 0x3b, + 0x50, 0xa4, 0x84, 0xf5, 0xfc, 0x68, 0xb2, 0x10, 0xb1, 0x1b, 0x39, 0x51, 0x8f, 0xc9, 0xd8, 0x8c, + 0x05, 0x3f, 0x56, 0x72, 0xec, 0x3f, 0xe7, 0xa0, 0x7a, 0xcb, 
0xd9, 0x23, 0xfe, 0x2e, 0xf1, 0x49, + 0x2b, 0x0a, 0x29, 0xfa, 0x21, 0x54, 0x3a, 0x4e, 0xd4, 0x3a, 0x10, 0x50, 0x5d, 0xae, 0xaf, 0x65, + 0x53, 0x94, 0x92, 0xd4, 0xd8, 0x4a, 0xc4, 0xc8, 0x80, 0x78, 0x41, 0x6d, 0xac, 0x62, 0x60, 0xb0, + 0xa9, 0x4d, 0xf4, 0x58, 0xe2, 0x7d, 0xfd, 0x61, 0x97, 0xd7, 0x12, 0x93, 0xb7, 0x76, 0xa9, 0x25, + 0x60, 0xf2, 0x66, 0xcf, 0xa3, 0xa4, 0x43, 0x82, 0x28, 0xe9, 0xb1, 0xb6, 0x86, 0xe4, 0xe3, 0x11, + 0x8d, 0x0b, 0xaf, 0xc0, 0xdc, 0xf0, 0xe2, 0x4f, 0x89, 0xd7, 0x17, 0xcd, 0x78, 0x5d, 0x36, 0x23, + 0xf0, 0x6f, 0x2d, 0xa8, 0x8d, 0x5b, 0x08, 0xfa, 0xa2, 0x21, 0x28, 0xc9, 0x11, 0xaf, 0x91, 0xbe, + 0x94, 0xba, 0x0e, 0xa5, 0xb0, 0xcb, 0xbb, 0xe2, 0x90, 0x2a, 0x3f, 0x7f, 0x46, 0xfb, 0xee, 0xb6, + 0x82, 0x9f, 0x0c, 0xea, 0x97, 0x52, 0xe2, 0x35, 0x02, 0xc7, 0xac, 0x3c, 0x31, 0x8b, 0xf5, 0xf0, + 0x62, 0x21, 0x4e, 0xcc, 0x77, 0x05, 0x04, 0x2b, 0x8c, 0xfd, 0x7b, 0x0b, 0xa6, 0x45, 0x95, 0x7c, + 0x1f, 0x4a, 0xdc, 0x7e, 0xae, 0x13, 0x39, 0x62, 0x5d, 0x99, 0xfb, 0x33, 0xce, 0xbd, 0x45, 0x22, + 0x27, 0xb9, 0x5f, 0x1a, 0x82, 0x63, 0x89, 0x08, 0x43, 0xc1, 0x8b, 0x48, 0x47, 0x1f, 0xe4, 0xb3, + 0x63, 0x45, 0xab, 0xe9, 0x40, 0x03, 0x3b, 0x0f, 0xd6, 0x1f, 0x46, 0x24, 0xe0, 0x87, 0x91, 0x04, + 0x83, 0x4d, 0x2e, 0x03, 0x4b, 0x51, 0xf6, 0xbf, 0x2d, 0x88, 0x55, 0xf1, 0xeb, 0xce, 0x88, 0xbf, + 0x7f, 0xcb, 0x0b, 0x0e, 0x95, 0x59, 0xe3, 0xe5, 0xec, 0x2a, 0x38, 0x8e, 0x29, 0x4e, 0x4b, 0x88, + 0xb9, 0xc9, 0x12, 0x22, 0x57, 0xd8, 0x0a, 0x83, 0xc8, 0x0b, 0x7a, 0x23, 0xf1, 0x65, 0x55, 0xc1, + 0x71, 0x4c, 0xc1, 0x7b, 0x1c, 0x4a, 0x3a, 0x8e, 0x17, 0x78, 0x41, 0x9b, 0x6f, 0x62, 0x35, 0xec, + 0x05, 0x91, 0x28, 0xc0, 0xf2, 0x49, 0x8f, 0x83, 0x47, 0x28, 0xf0, 0x29, 0x5c, 0xf6, 0x9f, 0xf2, + 0x50, 0xe1, 0xfb, 0xd6, 0xd9, 0xfd, 0x25, 0xa8, 0xfa, 0xa6, 0x27, 0xa8, 0xfd, 0x5f, 0x52, 0x62, + 0xd3, 0x77, 0x1b, 0xa7, 0x69, 0x39, 0xb3, 0x28, 0x99, 0x63, 0xe6, 0x5c, 0x9a, 0x79, 0xc3, 0x44, + 0xe2, 0x34, 0x2d, 0x8f, 0xd9, 0x0f, 0xf8, 0x1d, 0x51, 0xc5, 0x68, 0x7c, 0x4c, 0xdf, 0xe6, 0x40, + 0x2c, 0x71, 0xa7, 0xd9, 0x7a, 0x7a, 0x42, 0x5b, 0xdf, 0x80, 0x73, 0xdc, 0x29, 0xc2, 0x5e, 0xa4, + 0x2b, 0xf6, 0x82, 0xb0, 0x1c, 0x3a, 0x1e, 0xd4, 0xcf, 0xdd, 0x4e, 0x61, 0xf0, 0x10, 0x25, 0x5f, + 0xa3, 0xef, 0x75, 0xbc, 0xa8, 0x36, 0x23, 0x58, 0xe2, 0x35, 0xde, 0xe2, 0x40, 0x2c, 0x71, 0xa9, + 0xc3, 0x2c, 0x9d, 0x79, 0x98, 0x5b, 0x70, 0xc1, 0xf1, 0xfd, 0xf0, 0x81, 0xd8, 0x66, 0x33, 0x0c, + 0x0f, 0x3b, 0x0e, 0x3d, 0x64, 0xa2, 0xcf, 0x2d, 0x35, 0xbf, 0xa0, 0x18, 0x2f, 0xac, 0x8c, 0x92, + 0xe0, 0xd3, 0xf8, 0xec, 0x7f, 0xe6, 0x00, 0xc9, 0x8e, 0xc5, 0x95, 0x85, 0x9c, 0x0c, 0x36, 0xcf, + 0xc0, 0x4c, 0x47, 0x75, 0x3c, 0x56, 0x3a, 0xd7, 0xe9, 0x66, 0x47, 0xe3, 0xd1, 0x16, 0x94, 0xe5, + 0xa5, 0x4f, 0x1c, 0x79, 0x59, 0x11, 0x97, 0xb7, 0x35, 0xe2, 0x64, 0x50, 0x5f, 0x48, 0xa9, 0x89, + 0x31, 0xb7, 0xfb, 0x5d, 0x82, 0x13, 0x09, 0xe8, 0x1a, 0x80, 0xd3, 0xf5, 0xcc, 0xf1, 0x56, 0x39, + 0x19, 0x8f, 0x24, 0x8d, 0x2a, 0x36, 0xa8, 0xd0, 0xab, 0x30, 0xcd, 0x0d, 0xaf, 0x66, 0x1f, 0x5f, + 0xce, 0x16, 0x3a, 0xf8, 0xd1, 0x35, 0x4b, 0x3c, 0x9f, 0xf2, 0x27, 0x2c, 0x24, 0xa0, 0x7b, 0x50, + 0x14, 0x5e, 0x26, 0x0f, 0x79, 0xc2, 0x1a, 0x58, 0x34, 0x44, 0xaa, 0x80, 0x3f, 0x89, 0x9f, 0xb0, + 0x92, 0x68, 0xbf, 0x09, 0xe5, 0x2d, 0xaf, 0x45, 0x43, 0xae, 0x8e, 0x1b, 0x98, 0xa5, 0x1a, 0xc0, + 0xd8, 0xc0, 0xda, 0x97, 0x34, 0x9e, 0x3b, 0x51, 0xe0, 0x04, 0xa1, 0x6c, 0xf3, 0x0a, 0x89, 0x13, + 0xbd, 0xce, 0x81, 0x58, 0xe2, 0x6e, 0x5c, 0xe4, 0x35, 0xc4, 0xcf, 0xde, 0xab, 0x4f, 0xbd, 0xf3, + 0x5e, 0x7d, 0xea, 0xdd, 0xf7, 0x54, 0x3d, 0xf1, 0xf7, 0x0a, 0xc0, 0xf6, 0xde, 0xf7, 
0x49, 0x4b, + 0xc6, 0xa9, 0xb3, 0x87, 0x53, 0xbc, 0x2e, 0x54, 0x33, 0x51, 0x31, 0xc8, 0xc9, 0x0d, 0xd5, 0x85, + 0x06, 0x0e, 0xa7, 0x28, 0xd1, 0x32, 0x94, 0xe3, 0x81, 0x95, 0x3a, 0xb6, 0x79, 0xed, 0x06, 0xf1, + 0x54, 0x0b, 0x27, 0x34, 0xa9, 0xa0, 0x39, 0x7d, 0x66, 0xd0, 0x6c, 0x42, 0xbe, 0xe7, 0xb9, 0xe2, + 0x54, 0xca, 0xcd, 0xe7, 0x74, 0xd2, 0xba, 0xb3, 0xb9, 0x76, 0x32, 0xa8, 0x3f, 0x39, 0x6e, 0xda, + 0x1b, 0xf5, 0xbb, 0x84, 0x35, 0xee, 0x6c, 0xae, 0x61, 0xce, 0x7c, 0x5a, 0x30, 0x28, 0x4e, 0x18, + 0x0c, 0xae, 0x01, 0xa8, 0x5d, 0x73, 0x6e, 0x79, 0xab, 0x63, 0xef, 0xbc, 0x19, 0x63, 0xb0, 0x41, + 0x85, 0x18, 0xcc, 0xb7, 0x28, 0x91, 0xce, 0xee, 0x75, 0x08, 0x8b, 0x9c, 0x8e, 0x1c, 0x5f, 0x4d, + 0xe6, 0xaa, 0x4f, 0x28, 0x35, 0xf3, 0xab, 0xc3, 0xc2, 0xf0, 0xa8, 0x7c, 0x14, 0xc2, 0xbc, 0xab, + 0x3a, 0xe8, 0x44, 0x69, 0x79, 0x62, 0xa5, 0x97, 0xb8, 0xc2, 0xb5, 0x61, 0x41, 0x78, 0x54, 0x36, + 0xfa, 0x1e, 0x2c, 0x68, 0xe0, 0xe8, 0x18, 0x43, 0x0c, 0xd4, 0xf2, 0xcd, 0xc5, 0xe3, 0x41, 0x7d, + 0x61, 0x6d, 0x2c, 0x15, 0x7e, 0x84, 0x04, 0xe4, 0x42, 0xd1, 0x97, 0x15, 0x61, 0x45, 0x64, 0xf1, + 0xaf, 0x67, 0xdb, 0x45, 0xe2, 0xfd, 0x0d, 0xb3, 0x12, 0x8c, 0xdb, 0x74, 0x55, 0x04, 0x2a, 0xd9, + 0xe8, 0x21, 0x54, 0x9c, 0x20, 0x08, 0x23, 0x47, 0x0e, 0x56, 0x66, 0x85, 0xaa, 0x95, 0x89, 0x55, + 0xad, 0x24, 0x32, 0x86, 0x2a, 0x4f, 0x03, 0x83, 0x4d, 0x55, 0xe8, 0x01, 0x9c, 0x0f, 0x1f, 0x04, + 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x09, 0x5a, 0x84, 0xd5, 0xaa, 0x42, 0xfb, 0x57, 0x33, 0x6a, 0x4f, + 0x31, 0x27, 0x2e, 0x9d, 0x86, 0x33, 0x3c, 0xac, 0x05, 0x35, 0x00, 0xf6, 0xbd, 0x40, 0xf5, 0x0f, + 0xb5, 0x73, 0xc9, 0x04, 0x76, 0x23, 0x86, 0x62, 0x83, 0x02, 0xbd, 0x00, 0x95, 0x96, 0xdf, 0x63, + 0x11, 0x91, 0xa3, 0xde, 0xf3, 0xe2, 0x06, 0xc5, 0xfb, 0x5b, 0x4d, 0x50, 0xd8, 0xa4, 0x43, 0x07, + 0x30, 0xeb, 0x19, 0x8d, 0x4a, 0x6d, 0x4e, 0xf8, 0xe2, 0xb5, 0x89, 0xbb, 0x13, 0xd6, 0x9c, 0xe3, + 0x91, 0xc8, 0x84, 0xe0, 0x94, 0x64, 0xd4, 0x83, 0x6a, 0xc7, 0x4c, 0x35, 0xb5, 0x79, 0x61, 0xc7, + 0xeb, 0xd9, 0x54, 0x8d, 0x26, 0xc3, 0xa4, 0x1e, 0x49, 0xe1, 0x70, 0x5a, 0xcb, 0xc2, 0xd7, 0xa0, + 0xf2, 0x5f, 0x96, 0xeb, 0xbc, 0xdc, 0x1f, 0xf6, 0x98, 0x89, 0xca, 0xfd, 0xf7, 0x73, 0x70, 0x2e, + 0x7d, 0xce, 0x71, 0x5b, 0x6c, 0x8d, 0xfd, 0x62, 0xa0, 0x93, 0x41, 0x7e, 0x6c, 0x32, 0x50, 0x31, + 0x77, 0xfa, 0x71, 0x62, 0x6e, 0x3a, 0x9d, 0x17, 0x32, 0xa5, 0xf3, 0x06, 0x00, 0x2f, 0x77, 0x68, + 0xe8, 0xfb, 0x84, 0x8a, 0x10, 0x5d, 0x52, 0xdf, 0x04, 0x62, 0x28, 0x36, 0x28, 0xd0, 0x06, 0xa0, + 0x3d, 0x3f, 0x6c, 0x1d, 0x0a, 0x13, 0xe8, 0xf0, 0x22, 0x82, 0x73, 0x49, 0xce, 0x55, 0x9b, 0x23, + 0x58, 0x7c, 0x0a, 0x87, 0xdd, 0x87, 0x4b, 0x3b, 0x0e, 0xe5, 0x8e, 0x94, 0x5c, 0x65, 0xd1, 0x40, + 0xbc, 0x31, 0xd2, 0x9e, 0x3c, 0x37, 0x69, 0x48, 0x48, 0x36, 0x9d, 0xc0, 0x92, 0x16, 0xc5, 0xfe, + 0x8b, 0x05, 0x4f, 0x9c, 0xaa, 0xfb, 0x33, 0x68, 0x8f, 0xde, 0x48, 0xb7, 0x47, 0x2f, 0x65, 0x1c, + 0x23, 0x9f, 0xb6, 0xda, 0x31, 0xcd, 0xd2, 0x0c, 0x14, 0x76, 0x78, 0xd9, 0x69, 0xff, 0xd2, 0x82, + 0x59, 0xf1, 0x34, 0xc9, 0x08, 0xbe, 0x0e, 0x85, 0xfd, 0x50, 0x8f, 0xd9, 0x4a, 0xf2, 0x6b, 0xd5, + 0x06, 0x07, 0x60, 0x09, 0x7f, 0x8c, 0x19, 0xfd, 0xdb, 0x16, 0xa4, 0x87, 0xdf, 0xe8, 0x15, 0xe9, + 0xf3, 0x56, 0x3c, 0x9d, 0x9e, 0xd0, 0xdf, 0x5f, 0x1e, 0xd7, 0xdc, 0x5d, 0xc8, 0x34, 0xe9, 0xbc, + 0x02, 0x65, 0x1c, 0x86, 0xd1, 0x8e, 0x13, 0x1d, 0x30, 0xbe, 0xf1, 0x2e, 0x7f, 0x50, 0xb6, 0x11, + 0x1b, 0x17, 0x18, 0x2c, 0xe1, 0xf6, 0x2f, 0x2c, 0x78, 0x62, 0xec, 0x67, 0x11, 0x7e, 0xf5, 0x5a, + 0xf1, 0x9b, 0xda, 0x51, 0xec, 0x85, 0x09, 0x1d, 0x36, 0xa8, 0x78, 0x47, 0x96, 0xfa, 0x96, 0x32, + 0xdc, 0x91, 
0xa5, 0xb4, 0xe1, 0x34, 0xad, 0xfd, 0xaf, 0x1c, 0x14, 0xe5, 0xa8, 0xe7, 0x7f, 0xec, + 0xb1, 0x4f, 0x43, 0x91, 0x09, 0x3d, 0x6a, 0x79, 0x71, 0x36, 0x97, 0xda, 0xb1, 0xc2, 0x8a, 0x2e, + 0x86, 0x30, 0xe6, 0xb4, 0x75, 0x94, 0x4b, 0xba, 0x18, 0x09, 0xc6, 0x1a, 0x8f, 0x5e, 0x84, 0x22, + 0x25, 0x0e, 0x8b, 0xfb, 0xc3, 0x45, 0x2d, 0x12, 0x0b, 0xe8, 0xc9, 0xa0, 0x3e, 0xab, 0x84, 0x8b, + 0x77, 0xac, 0xa8, 0xd1, 0x3d, 0x98, 0x71, 0x49, 0xe4, 0x78, 0xbe, 0xee, 0x18, 0x9e, 0x9f, 0x64, + 0x24, 0xb6, 0x26, 0x59, 0x9b, 0x15, 0xbe, 0x26, 0xf5, 0x82, 0xb5, 0x40, 0x1e, 0xa1, 0x5b, 0xa1, + 0x2b, 0xbf, 0xa6, 0x16, 0x92, 0x08, 0xbd, 0x1a, 0xba, 0x04, 0x0b, 0x8c, 0xfd, 0x8e, 0x05, 0x15, + 0x29, 0x69, 0xd5, 0xe9, 0x31, 0x82, 0xae, 0xc6, 0xbb, 0x90, 0xc7, 0xad, 0x6b, 0xc6, 0x69, 0xde, + 0x65, 0x9d, 0x0c, 0xea, 0x65, 0x41, 0x26, 0x5a, 0x2e, 0xbd, 0x01, 0xc3, 0x46, 0xb9, 0x33, 0x6c, + 0xf4, 0x14, 0x14, 0xc4, 0xed, 0x51, 0xc6, 0x8c, 0xef, 0xba, 0xb8, 0x60, 0x58, 0xe2, 0xec, 0x8f, + 0x73, 0x50, 0x4d, 0x6d, 0x2e, 0x43, 0xd7, 0x11, 0x8f, 0x5f, 0x73, 0x19, 0x46, 0xfa, 0xe3, 0xbf, + 0x81, 0x7f, 0x07, 0x8a, 0x2d, 0xbe, 0x3f, 0xfd, 0x27, 0x84, 0xab, 0x93, 0x1c, 0x85, 0xb0, 0x4c, + 0xe2, 0x49, 0xe2, 0x95, 0x61, 0x25, 0x10, 0xdd, 0x84, 0x79, 0x4a, 0x22, 0xda, 0x5f, 0xd9, 0x8f, + 0x08, 0x35, 0xe7, 0x00, 0x85, 0xa4, 0x2e, 0xc7, 0xc3, 0x04, 0x78, 0x94, 0x47, 0xe7, 0xd4, 0xe2, + 0x63, 0xe4, 0x54, 0x7b, 0x0f, 0x66, 0x6f, 0x3b, 0x7b, 0x7e, 0xfc, 0x5d, 0x11, 0x43, 0xd5, 0x0b, + 0x5a, 0x7e, 0xcf, 0x25, 0x32, 0x1a, 0xeb, 0xe8, 0xa5, 0x2f, 0xed, 0xa6, 0x89, 0x3c, 0x19, 0xd4, + 0x2f, 0xa4, 0x00, 0xf2, 0x43, 0x1a, 0x4e, 0x8b, 0xb0, 0x7d, 0x98, 0xfe, 0x0c, 0xfb, 0xd4, 0xef, + 0x42, 0x39, 0xe9, 0x24, 0x3e, 0x65, 0x95, 0xf6, 0x1b, 0x50, 0xe2, 0x1e, 0xaf, 0x3b, 0xe0, 0x33, + 0xca, 0xa2, 0x74, 0xc1, 0x92, 0xcb, 0x52, 0xb0, 0xd8, 0x1d, 0xa8, 0xde, 0xe9, 0xba, 0x8f, 0xf9, + 0x65, 0x39, 0x97, 0x39, 0x6b, 0x5d, 0x03, 0xf9, 0x6f, 0x0d, 0x9e, 0x20, 0x64, 0xe6, 0x36, 0x12, + 0x84, 0x99, 0x78, 0x8d, 0x2f, 0x0b, 0x3f, 0xb1, 0x00, 0xc4, 0xe8, 0x67, 0xfd, 0x88, 0x04, 0x11, + 0xb7, 0x03, 0x77, 0xaa, 0x61, 0x3b, 0x88, 0xc8, 0x20, 0x30, 0xe8, 0x0e, 0x14, 0x43, 0xe9, 0x4d, + 0x72, 0xd4, 0x3f, 0xe1, 0xd4, 0x34, 0xbe, 0x48, 0xd2, 0x9f, 0xb0, 0x12, 0xd6, 0xbc, 0xfc, 0xc1, + 0x27, 0x8b, 0x53, 0x1f, 0x7e, 0xb2, 0x38, 0xf5, 0xd1, 0x27, 0x8b, 0x53, 0x6f, 0x1d, 0x2f, 0x5a, + 0x1f, 0x1c, 0x2f, 0x5a, 0x1f, 0x1e, 0x2f, 0x5a, 0x1f, 0x1d, 0x2f, 0x5a, 0x1f, 0x1f, 0x2f, 0x5a, + 0xef, 0xfc, 0x6d, 0x71, 0xea, 0x5e, 0xee, 0xe8, 0xea, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x15, + 0x47, 0x89, 0xab, 0x99, 0x26, 0x00, 0x00, } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 0c850c4e480..9e7497a0620 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1309,5 +1309,5 @@ type PartialObjectMetadataList struct { ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items contains each of the included items. 
- Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` + Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 1bf64b4ca75..6df41a9e727 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -805,13 +805,9 @@ func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*PartialObjectMetadata, len(*in)) + *out = make([]PartialObjectMetadata, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PartialObjectMetadata) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 557dfa2c731..1bcd80ee983 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -138,7 +138,7 @@ func (this *PartialObjectMetadataList) String() string { return "nil" } s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata", 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata", 1), `&`, ``, 1) + `,`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -207,7 +207,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata{}) + m.Items = append(m.Items, k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -374,25 +374,25 @@ func init() { var fileDescriptorGenerated = []byte{ // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, - 0x14, 0x86, 0xe3, 0x7b, 0x55, 0x51, 0xd2, 0x05, 0x75, 0x2a, 0x1d, 0xdc, 0x8a, 0xa9, 0x0c, 0xb5, - 0xd5, 0x0a, 0x21, 0x24, 0xb6, 0x6e, 0x48, 0x20, 0x50, 0x47, 0xd4, 0xc5, 0x49, 0x0f, 0xa9, 0x09, - 0x8e, 0x23, 0xfb, 0xa4, 0x12, 0x1b, 0x8f, 0xc0, 0x63, 0x75, 0xec, 0x46, 0xa7, 0x8a, 0x9a, 0x17, - 0x41, 0x49, 0x03, 0x42, 0x05, 0x44, 0xb6, 0x9c, 0xff, 0xe8, 0xfb, 0xf2, 0xdb, 0xf6, 0xc7, 0xf1, - 0x99, 0x65, 0x52, 0xf3, 0x38, 0x0b, 0xc0, 0x24, 0x80, 0x60, 0xf9, 0x1c, 0x92, 0xa9, 0x36, 0xbc, - 0x5c, 0x88, 0x54, 0x2a, 0x11, 0xce, 0x64, 0x02, 0xe6, 0x91, 0xa7, 0x71, 0x94, 0x07, 0x96, 0x2b, - 0x40, 0xc1, 0xe7, 0x83, 0x00, 0x50, 0x0c, 0x78, 0x04, 0x09, 0x18, 0x81, 0x30, 0x65, 0xa9, 0xd1, - 0xa8, 0x9b, 0xc7, 0x5b, 0x94, 0x7d, 0x45, 0x59, 0x1a, 0x47, 0x79, 0x60, 0x59, 0x8e, 0xb2, 0x12, - 0x6d, 0xf7, 0x23, 0x89, 0xb3, 0x2c, 0x60, 0xa1, 0x56, 0x3c, 0xd2, 0x91, 0xe6, 0x85, 0x21, 0xc8, - 0xee, 0x8a, 0xa9, 0x18, 0x8a, 0xaf, 
0xad, 0xb9, 0x7d, 0x52, 0xa5, 0xd4, 0x6e, 0x9f, 0xf6, 0xaf, - 0x47, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf4, 0x2f, 0xc0, 0x86, 0x33, 0x50, 0x62, 0x97, - 0x3b, 0x7a, 0x21, 0xfe, 0xe1, 0x8d, 0x30, 0x28, 0xc5, 0xc3, 0x75, 0x70, 0x0f, 0x21, 0x5e, 0x01, - 0x8a, 0xa9, 0x40, 0x71, 0x29, 0x2d, 0x36, 0x27, 0x7e, 0x4d, 0x22, 0x28, 0xdb, 0x22, 0xdd, 0xff, - 0xbd, 0xc6, 0xf0, 0x9c, 0x55, 0xb9, 0x26, 0xf6, 0xa3, 0x6f, 0xb4, 0xef, 0xd6, 0x9d, 0xda, 0x45, - 0x6e, 0x1b, 0x6f, 0xa5, 0xcd, 0x89, 0x5f, 0x57, 0xe5, 0xb6, 0xf5, 0xaf, 0x4b, 0x7a, 0x8d, 0x21, - 0xab, 0xf6, 0x83, 0xbc, 0x5b, 0xee, 0x1d, 0x1d, 0x2c, 0xd6, 0x1d, 0xcf, 0xad, 0x3b, 0xf5, 0x8f, - 0x64, 0xfc, 0x69, 0x1c, 0xf5, 0x17, 0x1b, 0xea, 0x2d, 0x37, 0xd4, 0x5b, 0x6d, 0xa8, 0xf7, 0xe4, - 0x28, 0x59, 0x38, 0x4a, 0x96, 0x8e, 0x92, 0x95, 0xa3, 0xe4, 0xd5, 0x51, 0xf2, 0xfc, 0x46, 0xbd, - 0xdb, 0xbd, 0xf2, 0x59, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xce, 0xfa, 0x86, 0x29, 0x56, 0x02, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, + 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, + 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x7d, 0xfe, 0xcd, 0xef, 0x97, 0x7f, 0x12, 0x7f, 0x1c, + 0x9f, 0x58, 0x26, 0x35, 0x8f, 0xb3, 0x00, 0x4c, 0x02, 0x08, 0x96, 0xcf, 0x20, 0x99, 0x68, 0xc3, + 0xcb, 0x1f, 0x22, 0x95, 0x4a, 0x84, 0x53, 0x99, 0x80, 0x79, 0xe4, 0x69, 0x1c, 0xb9, 0xc0, 0x72, + 0x05, 0x28, 0xf8, 0x6c, 0x10, 0x00, 0x8a, 0x01, 0x8f, 0x20, 0x01, 0x23, 0x10, 0x26, 0x2c, 0x35, + 0x1a, 0x75, 0xf3, 0x70, 0x83, 0xb2, 0xaf, 0x28, 0x4b, 0xe3, 0xc8, 0x05, 0x96, 0x39, 0x94, 0x95, + 0x68, 0xbb, 0x1f, 0x49, 0x9c, 0x66, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41, + 0x76, 0x57, 0x4c, 0xc5, 0x50, 0x7c, 0x6d, 0xcc, 0xed, 0xa3, 0x2a, 0xa5, 0xb6, 0xfb, 0xb4, 0x7f, + 0x3d, 0x8a, 0xc9, 0x12, 0x94, 0x0a, 0xbe, 0x01, 0xc7, 0x7f, 0x01, 0x36, 0x9c, 0x82, 0x12, 0xdb, + 0xdc, 0xc1, 0x0b, 0xf1, 0xf7, 0xaf, 0x84, 0x41, 0x29, 0x1e, 0x2e, 0x83, 0x7b, 0x08, 0xf1, 0x02, + 0x50, 0x4c, 0x04, 0x8a, 0x73, 0x69, 0xb1, 0x79, 0xeb, 0xd7, 0x24, 0x82, 0xb2, 0x2d, 0xd2, 0xfd, + 0xdf, 0x6b, 0x0c, 0x4f, 0x59, 0x95, 0x6b, 0x62, 0x3f, 0xfa, 0x46, 0xbb, 0xf3, 0x55, 0xc7, 0xcb, + 0x57, 0x9d, 0xda, 0x99, 0x33, 0x8e, 0x37, 0xe2, 0xe6, 0x8d, 0x5f, 0x57, 0xe5, 0x8a, 0xd6, 0xbf, + 0x2e, 0xe9, 0x35, 0x86, 0xac, 0xda, 0x26, 0xae, 0x9f, 0x73, 0x8f, 0xf6, 0x4a, 0x6f, 0xfd, 0x23, + 0x19, 0x7f, 0x1a, 0x47, 0xfd, 0xf9, 0x9a, 0x7a, 0x8b, 0x35, 0xf5, 0x96, 0x6b, 0xea, 0x3d, 0xe5, + 0x94, 0xcc, 0x73, 0x4a, 0x16, 0x39, 0x25, 0xcb, 0x9c, 0x92, 0xd7, 0x9c, 0x92, 0xe7, 0x37, 0xea, + 0x5d, 0xef, 0x94, 0x4f, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x10, 0x2f, 0x48, 0xbd, 0x5a, 0x02, 0x00, 0x00, } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 8b7f6bd4f54..87895a5b5f2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -68,7 +68,7 @@ type PartialObjectMetadataList struct { v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` // items contains each of the included items. 
- Items []*v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` } const ( diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 9c21e91f13f..37cf2571871 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -32,13 +32,9 @@ func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*v1.PartialObjectMetadata, len(*in)) + *out = make([]v1.PartialObjectMetadata, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.PartialObjectMetadata) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index 3d0f1106ebe..a40c41cc6e5 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -2378,7 +2378,7 @@ func TestGetPartialObjectMetadata(t *testing.T) { ResourceVersion: "10", SelfLink: "/test/link", }, - Items: []*metav1beta1.PartialObjectMetadata{ + Items: []metav1beta1.PartialObjectMetadata{ { TypeMeta: metav1.TypeMeta{APIVersion: "meta.k8s.io/v1beta1", Kind: "PartialObjectMetadata"}, ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "ns1", CreationTimestamp: now, UID: types.UID("newer")}, diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go index 0fe8a71c728..c6d71841409 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -220,7 +220,7 @@ func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.Grou } partial := meta.AsPartialObjectMetadata(m) partial.GetObjectKind().SetGroupVersionKind(gvk) - list.Items = append(list.Items, partial) + list.Items = append(list.Items, *partial) return nil }) if err != nil { @@ -240,7 +240,7 @@ func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.Grou } partial := meta.AsPartialObjectMetadata(m) partial.GetObjectKind().SetGroupVersionKind(gvk) - list.Items = append(list.Items, partial) + list.Items = append(list.Items, *partial) return nil }) if err != nil { From dddc6a53d41a65322296ac26490724626d99b372 Mon Sep 17 00:00:00 2001 From: Akihito INOH Date: Wed, 8 May 2019 22:19:58 +0900 Subject: [PATCH 144/194] Fix golint failures of e2e/framework/*.go This fixes golint failures of the following files: - test/e2e/framework/networking_utils.go - test/e2e/framework/service_util.go - test/e2e/framework/util.go All golint failures in test/e2e/framework are fixed at this commit. 
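The golint rule driving these comment additions is that every exported name needs a doc comment and that the comment must begin with the name itself (roughly, golint reports "exported X should have comment or be unexported", or "comment on exported X should be of the form \"X ...\"" when a comment exists but leads with something else). A minimal sketch of the convention follows; the RegexIPv4 constant and its comment are copied from the networking_utils.go hunk below, while the package clause and DescribeExample are made-up stand-ins used only to show where the comment goes:

    package framework

    const (
    	// RegexIPv4 is a regex to match IPv4 addresses
    	RegexIPv4 = "(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)"
    )

    // DescribeExample logs a short example message.
    // Omitting this comment, or starting it with anything other than the
    // identifier (e.g. "Logs a short example message."), is what golint
    // flags on exported names.
    func DescribeExample() {}

The service_util.go and util.go hunks in this patch apply exactly this pattern to the existing exported constants, types, and functions without changing their behavior.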
Remove 'test/e2e/framework' from 'hack/.golint_failures' --- hack/.golint_failures | 1 - test/e2e/framework/networking_utils.go | 4 +-- test/e2e/framework/service_util.go | 44 +++++++++++++++++++++++++- test/e2e/framework/util.go | 4 ++- 4 files changed, 48 insertions(+), 5 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index dd6691cf7ec..d7d049cb31b 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -602,7 +602,6 @@ test/e2e test/e2e/autoscaling test/e2e/chaosmonkey test/e2e/common -test/e2e/framework test/e2e/lifecycle/bootstrap test/e2e/node test/e2e/scalability diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index d4cbf754cc8..0eee3a7bb8c 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -65,9 +65,9 @@ const ( maxNetProxyPodsCount = 10 // SessionAffinityChecks is number of checks to hit a given set of endpoints when enable session affinity. SessionAffinityChecks = 10 - // Regex to match IPv4 addresses + // RegexIPv4 is a regex to match IPv4 addresses RegexIPv4 = "(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)" - // Regex to match IPv6 addresses + // RegexIPv6 is a regex to match IPv6 addresses RegexIPv6 = "(?:(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){6})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:::(?:(?:(?:[0-9a-fA-F]{1,4})):){5})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){4})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,1}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){3})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,2}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){2})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,3}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:[0-9a-fA-F]{1,4})):)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,4}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,5}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,6}(?:(?:[0-9a-fA-F]{1,4})))?::))))" ) diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index bae9c569176..f2d99b54094 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -76,12 +76,16 @@ const ( // Bring the cleanup timeout back down to 5m once b/33588344 is resolved. 
LoadBalancerCleanupTimeout = 15 * time.Minute + // LoadBalancerPollTimeout is the time required by the loadbalancer to poll. // On average it takes ~6 minutes for a single backend to come online in GCE. - LoadBalancerPollTimeout = 15 * time.Minute + LoadBalancerPollTimeout = 15 * time.Minute + // LoadBalancerPollInterval is the interval value in which the loadbalancer polls. LoadBalancerPollInterval = 30 * time.Second + // LargeClusterMinNodesNumber is the number of nodes which a large cluster consists of. LargeClusterMinNodesNumber = 100 + // MaxNodesForEndpointsTests is the max number for testing endpoints. // Don't test with more than 3 nodes. // Many tests create an endpoint per node, in large clusters, this is // resource and time intensive. @@ -236,6 +240,7 @@ func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string, return j.Client.CoreV1().Services(namespace).Create(service) } +// ChangeServiceType updates the given service's ServiceType to the given newType. func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) { ingressIP := "" svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) { @@ -318,6 +323,7 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string return svc } +// GetNodeAddresses returns a list of addresses of the given addressType for the given node func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) { for j := range node.Status.Addresses { nodeAddress := &node.Status.Addresses[j] @@ -328,6 +334,7 @@ func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []stri return } +// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string { ips := []string{} for i := range nodes.Items { @@ -336,6 +343,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri return ips } +// GetNodePublicIps returns a public IP list of nodes. func GetNodePublicIps(c clientset.Interface) ([]string, error) { nodes := GetReadySchedulableNodesOrDie(c) @@ -347,6 +355,7 @@ func GetNodePublicIps(c clientset.Interface) ([]string, error) { return ips, nil } +// PickNodeIP picks one public node IP func PickNodeIP(c clientset.Interface) string { publicIps, err := GetNodePublicIps(c) ExpectNoError(err) @@ -415,6 +424,7 @@ func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) { return nodes } +// GetNodesNames returns a list of names of the first maxNodesForTest nodes func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { nodes := j.GetNodes(maxNodesForTest) nodesNames := []string{} @@ -424,6 +434,7 @@ func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { return nodesNames } +// WaitForEndpointOnNode waits for a service endpoint on the given node. 
func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) { err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) { endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) @@ -451,6 +462,7 @@ func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName ExpectNoError(err) } +// SanityCheckService performs sanity checks on the given service func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) { if svc.Spec.Type != svcType { Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) @@ -533,6 +545,7 @@ func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func return svc } +// WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service { Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name) service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool { @@ -548,6 +561,7 @@ func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP s return service } +// ChangeServiceNodePortOrFail changes node ports of the given service. func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service { var err error var service *v1.Service @@ -571,6 +585,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini return service } +// WaitForLoadBalancerOrFail waits the given service to have a LoadBalancer, or fails after the given timeout func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool { @@ -579,6 +594,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo return service } +// WaitForLoadBalancerDestroyOrFail waits the given service to destroy a LoadBalancer, or fails after the given timeout func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service { // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable defer func() { @@ -658,6 +674,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll return rc } +// AddRCAntiAffinity adds AntiAffinity to the given ReplicationController. 
func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { var replicas int32 = 2 @@ -677,6 +694,7 @@ func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { }) } +// CreatePDBOrFail returns a PodDisruptionBudget for the given ReplicationController, or fails if a PodDisruptionBudget isn't ready func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget { pdb := j.newPDBTemplate(namespace, rc) newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb) @@ -733,6 +751,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati return result } +// Scale scales pods to the given replicas func (j *ServiceTestJig) Scale(namespace string, replicas int) { rc := j.Name scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{}) @@ -839,6 +858,7 @@ func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool return pod } +// LaunchNetexecPodOnNode launches a netexec pod on the given node. func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) { Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name) pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork) @@ -887,10 +907,12 @@ func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podNa Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name) } +// TestReachableHTTP tests that the given host serves HTTP on the given port. func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { j.TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout) } +// TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port with the given retriableErrCodes. func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeHTTP(host, port, "/echo?msg=hello", @@ -913,6 +935,7 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p } } +// TestNotReachableHTTP tests that a HTTP request doesn't connect to the given host and port. func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeHTTP(host, port, "/", nil) @@ -927,6 +950,7 @@ func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout tim } } +// TestRejectedHTTP tests that the given host rejects a HTTP request on the given port. func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeHTTP(host, port, "/", nil) @@ -941,6 +965,7 @@ func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Du } } +// TestReachableUDP tests that the given host serves UDP on the given port. func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeUDP(host, port, "echo hello", &UDPPokeParams{ @@ -958,6 +983,7 @@ func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Du } } +// TestNotReachableUDP tests that the given host doesn't serve UDP on the given port. 
func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) @@ -971,6 +997,7 @@ func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time } } +// TestRejectedUDP tests that the given host rejects a UDP request on the given port. func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) @@ -984,6 +1011,7 @@ func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Dur } } +// GetHTTPContent returns the content of the given url by HTTP. func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { var body bytes.Buffer if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { @@ -1028,6 +1056,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url) } +// TestHTTPHealthCheckNodePort tests a HTTP connection by the given request to the given host and port. func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error { count := 0 condition := func() (bool, error) { @@ -1063,6 +1092,7 @@ type ServiceTestFixture struct { Image string } +// NewServerTest creates a new ServiceTestFixture for the tests. func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture { t := &ServiceTestFixture{} t.Client = client @@ -1127,6 +1157,7 @@ func (t *ServiceTestFixture) DeleteService(serviceName string) error { return err } +// Cleanup cleans all ReplicationControllers and Services which this object holds. func (t *ServiceTestFixture) Cleanup() []error { var errs []error for rcName := range t.rcs { @@ -1175,6 +1206,7 @@ func (t *ServiceTestFixture) Cleanup() []error { return errs } +// GetIngressPoint returns a host on which ingress serves. func GetIngressPoint(ing *v1.LoadBalancerIngress) string { host := ing.IP if host == "" { @@ -1206,6 +1238,7 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update return service, err } +// GetContainerPortsByPodUID returns a PortsByPodUID map on the given endpoints. func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID { m := PortsByPodUID{} for _, ss := range endpoints.Subsets { @@ -1222,7 +1255,10 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID { return m } +// PortsByPodName maps pod name to ports. type PortsByPodName map[string][]int + +// PortsByPodUID maps UID to ports. type PortsByPodUID map[types.UID][]int func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID { @@ -1261,6 +1297,7 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI } } +// ValidateEndpointsOrFail validates that the given service exists and is served by the given expectedEndpoints. 
func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) { ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) i := 1 @@ -1348,6 +1385,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string return podNames, serviceIP, nil } +// StopServeHostnameService stops the given service. func StopServeHostnameService(clientset clientset.Interface, ns, name string) error { if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil { return err @@ -1439,6 +1477,7 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect return nil } +// VerifyServeHostnameServiceDown verifies that the given service isn't served. func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error { ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) // The current versions of curl included in CentOS and RHEL distros @@ -1466,6 +1505,7 @@ func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zo TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone) } +// DescribeSvc logs the output of kubectl describe svc for the given namespace func DescribeSvc(ns string) { Logf("\nOutput of kubectl describe svc:\n") desc, _ := RunKubectl( @@ -1473,6 +1513,7 @@ func DescribeSvc(ns string) { Logf(desc) } +// CreateServiceSpec returns a Service object for testing. func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service { headlessService := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1502,6 +1543,7 @@ func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(sv return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB() } +// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service. func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration { if nodes := GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber { return LoadBalancerCreateTimeoutLarge diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index def1fee5fb4..b45a411562d 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -133,8 +133,10 @@ const ( // Poll is how often to Poll pods, nodes and claims. Poll = 2 * time.Second + // PollShortTimeout is the short timeout value in polling. PollShortTimeout = 1 * time.Minute - PollLongTimeout = 5 * time.Minute + // PollLongTimeout is the long timeout value in polling. + PollLongTimeout = 5 * time.Minute // ServiceAccountProvisionTimeout is how long to wait for a service account to be provisioned. // service accounts are provisioned after namespace creation From 43c8eb6c36ff52edb34858da71fb254edbbd42ca Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 13 May 2019 19:14:03 -0400 Subject: [PATCH 145/194] Revert "fix shellcheck failures of hack/verify-no-vendor-cycles.sh" This reverts commit e25e5a63e7b27e65b0118c3bfedfe1dec84c3f7c. 
--- hack/.shellcheck_failures | 1 + hack/verify-no-vendor-cycles.sh | 14 +++++--------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index f6bbf6cdd42..249f1b7ff4b 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -28,6 +28,7 @@ ./hack/pin-dependency.sh ./hack/test-integration.sh ./hack/update-vendor.sh +./hack/verify-no-vendor-cycles.sh ./hack/verify-test-featuregates.sh ./test/cmd/batch.sh ./test/cmd/certificate.sh diff --git a/hack/verify-no-vendor-cycles.sh b/hack/verify-no-vendor-cycles.sh index 30c902d6471..84d1a210d9b 100755 --- a/hack/verify-no-vendor-cycles.sh +++ b/hack/verify-no-vendor-cycles.sh @@ -18,20 +18,16 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. export GO111MODULE=auto -staging_repos=() -while IFS= read -r repo; do - staging_repos+=( "${repo}" ) -done < <(ls "${KUBE_ROOT}/staging/src/k8s.io/") - +staging_repos=($(ls "${KUBE_ROOT}/staging/src/k8s.io/")) staging_repos_pattern=$(IFS="|"; echo "${staging_repos[*]}") failed=false -while IFS= read -r -d '' i; do - deps=$(go list -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' ./"$i" 2> /dev/null || echo "") +for i in $(find vendor/ -type d); do + deps=$(go list -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' ./$i 2> /dev/null || echo "") deps_on_main=$(echo "${deps}" | grep -v "k8s.io/kubernetes/vendor/" | grep "k8s.io/kubernetes" || echo "") if [ -n "${deps_on_main}" ]; then echo "Package ${i} has a cyclic dependency on the main repository." @@ -42,7 +38,7 @@ while IFS= read -r -d '' i; do echo "Package ${i} has a cyclic dependency on staging repository packages: ${deps_on_staging}" failed=true fi -done < <(find vendor/ -type d) +done if [[ "${failed}" == "true" ]]; then exit 1 From 4841e5b98cc28770a85603a13a39a0fe943a2009 Mon Sep 17 00:00:00 2001 From: toyoda Date: Tue, 7 May 2019 14:54:01 +0900 Subject: [PATCH 146/194] use framework.ExpectNoError() for daemon_set.go and deployment.go in e2e/apps --- test/e2e/apps/daemon_set.go | 72 ++++++------ test/e2e/apps/deployment.go | 226 +++++++++++++++++++----------------- 2 files changed, 157 insertions(+), 141 deletions(-) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 6ac4e8f031b..da653324c49 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -89,7 +89,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { e2elog.Logf("unable to dump pods: %v", err) } err = clearDaemonSetNodeLabels(f.ClientSet) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) f = framework.NewDefaultFramework("daemonsets") @@ -106,12 +106,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { c = f.ClientSet updatedNS, err := updateNamespaceAnnotations(c, ns) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ns = updatedNS.Name err = clearDaemonSetNodeLabels(c) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) /* @@ -124,19 +124,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) 
framework.ExpectNoError(err, "error waiting for daemon pod to start") err = checkDaemonStatus(f, dsName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) pod := podList.Items[0] err = c.CoreV1().Pods(ns).Delete(pod.Name, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") }) @@ -153,7 +153,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) @@ -169,14 +169,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled") nodeSelector[daemonsetColorLabel] = "green" greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) framework.ExpectNoError(err, "error removing labels on node") - gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). 
- NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes") + err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, @@ -188,7 +188,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) // We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the @@ -216,7 +216,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }, } ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) @@ -232,13 +232,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Remove the node label and wait for daemons to be unscheduled") _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{}) framework.ExpectNoError(err, "error removing labels on node") - gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). 
- NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes") + err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") }) /* @@ -250,13 +250,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") err = checkDaemonStatus(f, dsName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) @@ -282,7 +282,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) @@ -290,7 +290,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 1) first := curHistory(listDaemonHistories(c, ns, label), ds) firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey] @@ -300,11 +300,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods images aren't updated.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) @@ -312,7 +312,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 2) cur := curHistory(listDaemonHistories(c, ns, label), ds) gomega.Expect(cur.Revision).To(gomega.Equal(int64(2))) @@ -331,7 +331,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every 
node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) @@ -339,7 +339,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 1) cur := curHistory(listDaemonHistories(c, ns, label), ds) hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] @@ -349,18 +349,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Get the number of nodes, and set the timeout appropriately. nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) nodeCount := len(nodes.Items) retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second ginkgo.By("Check that daemon pods images are updated.") err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) @@ -368,7 +368,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 2) cur = curHistory(listDaemonHistories(c, ns, label), ds) hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] @@ -389,7 +389,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Check that daemon pods launch on every node of the cluster") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) @@ -401,11 +401,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Make sure we're in the middle of a rollout err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) pods := listDaemonPods(c, ns, label) var existingPods, newPods []*v1.Pod @@ -433,11 +433,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = image }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) 
e2elog.Logf("Make sure DaemonSet rollback is complete") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted pods = listDaemonPods(c, ns, label) @@ -487,7 +487,7 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) * selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.CoreV1().Pods(ns).List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0)) return podList } @@ -748,7 +748,7 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} historyList, err := c.AppsV1().ControllerRevisions(ns).List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0)) return historyList } @@ -761,7 +761,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a // Every history should have the hash label gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0)) match, err := daemon.Match(ds, history) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if match { curHistory = history foundCurHistories++ diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 0fbe51556dc..34a37e8cfc0 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -191,10 +191,11 @@ func newDeploymentRollback(name string, annotations map[string]string, revision func stopDeployment(c clientset.Interface, ns, deploymentName string) { deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Deleting deployment %s", deploymentName) - framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)) + err = framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name) + framework.ExpectNoError(err) e2elog.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) @@ -202,10 +203,10 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) options := metav1.ListOptions{LabelSelector: selector.String()} rss, err := c.AppsV1().ReplicaSets(ns).List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gomega.Expect(rss.Items).Should(gomega.HaveLen(0)) e2elog.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) var pods *v1.PodList @@ -235,19 +236,19 @@ func testDeleteDeployment(f *framework.Framework) { d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, 
NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} deploy, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 1 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gomega.Expect(newRS).NotTo(gomega.Equal(nilRs)) stopDeployment(c, ns, deploymentName) } @@ -271,7 +272,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { rs.Annotations = annotations e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name) _, err := c.AppsV1().ReplicaSets(ns).Create(rs) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) @@ -281,23 +282,23 @@ func testRollingUpdateDeployment(f *framework.Framework) { e2elog.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 3546343826724305833. 
e2elog.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name) err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Ensuring status for deployment %q is the expected", deploy.Name) err = e2edeploy.WaitForDeploymentComplete(c, deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // There should be 1 old RS (nginx-controller, which is adopted) e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1)) } @@ -310,15 +311,16 @@ func testRecreateDeployment(f *framework.Framework) { e2elog.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType) deployment, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 1 e2elog.Logf("Waiting deployment %q to be updated to revision 1", deploymentName) err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Waiting deployment %q to complete", deploymentName) - gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForDeploymentComplete(c, deployment) + framework.ExpectNoError(err) // Update deployment to delete redis pods and bring up nginx pods. e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName) @@ -326,10 +328,11 @@ func testRecreateDeployment(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Name = NginxImageName update.Spec.Template.Spec.Containers[0].Image = NginxImage }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName) - gomega.Expect(e2edeploy.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WatchRecreateDeployment(c, deployment) + framework.ExpectNoError(err) } // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy @@ -346,7 +349,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { replicas := int32(1) revisionHistoryLimit := utilpointer.Int32Ptr(0) _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Verify that the required pods have come up. 
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) @@ -365,7 +368,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { stopCh := make(chan struct{}) defer close(stopCh) w, err := c.CoreV1().Pods(ns).Watch(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) go func() { // There should be only one pod being created, which is the pod with the redis image. // The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector. @@ -395,11 +398,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit _, err = c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) err = e2edeploy.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } // testRolloverDeployment tests that deployment supports rollover. @@ -417,14 +420,15 @@ func testRolloverDeployment(f *framework.Framework) { rsName := "test-rollover-controller" rsReplicas := int32(1) _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Wait for replica set to become ready before adopting it. e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName) - gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred()) + err = replicaset.WaitForReadyReplicaSet(c, ns, rsName) + framework.ExpectNoError(err) // Create a deployment to delete nginx pods and instead bring up redis-slave pods. // We use a nonexistent image here, so that we make sure it won't finish @@ -440,25 +444,25 @@ func testRolloverDeployment(f *framework.Framework) { } newDeployment.Spec.MinReadySeconds = int32(10) _, err = c.AppsV1().Deployments(ns).Create(newDeployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Verify that the pods were scaled up and down as expected. 
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Make sure deployment %q performs scaling operations", deploymentName) // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation) // Check if it's updated to revision 1 correctly e2elog.Logf("Check revision of new replica set for deployment %q", deploymentName) err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Ensure that both replica sets have 1 created replica") oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ensureReplicas(oldRS, int32(1)) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ensureReplicas(newRS, int32(1)) // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods. @@ -468,29 +472,29 @@ func testRolloverDeployment(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Use observedGeneration to determine if the controller noticed the pod template update. e2elog.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName) err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 2 e2elog.Logf("Wait for revision update of deployment %q to 2", deploymentName) err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Make sure deployment %q is complete", deploymentName) err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Ensure that both old replica sets have no replicas") oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ensureReplicas(oldRS, int32(0)) // Not really the new replica set anymore but we GET by name so that's fine. 
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ensureReplicas(newRS, int32(0)) } @@ -520,18 +524,18 @@ func testRollbackDeployment(f *framework.Framework) { createAnnotation := map[string]string{"action": "create", "author": "node"} d.Annotations = createAnnotation deploy, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 1 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Current newRS annotation should be "create" err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // 2. Update the deployment to create redis pods. updatedDeploymentImage := RedisImage @@ -542,66 +546,66 @@ func testRollbackDeployment(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Annotations = updateAnnotation }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Use observedGeneration to determine if the controller noticed the pod template update. err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 2 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Current newRS annotation should be "update" err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // 3. Update the deploymentRollback to rollback to revision 1 revision := int64(1) e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback := newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for the deployment to start rolling back err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // TODO: report RollbackDone in deployment status and check it here // Wait for it to be updated to revision 3 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Current newRS annotation should be "create", after the rollback err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // 4. 
Update the deploymentRollback to rollback to last revision revision = 0 e2elog.Logf("rolling back deployment %s to last revision", deploymentName) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for it to be updated to revision 4 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Current newRS annotation should be "update", after the rollback err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // 5. Update the deploymentRollback to rollback to revision 10 // Since there's no revision 10 in history, it should stay as revision 4 @@ -609,17 +613,17 @@ func testRollbackDeployment(f *framework.Framework) { e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for the deployment to start rolling back err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // TODO: report RollbackRevisionNotFound in deployment status and check it here // The pod template shouldn't change since there's no revision 10 // Check if it's still revision 4 and still has the old pod template err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // 6. 
Update the deploymentRollback to rollback to revision 4 // Since it's already revision 4, it should be no-op @@ -627,17 +631,17 @@ func testRollbackDeployment(f *framework.Framework) { e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Wait for the deployment to start rolling back err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // TODO: report RollbackTemplateUnchanged in deployment status and check it here // The pod template shouldn't change since it's already revision 4 // Check if it's still revision 4 and still has the old pod template err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } func randomScale(d *apps.Deployment, i int) { @@ -671,7 +675,7 @@ func testIterativeDeployments(f *framework.Framework) { d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero e2elog.Logf("Creating deployment %q", deploymentName) deployment, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) iterations := 20 for i := 0; i < iterations; i++ { @@ -688,7 +692,7 @@ func testIterativeDeployments(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv) randomScale(update, i) }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) case n < 0.4: // rollback to the previous version @@ -699,7 +703,7 @@ func testIterativeDeployments(f *framework.Framework) { } update.Annotations[apps.DeprecatedRollbackTo] = "0" }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) case n < 0.6: // just scaling @@ -707,7 +711,7 @@ func testIterativeDeployments(f *framework.Framework) { deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { randomScale(update, i) }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) case n < 0.8: // toggling the deployment @@ -717,24 +721,24 @@ func testIterativeDeployments(f *framework.Framework) { update.Spec.Paused = true randomScale(update, i) }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } else { e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name) deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false randomScale(update, i) }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } default: // arbitrarily delete deployment pods e2elog.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) opts := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.CoreV1().Pods(ns).List(opts) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if len(podList.Items) == 0 { e2elog.Logf("%02d: no deployment pods to delete", i) continue @@ -747,7 
+751,7 @@ func testIterativeDeployments(f *framework.Framework) { e2elog.Logf("%02d: deleting deployment pod %q", i, name) err := c.CoreV1().Pods(ns).Delete(name, nil) if err != nil && !errors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } } } @@ -755,7 +759,7 @@ func testIterativeDeployments(f *framework.Framework) { // unpause the deployment if we end up pausing it deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if deployment.Spec.Paused { deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false @@ -763,13 +767,16 @@ func testIterativeDeployments(f *framework.Framework) { } e2elog.Logf("Waiting for deployment %q to be observed by the controller", deploymentName) - gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + framework.ExpectNoError(err) e2elog.Logf("Waiting for deployment %q status", deploymentName) - gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForDeploymentComplete(c, deployment) + framework.ExpectNoError(err) e2elog.Logf("Checking deployment %q for a complete condition", deploymentName) - gomega.Expect(e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing) + framework.ExpectNoError(err) } func testDeploymentsControllerRef(f *framework.Framework) { @@ -782,9 +789,9 @@ func testDeploymentsControllerRef(f *framework.Framework) { replicas := int32(1) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) rsList := listDeploymentReplicaSets(c, ns, podLabels) @@ -795,11 +802,11 @@ func testDeploymentsControllerRef(f *framework.Framework) { e2elog.Logf("Checking the ReplicaSet has the right controllerRef") err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName) err = orphanDeploymentReplicaSets(c, deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Wait for the ReplicaSet to be orphaned") err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels)) @@ -809,13 +816,13 @@ func testDeploymentsControllerRef(f *framework.Framework) { e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) deploy, err = 
c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Waiting for the ReplicaSet to have the right controllerRef") err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) rsList = listDeploymentReplicaSets(c, ns, podLabels) @@ -844,10 +851,11 @@ func testProportionalScalingDeployment(f *framework.Framework) { e2elog.Logf("Creating deployment %q", deploymentName) deployment, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Waiting for observed generation %d", deployment.Generation) - gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + framework.ExpectNoError(err) // Verify that the required pods have come up. e2elog.Logf("Waiting for all required pods to come up") @@ -855,10 +863,11 @@ func testProportionalScalingDeployment(f *framework.Framework) { framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) e2elog.Logf("Waiting for deployment %q to complete", deployment.Name) - gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForDeploymentComplete(c, deployment) + framework.ExpectNoError(err) firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Update the deployment with a non-existent image so that the new replica set // will be blocked to simulate a partial rollout. @@ -866,37 +875,40 @@ func testProportionalScalingDeployment(f *framework.Framework) { deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = "nginx:404" }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Waiting for observed generation %d", deployment.Generation) - gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) + err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + framework.ExpectNoError(err) // Checking state of first rollout's replicaset. maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas. 
minAvailableReplicas := replicas - int32(maxUnavailable) e2elog.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) - gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) + err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas) + framework.ExpectNoError(err) // First rollout's replicaset should have .spec.replicas = 8 too. e2elog.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) - gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) + err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas) + framework.ExpectNoError(err) // The desired replicas wait makes sure that the RS controller has created expected number of pods. e2elog.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Checking state of second rollout's replicaset. secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Second rollout's replicaset should have 0 available replicas. e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0") @@ -905,19 +917,21 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. newReplicas := replicas + int32(maxSurge) - minAvailableReplicas e2elog.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) - gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred()) + err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas) + framework.ExpectNoError(err) // The desired replicas wait makes sure that the RS controller has created expected number of pods. e2elog.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // Check the deployment's minimum availability. 
e2elog.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName) if deployment.Status.AvailableReplicas < minAvailableReplicas { - gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) + err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas) + framework.ExpectNoError(err) } // Scale the deployment to 30 replicas. @@ -926,23 +940,25 @@ func testProportionalScalingDeployment(f *framework.Framework) { deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e2elog.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. // Note that 12 comes from rounding (30-10)*(8/13) to nearest integer. e2elog.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") - gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred()) + err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20) + framework.ExpectNoError(err) // Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas. // Note that 8 comes from rounding (30-10)*(5/13) to nearest integer. e2elog.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") - gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred()) + err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13) + framework.ExpectNoError(err) } func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error { @@ -973,7 +989,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} rsList, err := c.AppsV1().ReplicaSets(ns).List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0)) return rsList } From 1f0f050fded19248d52c2dd2ed9ddcd67ff4e2f4 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Fri, 10 May 2019 10:38:46 -0700 Subject: [PATCH 147/194] Update etcd* version to use latest released images. 
--- cluster/gce/config-test.sh | 2 +- cluster/gce/manifests/etcd-empty-dir-cleanup.yaml | 2 +- cluster/gce/manifests/etcd.manifest | 2 +- cluster/gce/upgrade-aliases.sh | 2 +- cluster/images/etcd-version-monitor/Makefile | 2 +- cluster/images/etcd-version-monitor/etcd-version-monitor.yaml | 2 +- cluster/images/etcd/Makefile | 2 +- test/e2e/framework/nodes_util.go | 4 ++-- test/kubemark/start-kubemark.sh | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index aaeaab6ad0e..ba1c2f04aa7 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -175,7 +175,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}" # Useful for scheduling heapster in large clusters with nodes of small size. HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" -# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.3.10-0) if you need +# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.3.10-1) if you need # non-default version. ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}" diff --git a/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml b/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml index 6795aa23499..51750a06304 100644 --- a/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml +++ b/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml @@ -14,4 +14,4 @@ spec: dnsPolicy: Default containers: - name: etcd-empty-dir-cleanup - image: k8s.gcr.io/etcd-empty-dir-cleanup:3.3.10.0 + image: k8s.gcr.io/etcd-empty-dir-cleanup:3.3.10.1 diff --git a/cluster/gce/manifests/etcd.manifest b/cluster/gce/manifests/etcd.manifest index 0211aa1dbb2..4500d1df9e6 100644 --- a/cluster/gce/manifests/etcd.manifest +++ b/cluster/gce/manifests/etcd.manifest @@ -14,7 +14,7 @@ "containers":[ { "name": "etcd-container", - "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.3.10-0') }}", + "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.3.10-1') }}", "resources": { "requests": { "cpu": {{ cpulimit }} diff --git a/cluster/gce/upgrade-aliases.sh b/cluster/gce/upgrade-aliases.sh index 92b2382074a..907e15a0f6e 100755 --- a/cluster/gce/upgrade-aliases.sh +++ b/cluster/gce/upgrade-aliases.sh @@ -161,7 +161,7 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true export SECONDARY_RANGE_NAME="pods-default" export STORAGE_BACKEND="etcd3" export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf" -export ETCD_IMAGE=3.3.10-0 +export ETCD_IMAGE=3.3.10-1 export ETCD_VERSION=3.3.10 # Upgrade master with updated kube envs diff --git a/cluster/images/etcd-version-monitor/Makefile b/cluster/images/etcd-version-monitor/Makefile index 0c019322b2f..88429b31896 100644 --- a/cluster/images/etcd-version-monitor/Makefile +++ b/cluster/images/etcd-version-monitor/Makefile @@ -20,7 +20,7 @@ ARCH:=amd64 GOLANG_VERSION?=1.8.3 REGISTRY?=staging-k8s.gcr.io -TAG?=0.1.2 +TAG?=0.1.3 IMAGE:=$(REGISTRY)/etcd-version-monitor:$(TAG) CURRENT_DIR:=$(pwd) TEMP_DIR:=$(shell mktemp -d) diff --git a/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml b/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml index b02592679a2..c2a3df136ea 100644 --- a/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml +++ b/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml @@ -7,7 +7,7 @@ spec: hostNetwork: true containers: - name: etcd-version-monitor - image: k8s.gcr.io/etcd-version-monitor:0.1.2 + image: 
k8s.gcr.io/etcd-version-monitor:0.1.3 command: - /etcd-version-monitor - --logtostderr diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index e7ba544c6aa..874f3a86bd1 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -34,7 +34,7 @@ LATEST_ETCD_VERSION?=3.3.10 # REVISION provides a version number fo this image and all it's bundled # artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment # for each revision of this image at that etcd version. -REVISION?=0 +REVISION?=1 # IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "-". IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION) diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index d7c0dc055b7..4211376560d 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -60,7 +60,7 @@ func etcdUpgradeGCE(targetStorage, targetVersion string) error { os.Environ(), "TEST_ETCD_VERSION="+targetVersion, "STORAGE_BACKEND="+targetStorage, - "TEST_ETCD_IMAGE=3.3.10-0") + "TEST_ETCD_IMAGE=3.3.10-1") _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M") return err @@ -80,7 +80,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error { env = append(env, "TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion, "STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage, - "TEST_ETCD_IMAGE=3.3.10-0") + "TEST_ETCD_IMAGE=3.3.10-1") } else { // In e2e tests, we skip the confirmation prompt about // implicit etcd upgrades to simulate the user entering "y". diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index 6cdf012fd9b..933a39615d5 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -64,7 +64,7 @@ SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}" EVENT_PD="${EVENT_PD:-}" # Etcd related variables. -ETCD_IMAGE="${ETCD_IMAGE:-3.3.10-0}" +ETCD_IMAGE="${ETCD_IMAGE:-3.3.10-1}" ETCD_VERSION="${ETCD_VERSION:-}" # Controller-manager related variables. From 196bbaa964b5e2dc298a4f930a4379eecae3ec5c Mon Sep 17 00:00:00 2001 From: David Zhu Date: Mon, 13 May 2019 13:20:06 -0700 Subject: [PATCH 148/194] Translate fstype storage class parameter to prefixed stripped parameter in the gce pd translation library. Change storage class translation library to operate on StorageClass instead of parameters only. 
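For illustration, a minimal usage sketch of the translator introduced by this patch; the StorageClass parameter values below are made-up examples, and the expected "csi.storage.k8s.io/fstype" mapping follows the gce_pd test cases added here:

package main

import (
	"fmt"

	storage "k8s.io/api/storage/v1"
	"k8s.io/csi-translation-lib/plugins"
)

func main() {
	// NewGCEPersistentDiskCSITranslator and TranslateInTreeVolumeOptionsToCSI are the
	// names introduced in this patch; the parameter values are examples only.
	translator := plugins.NewGCEPersistentDiskCSITranslator()
	sc := storage.StorageClass{
		Parameters: map[string]string{
			"fstype": "ext4",   // expected to be rewritten to "csi.storage.k8s.io/fstype"
			"type":   "pd-ssd", // expected to pass through unchanged
		},
	}
	translated, err := translator.TranslateInTreeVolumeOptionsToCSI(sc)
	if err != nil {
		fmt.Println("translation failed:", err)
		return
	}
	fmt.Println(translated.Parameters)
}

Parameters other than fstype are expected to pass through unchanged, as exercised by the "nothing special" test case added below.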
--- staging/src/k8s.io/csi-translation-lib/BUILD | 1 + .../k8s.io/csi-translation-lib/plugins/BUILD | 7 +- .../csi-translation-lib/plugins/aws_ebs.go | 5 +- .../csi-translation-lib/plugins/gce_pd.go | 18 ++++- .../plugins/gce_pd_test.go | 67 +++++++++++++++++++ .../plugins/in_tree_volume.go | 11 +-- .../plugins/openstack_cinder.go | 6 +- .../k8s.io/csi-translation-lib/translate.go | 11 +-- 8 files changed, 110 insertions(+), 16 deletions(-) create mode 100644 staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go diff --git a/staging/src/k8s.io/csi-translation-lib/BUILD b/staging/src/k8s.io/csi-translation-lib/BUILD index 19624f2108e..64333e4fba2 100644 --- a/staging/src/k8s.io/csi-translation-lib/BUILD +++ b/staging/src/k8s.io/csi-translation-lib/BUILD @@ -8,6 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", ], ) diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/BUILD b/staging/src/k8s.io/csi-translation-lib/plugins/BUILD index 965bbd0175b..5aa90077f73 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/BUILD +++ b/staging/src/k8s.io/csi-translation-lib/plugins/BUILD @@ -13,6 +13,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/cloud-provider/volume:go_default_library", ], @@ -34,6 +35,10 @@ filegroup( go_test( name = "go_default_test", - srcs = ["aws_ebs_test.go"], + srcs = [ + "aws_ebs_test.go", + "gce_pd_test.go", + ], embed = [":go_default_library"], + deps = ["//staging/src/k8s.io/api/storage/v1:go_default_library"], ) diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go b/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go index 9cb61768493..5d4cd74f365 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go @@ -24,6 +24,7 @@ import ( "strings" "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" ) const ( @@ -44,8 +45,8 @@ func NewAWSElasticBlockStoreCSITranslator() InTreePlugin { } // TranslateInTreeStorageClassParametersToCSI translates InTree EBS storage class parameters to CSI storage class -func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) { - return scParameters, nil +func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) { + return sc, nil } // TranslateInTreePVToCSI takes a PV with AWSElasticBlockStore set from in-tree diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go index 103a62279de..5f7b82d7d5e 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go @@ -22,6 +22,7 @@ import ( "strings" "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/util/sets" cloudvolume "k8s.io/cloud-provider/volume" ) @@ -56,8 +57,21 @@ func NewGCEPersistentDiskCSITranslator() InTreePlugin { } // TranslateInTreeStorageClassParametersToCSI translates InTree GCE storage class parameters to CSI storage 
class -func (g *gcePersistentDiskCSITranslator) TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) { - return scParameters, nil +func (g *gcePersistentDiskCSITranslator) TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) { + np := map[string]string{} + for k, v := range sc.Parameters { + switch strings.ToLower(k) { + case "fstype": + np["csi.storage.k8s.io/fstype"] = v + default: + np[k] = v + } + } + sc.Parameters = np + + // TODO(#77235): Translate AccessModes and zone/zones to AccessibleTopologies + + return sc, nil } // TranslateInTreePVToCSI takes a PV with GCEPersistentDisk set from in-tree diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go new file mode 100644 index 00000000000..e92e1db9636 --- /dev/null +++ b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugins + +import ( + "reflect" + "testing" + + storage "k8s.io/api/storage/v1" +) + +func NewStorageClass(params map[string]string) storage.StorageClass { + return storage.StorageClass{ + Parameters: params, + } +} + +func TestTranslatePDInTreeVolumeOptionsToCSI(t *testing.T) { + g := NewGCEPersistentDiskCSITranslator() + + tcs := []struct { + name string + options storage.StorageClass + expOptions storage.StorageClass + }{ + { + name: "nothing special", + options: NewStorageClass(map[string]string{"foo": "bar"}), + expOptions: NewStorageClass(map[string]string{"foo": "bar"}), + }, + { + name: "fstype", + options: NewStorageClass(map[string]string{"fstype": "myfs"}), + expOptions: NewStorageClass(map[string]string{"csi.storage.k8s.io/fstype": "myfs"}), + }, + { + name: "empty params", + options: NewStorageClass(map[string]string{}), + expOptions: NewStorageClass(map[string]string{}), + }, + } + + for _, tc := range tcs { + t.Logf("Testing %v", tc.name) + gotOptions, err := g.TranslateInTreeVolumeOptionsToCSI(tc.options) + if err != nil { + t.Errorf("Did not expect error but got: %v", err) + } + if !reflect.DeepEqual(gotOptions, tc.expOptions) { + t.Errorf("Got parameters: %v, expected :%v", gotOptions, tc.expOptions) + } + } +} diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go b/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go index 6d5afd9f1f5..29a0ef5f8cd 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go @@ -16,14 +16,17 @@ limitations under the License. 
package plugins -import "k8s.io/api/core/v1" +import ( + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" +) // InTreePlugin handles translations between CSI and in-tree sources in a PV type InTreePlugin interface { - // TranslateInTreeStorageClassParametersToCSI takes in-tree storage class - // parameters and translates them to a set of parameters consumable by CSI plugin - TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) + // TranslateInTreeVolumeOptionsToCSI takes in-tree volume options + // and translates them to a volume options consumable by CSI plugin + TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) // TranslateInTreePVToCSI takes a persistent volume and will translate // the in-tree source to a CSI Source. The input persistent volume can be modified diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go b/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go index abd204a5575..5b22b5e6951 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go @@ -18,7 +18,9 @@ package plugins import ( "fmt" + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" ) const ( @@ -39,8 +41,8 @@ func NewOpenStackCinderCSITranslator() InTreePlugin { } // TranslateInTreeStorageClassParametersToCSI translates InTree Cinder storage class parameters to CSI storage class -func (t *osCinderCSITranslator) TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) { - return scParameters, nil +func (t *osCinderCSITranslator) TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) { + return sc, nil } // TranslateInTreePVToCSI takes a PV with Cinder set from in-tree diff --git a/staging/src/k8s.io/csi-translation-lib/translate.go b/staging/src/k8s.io/csi-translation-lib/translate.go index ac1a4dd3d86..d22c0c2c619 100644 --- a/staging/src/k8s.io/csi-translation-lib/translate.go +++ b/staging/src/k8s.io/csi-translation-lib/translate.go @@ -21,6 +21,7 @@ import ( "fmt" "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" "k8s.io/csi-translation-lib/plugins" ) @@ -32,15 +33,15 @@ var ( } ) -// TranslateInTreeStorageClassParametersToCSI takes in-tree storage class -// parameters and translates them to a set of parameters consumable by CSI plugin -func TranslateInTreeStorageClassParametersToCSI(inTreePluginName string, scParameters map[string]string) (map[string]string, error) { +// TranslateInTreeVolumeOptionsToCSI takes in-tree volume options +// and translates them to a set of parameters consumable by CSI plugin +func TranslateInTreeVolumeOptionsToCSI(inTreePluginName string, sc storage.StorageClass) (storage.StorageClass, error) { for _, curPlugin := range inTreePlugins { if inTreePluginName == curPlugin.GetInTreePluginName() { - return curPlugin.TranslateInTreeStorageClassParametersToCSI(scParameters) + return curPlugin.TranslateInTreeVolumeOptionsToCSI(sc) } } - return nil, fmt.Errorf("could not find in-tree storage class parameter translation logic for %#v", inTreePluginName) + return storage.StorageClass{}, fmt.Errorf("could not find in-tree storage class parameter translation logic for %#v", inTreePluginName) } // TranslateInTreePVToCSI takes a persistent volume and will translate From 101c6298ce3167e04c0568e80f621456197eabdf Mon Sep 17 00:00:00 2001 From: David Zhu Date: Wed, 1 May 2019 18:10:18 
-0700 Subject: [PATCH 149/194] Add tests for backwardCompatibleAccessModes --- .../plugins/gce_pd_test.go | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go index e92e1db9636..e5d970fb220 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go @@ -20,6 +20,7 @@ import ( "reflect" "testing" + "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" ) @@ -65,3 +66,53 @@ func TestTranslatePDInTreeVolumeOptionsToCSI(t *testing.T) { } } } + +func TestBackwardCompatibleAccessModes(t *testing.T) { + testCases := []struct { + name string + accessModes []v1.PersistentVolumeAccessMode + expAccessModes []v1.PersistentVolumeAccessMode + }{ + { + name: "multiple normals", + accessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadOnlyMany, + v1.ReadWriteOnce, + }, + expAccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadOnlyMany, + v1.ReadWriteOnce, + }, + }, + { + name: "one normal", + accessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + }, + expAccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + }, + }, + { + name: "some readwritemany", + accessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadWriteMany, + }, + expAccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadWriteOnce, + }, + }, + } + + for _, tc := range testCases { + t.Logf("running test: %v", tc.name) + + got := backwardCompatibleAccessModes(tc.accessModes) + + if !reflect.DeepEqual(tc.expAccessModes, got) { + t.Fatalf("Expected access modes: %v, instead got: %v", tc.expAccessModes, got) + } + } +} From d92b0e21471c5242bc68578b5116c21065020e1b Mon Sep 17 00:00:00 2001 From: Yago Nobre Date: Tue, 14 May 2019 00:04:11 -0300 Subject: [PATCH 150/194] Add phase runner to kubeadm reset --- cmd/kubeadm/app/cmd/cmd.go | 2 +- cmd/kubeadm/app/cmd/options/constant.go | 3 + cmd/kubeadm/app/cmd/reset.go | 218 +++++++++++++++++------- cmd/kubeadm/app/cmd/reset_test.go | 18 -- 4 files changed, 163 insertions(+), 78 deletions(-) diff --git a/cmd/kubeadm/app/cmd/cmd.go b/cmd/kubeadm/app/cmd/cmd.go index a80c30a2e92..c27a8691e1d 100644 --- a/cmd/kubeadm/app/cmd/cmd.go +++ b/cmd/kubeadm/app/cmd/cmd.go @@ -84,7 +84,7 @@ func NewKubeadmCommand(in io.Reader, out, err io.Writer) *cobra.Command { cmds.AddCommand(NewCmdConfig(out)) cmds.AddCommand(NewCmdInit(out, nil)) cmds.AddCommand(NewCmdJoin(out, nil)) - cmds.AddCommand(NewCmdReset(in, out)) + cmds.AddCommand(NewCmdReset(in, out, nil)) cmds.AddCommand(NewCmdVersion(out)) cmds.AddCommand(NewCmdToken(out, err)) cmds.AddCommand(upgrade.NewCmdUpgrade(out)) diff --git a/cmd/kubeadm/app/cmd/options/constant.go b/cmd/kubeadm/app/cmd/options/constant.go index 251cd201334..72bab518598 100644 --- a/cmd/kubeadm/app/cmd/options/constant.go +++ b/cmd/kubeadm/app/cmd/options/constant.go @@ -127,4 +127,7 @@ const ( // SkipCertificateKeyPrint flag instruct kubeadm to skip printing certificate key used to encrypt certs by 'kubeadm init'. 
SkipCertificateKeyPrint = "skip-certificate-key-print" + + // ForceReset flag instructs kubeadm to reset the node without prompting for confirmation + ForceReset = "force" ) diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index 9ac3c6d5877..eea7e05df44 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -28,6 +28,7 @@ import ( "github.com/lithammer/dedent" "github.com/pkg/errors" "github.com/spf13/cobra" + flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" @@ -35,6 +36,7 @@ import ( kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" + "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd" @@ -48,99 +50,197 @@ import ( utilsexec "k8s.io/utils/exec" ) -// NewCmdReset returns the "kubeadm reset" command -func NewCmdReset(in io.Reader, out io.Writer) *cobra.Command { - var certsDir string +// resetOptions defines all the options exposed via flags by kubeadm reset. +type resetOptions struct { + certificatesDir string + criSocketPath string + forceReset bool + ignorePreflightErrors []string + kubeconfigPath string +} + +// resetData defines all the runtime information used when running the kubeadm reset workflow; +// this data is shared across all the phases that are included in the workflow. +type resetData struct { + certificatesDir string + client clientset.Interface + criSocketPath string + forceReset bool + ignorePreflightErrors sets.String + inputReader io.Reader + outputWriter io.Writer + cfg *kubeadmapi.InitConfiguration +} + +// newResetOptions returns a struct ready for being used for creating cmd reset flags. +func newResetOptions() *resetOptions { + return &resetOptions{ + certificatesDir: kubeadmapiv1beta2.DefaultCertificatesDir, + forceReset: false, + kubeconfigPath: kubeadmconstants.GetAdminKubeConfigPath(), + } +} + +// newResetData returns a new resetData struct to be used for the execution of the kubeadm reset workflow.
+func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out io.Writer) (*resetData, error) { + var cfg *kubeadmapi.InitConfiguration + ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors) + if err != nil { + return nil, err + } + + client, err := getClientset(options.kubeconfigPath, false) + if err == nil { + klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", options.kubeconfigPath) + cfg, err = configutil.FetchInitConfigurationFromCluster(client, out, "reset", false) + if err != nil { + klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err) + } + } else { + klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", options.kubeconfigPath) + } + var criSocketPath string - var ignorePreflightErrors []string - var forceReset bool - var client clientset.Interface - kubeConfigFile := kubeadmconstants.GetAdminKubeConfigPath() + if options.criSocketPath == "" { + criSocketPath, err = resetDetectCRISocket(cfg) + if err != nil { + return nil, err + } + klog.V(1).Infof("[reset] Detected and using CRI socket: %s", criSocketPath) + } + + return &resetData{ + certificatesDir: options.certificatesDir, + client: client, + criSocketPath: criSocketPath, + forceReset: options.forceReset, + ignorePreflightErrors: ignorePreflightErrorsSet, + inputReader: in, + outputWriter: out, + cfg: cfg, + }, nil +} + +// AddResetFlags adds reset flags +func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) { + flagSet.StringVar( + &resetOptions.certificatesDir, options.CertificatesDir, resetOptions.certificatesDir, + `The path to the directory where the certificates are stored. If specified, clean this directory.`, + ) + flagSet.BoolVarP( + &resetOptions.forceReset, options.ForceReset, "f", false, + "Reset the node without prompting for confirmation.", + ) + + options.AddKubeConfigFlag(flagSet, &resetOptions.kubeconfigPath) + options.AddIgnorePreflightErrorsFlag(flagSet, &resetOptions.ignorePreflightErrors) + cmdutil.AddCRISocketFlag(flagSet, &resetOptions.criSocketPath) +} + +// NewCmdReset returns the "kubeadm reset" command +func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra.Command { + if resetOptions == nil { + resetOptions = newResetOptions() + } + resetRunner := workflow.NewRunner() cmd := &cobra.Command{ Use: "reset", Short: "Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join'", Run: func(cmd *cobra.Command, args []string) { - ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors) + c, err := resetRunner.InitData(args) kubeadmutil.CheckErr(err) - var cfg *kubeadmapi.InitConfiguration - client, err = getClientset(kubeConfigFile, false) - if err == nil { - klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", kubeConfigFile) - cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false) - if err != nil { - klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err) - } - } else { - klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", kubeConfigFile) - } - - if criSocketPath == "" { - criSocketPath, err = resetDetectCRISocket(cfg) - kubeadmutil.CheckErr(err) - klog.V(1).Infof("[reset] Detected and using CRI socket: %s", criSocketPath) - } - - r, err := NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath) + err = 
resetRunner.Run(args) kubeadmutil.CheckErr(err) - kubeadmutil.CheckErr(r.Run(out, client, cfg)) + // TODO: remove this once we have all phases in place. + // the method resetData.Run() itself should be removed too. + data := c.(*resetData) + kubeadmutil.CheckErr(data.Run()) }, } - options.AddIgnorePreflightErrorsFlag(cmd.PersistentFlags(), &ignorePreflightErrors) - options.AddKubeConfigFlag(cmd.PersistentFlags(), &kubeConfigFile) + AddResetFlags(cmd.Flags(), resetOptions) - cmd.PersistentFlags().StringVar( - &certsDir, "cert-dir", kubeadmapiv1beta2.DefaultCertificatesDir, - "The path to the directory where the certificates are stored. If specified, clean this directory.", - ) + // initialize the workflow runner with the list of phases + // TODO: append phases here - cmdutil.AddCRISocketFlag(cmd.PersistentFlags(), &criSocketPath) + // sets the data builder function, that will be used by the runner + // both when running the entire workflow or single phases + resetRunner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) { + return newResetData(cmd, resetOptions, in, out) + }) - cmd.PersistentFlags().BoolVarP( - &forceReset, "force", "f", false, - "Reset the node without prompting for confirmation.", - ) + // binds the Runner to kubeadm reset command by altering + // command help, adding --skip-phases flag and by adding phases subcommands + resetRunner.BindToCommand(cmd) return cmd } -// Reset defines struct used for kubeadm reset command -type Reset struct { - certsDir string - criSocketPath string +// Cfg returns the InitConfiguration. +func (r *resetData) Cfg() *kubeadmapi.InitConfiguration { + return r.cfg } -// NewReset instantiate Reset struct -func NewReset(in io.Reader, ignorePreflightErrors sets.String, forceReset bool, certsDir, criSocketPath string) (*Reset, error) { - if !forceReset { +// CertificatesDir returns the CertificatesDir. +func (r *resetData) CertificatesDir() string { + return r.certificatesDir +} + +// Client returns the Client for accessing the cluster. +func (r *resetData) Client() clientset.Interface { + return r.client +} + +// ForceReset returns the forceReset flag. +func (r *resetData) ForceReset() bool { + return r.forceReset +} + +// InputReader returns the io.Reader used to read messages. +func (r *resetData) InputReader() io.Reader { + return r.inputReader +} + +// IgnorePreflightErrors returns the list of preflight errors to ignore. +func (r *resetData) IgnorePreflightErrors() sets.String { + return r.ignorePreflightErrors +} + +func (r *resetData) preflight() error { + if !r.ForceReset() { fmt.Println("[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.") fmt.Print("[reset] Are you sure you want to proceed? [y/N]: ") - s := bufio.NewScanner(in) + s := bufio.NewScanner(r.InputReader()) s.Scan() if err := s.Err(); err != nil { - return nil, err + return err } if strings.ToLower(s.Text()) != "y" { - return nil, errors.New("Aborted reset operation") + return errors.New("Aborted reset operation") } } fmt.Println("[preflight] Running pre-flight checks") - if err := preflight.RunRootCheckOnly(ignorePreflightErrors); err != nil { - return nil, err + if err := preflight.RunRootCheckOnly(r.IgnorePreflightErrors()); err != nil { + return err } - return &Reset{ - certsDir: certsDir, - criSocketPath: criSocketPath, - }, nil + return nil } // Run reverts any changes made to this host by "kubeadm init" or "kubeadm join".
-func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.InitConfiguration) error { +func (r *resetData) Run() error { var dirsToClean []string + cfg := r.Cfg() + certsDir := r.CertificatesDir() + client := r.Client() + + err := r.preflight() + if err != nil { + return err + } // Reset the ClusterStatus for a given control-plane node. if isControlPlane() && cfg != nil { @@ -203,10 +303,10 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I // Remove contents from the config and pki directories klog.V(1).Infoln("[reset] Removing contents from the config and pki directories") - if r.certsDir != kubeadmapiv1beta2.DefaultCertificatesDir { - klog.Warningf("[reset] WARNING: Cleaning a non-default certificates directory: %q\n", r.certsDir) + if certsDir != kubeadmapiv1beta2.DefaultCertificatesDir { + klog.Warningf("[reset] WARNING: Cleaning a non-default certificates directory: %q\n", certsDir) } - resetConfigDir(kubeadmconstants.KubernetesDir, r.certsDir) + resetConfigDir(kubeadmconstants.KubernetesDir, certsDir) // Output help text instructing user how to remove iptables rules msg := dedent.Dedent(` diff --git a/cmd/kubeadm/app/cmd/reset_test.go b/cmd/kubeadm/app/cmd/reset_test.go index 6198f7f8fa6..1d38fbe47a0 100644 --- a/cmd/kubeadm/app/cmd/reset_test.go +++ b/cmd/kubeadm/app/cmd/reset_test.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "io" "io/ioutil" "os" "path/filepath" @@ -26,8 +25,6 @@ import ( "github.com/lithammer/dedent" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" testutil "k8s.io/kubernetes/cmd/kubeadm/test" @@ -85,21 +82,6 @@ func assertDirEmpty(t *testing.T, path string) { } } -func TestNewReset(t *testing.T) { - var in io.Reader - certsDir := kubeadmapiv1beta2.DefaultCertificatesDir - criSocketPath := kubeadmconstants.DefaultDockerCRISocket - forceReset := true - - ignorePreflightErrors := []string{"all"} - ignorePreflightErrorsSet, _ := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors) - NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath) - - ignorePreflightErrors = []string{} - ignorePreflightErrorsSet, _ = validation.ValidateIgnorePreflightErrors(ignorePreflightErrors) - NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath) -} - func TestConfigDirCleaner(t *testing.T) { tests := map[string]struct { resetDir string From 97e22fb9b1e1b0df068a7f05af641f0b46a5cbc1 Mon Sep 17 00:00:00 2001 From: Yago Nobre Date: Tue, 14 May 2019 03:06:50 -0300 Subject: [PATCH 151/194] Update bazel --- cmd/kubeadm/app/cmd/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index cab33333da4..dcaed8131f3 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -86,7 +86,6 @@ go_test( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", "//cmd/kubeadm/app/componentconfigs:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", From 140c8c73a64deb102b528109138ca9fb7dbb2392 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 7 May 
2019 13:34:18 -0700 Subject: [PATCH 152/194] Pass {Operation}Option to Webhooks --- pkg/apis/admission/types.go | 10 ++- .../src/k8s.io/api/admission/v1beta1/types.go | 10 ++- .../pkg/apis/apiextensions/v1beta1/types.go | 2 +- .../apiserver/pkg/admission/attributes.go | 8 +- .../apiserver/pkg/admission/interfaces.go | 2 + .../plugin/webhook/request/admissionreview.go | 3 + .../apiserver/pkg/endpoints/handlers/BUILD | 1 + .../pkg/endpoints/handlers/create.go | 3 +- .../pkg/endpoints/handlers/delete.go | 8 +- .../apiserver/pkg/endpoints/handlers/patch.go | 51 +++++++---- .../apiserver/pkg/endpoints/handlers/rest.go | 4 +- .../pkg/endpoints/handlers/rest_test.go | 90 ++++++++++++++++++- .../pkg/endpoints/handlers/update.go | 22 ++++- .../apiserver/pkg/registry/rest/create.go | 1 + .../apiserver/pkg/registry/rest/update.go | 1 + 15 files changed, 186 insertions(+), 30 deletions(-) diff --git a/pkg/apis/admission/types.go b/pkg/apis/admission/types.go index fb704abdf90..f874013e398 100644 --- a/pkg/apis/admission/types.go +++ b/pkg/apis/admission/types.go @@ -63,7 +63,8 @@ type AdmissionRequest struct { // Namespace is the namespace associated with the request (if any). // +optional Namespace string - // Operation is the operation being performed + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. Operation Operation // UserInfo is information about the requesting user UserInfo authentication.UserInfo @@ -78,6 +79,13 @@ type AdmissionRequest struct { // Defaults to false. // +optional DryRun *bool + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will be a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.Object } // AdmissionResponse describes an admission response. diff --git a/staging/src/k8s.io/api/admission/v1beta1/types.go b/staging/src/k8s.io/api/admission/v1beta1/types.go index 653e847107f..9d2884e66fe 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/types.go +++ b/staging/src/k8s.io/api/admission/v1beta1/types.go @@ -61,7 +61,8 @@ type AdmissionRequest struct { // Namespace is the namespace associated with the request (if any). // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` - // Operation is the operation being performed + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` // UserInfo is information about the requesting user UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` @@ -75,6 +76,13 @@ type AdmissionRequest struct { // Defaults to false. // +optional DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g.
for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` } // AdmissionResponse describes an admission response. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index 220a494bce2..0ca6673bf4a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -443,7 +443,7 @@ type ConversionRequest struct { // ConversionResponse describes a conversion response. type ConversionResponse struct { // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. + // This should be copied over from the corresponding ConversionRequest. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list diff --git a/staging/src/k8s.io/apiserver/pkg/admission/attributes.go b/staging/src/k8s.io/apiserver/pkg/admission/attributes.go index c8973cc629b..ad6ca6ba6fc 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/attributes.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/attributes.go @@ -34,6 +34,7 @@ type attributesRecord struct { resource schema.GroupVersionResource subresource string operation Operation + options runtime.Object dryRun bool object runtime.Object oldObject runtime.Object @@ -45,7 +46,7 @@ type attributesRecord struct { annotationsLock sync.RWMutex } -func NewAttributesRecord(object runtime.Object, oldObject runtime.Object, kind schema.GroupVersionKind, namespace, name string, resource schema.GroupVersionResource, subresource string, operation Operation, dryRun bool, userInfo user.Info) Attributes { +func NewAttributesRecord(object runtime.Object, oldObject runtime.Object, kind schema.GroupVersionKind, namespace, name string, resource schema.GroupVersionResource, subresource string, operation Operation, operationOptions runtime.Object, dryRun bool, userInfo user.Info) Attributes { return &attributesRecord{ kind: kind, namespace: namespace, @@ -53,6 +54,7 @@ func NewAttributesRecord(object runtime.Object, oldObject runtime.Object, kind s resource: resource, subresource: subresource, operation: operation, + options: operationOptions, dryRun: dryRun, object: object, oldObject: oldObject, @@ -84,6 +86,10 @@ func (record *attributesRecord) GetOperation() Operation { return record.operation } +func (record *attributesRecord) GetOperationOptions() runtime.Object { + return record.options +} + func (record *attributesRecord) IsDryRun() bool { return record.dryRun } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/interfaces.go b/staging/src/k8s.io/apiserver/pkg/admission/interfaces.go index 866777cc708..a8853fdd73e 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/interfaces.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/interfaces.go @@ -41,6 +41,8 @@ type Attributes interface { GetSubresource() string // GetOperation is the operation being performed GetOperation() Operation 
+ // GetOperationOptions is the options for the operation being performed + GetOperationOptions() runtime.Object // IsDryRun indicates that modifications will definitely not be persisted for this request. This is to prevent // admission controllers with side effects and a method of reconciliation from being overwhelmed. // However, a value of false for this does not mean that the modification will be persisted, because it diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go index cec41315c2b..51a45a36a70 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go @@ -68,6 +68,9 @@ func CreateAdmissionReview(attr *generic.VersionedAttributes) admissionv1beta1.A Object: attr.VersionedOldObject, }, DryRun: &dryRun, + Options: runtime.RawExtension{ + Object: attr.GetOperationOptions(), + }, }, } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index 85695ff8635..87846313d46 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -27,6 +27,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 40c53efae33..d4a31b7e7e2 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -106,6 +106,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int scope.err(err, w, req) return } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) defaultGVK := scope.Kind original := r.New() @@ -128,7 +129,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) userInfo, _ := request.UserFrom(ctx) - admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, dryrun.IsDryRun(options.DryRun), userInfo) + admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo) if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { err = mutatingAdmission.Admit(admissionAttributes, scope) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 8f1d1d21793..7375c575eba 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ 
b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -113,11 +113,12 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc scope.err(err, w, req) return } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) trace.Step("About to check admission control") if admit != nil && admit.Handles(admission.Delete) { userInfo, _ := request.UserFrom(ctx) - attrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, dryrun.IsDryRun(options.DryRun), userInfo) + attrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { if err := mutatingAdmission.Admit(attrs, scope); err != nil { scope.err(err, w, req) @@ -236,6 +237,8 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc scope.err(err, w, req) return } + // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions + // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions") obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { @@ -262,11 +265,12 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc scope.err(err, w, req) return } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) admit = admission.WithAudit(admit, ae) if admit != nil && admit.Handles(admission.Delete) { userInfo, _ := request.UserFrom(ctx) - attrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, dryrun.IsDryRun(options.DryRun), userInfo) + attrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { err = mutatingAdmission.Admit(attrs, scope) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 23644bd3409..c33f181661a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" @@ -118,6 +118,7 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac scope.err(err, w, req) return } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("PatchOptions")) ae := request.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) @@ -151,6 +152,7 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac scope.Resource, scope.Subresource, admission.Create, + patchToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo) staticUpdateAttributes := admission.NewAttributesRecord( @@ -162,6 +164,7 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit 
admission.Interfac scope.Resource, scope.Subresource, admission.Update, + patchToUpdateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo, ) @@ -489,9 +492,9 @@ func (p *patcher) applyPatch(_ context.Context, _, currentObject runtime.Object) return objToUpdate, nil } -func (p *patcher) admissionAttributes(ctx context.Context, updatedObject runtime.Object, currentObject runtime.Object, operation admission.Operation) admission.Attributes { +func (p *patcher) admissionAttributes(ctx context.Context, updatedObject runtime.Object, currentObject runtime.Object, operation admission.Operation, operationOptions runtime.Object) admission.Attributes { userInfo, _ := request.UserFrom(ctx) - return admission.NewAttributesRecord(updatedObject, currentObject, p.kind, p.namespace, p.name, p.resource, p.subresource, operation, p.dryRun, userInfo) + return admission.NewAttributesRecord(updatedObject, currentObject, p.kind, p.namespace, p.name, p.resource, p.subresource, operation, operationOptions, p.dryRun, userInfo) } // applyAdmission is called every time GuaranteedUpdate asks for the updated object, @@ -500,16 +503,19 @@ func (p *patcher) admissionAttributes(ctx context.Context, updatedObject runtime func (p *patcher) applyAdmission(ctx context.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) { p.trace.Step("About to check admission control") var operation admission.Operation + var options runtime.Object if hasUID, err := hasUID(currentObject); err != nil { return nil, err } else if !hasUID { operation = admission.Create currentObject = nil + options = patchToCreateOptions(p.options) } else { operation = admission.Update + options = patchToUpdateOptions(p.options) } if p.admissionCheck != nil && p.admissionCheck.Handles(operation) { - attributes := p.admissionAttributes(ctx, patchedObject, currentObject, operation) + attributes := p.admissionAttributes(ctx, patchedObject, currentObject, operation, options) return patchedObject, p.admissionCheck.Admit(attributes, p.objectInterfaces) } return patchedObject, nil @@ -551,11 +557,8 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti wasCreated := false p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil, p.applyPatch, p.applyAdmission) result, err := finishRequest(p.timeout, func() (runtime.Object, error) { - // TODO: Pass in UpdateOptions to override UpdateStrategy.AllowUpdateOnCreate - options, err := patchToUpdateOptions(p.options) - if err != nil { - return nil, err - } + // Pass in UpdateOptions to override UpdateStrategy.AllowUpdateOnCreate + options := patchToUpdateOptions(p.options) updateObject, created, updateErr := p.restPatcher.Update(ctx, p.name, p.updatedObjectInfo, p.createValidation, p.updateValidation, p.forceAllowCreate, options) wasCreated = created return updateObject, updateErr @@ -600,12 +603,28 @@ func interpretStrategicMergePatchError(err error) error { } } -func patchToUpdateOptions(po *metav1.PatchOptions) (*metav1.UpdateOptions, error) { - b, err := json.Marshal(po) - if err != nil { - return nil, err +// patchToUpdateOptions creates an UpdateOptions with the same field values as the provided PatchOptions. 
+func patchToUpdateOptions(po *metav1.PatchOptions) *metav1.UpdateOptions { + if po == nil { + return nil } - uo := metav1.UpdateOptions{} - err = json.Unmarshal(b, &uo) - return &uo, err + uo := &metav1.UpdateOptions{ + DryRun: po.DryRun, + FieldManager: po.FieldManager, + } + uo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions")) + return uo +} + +// patchToCreateOptions creates an CreateOptions with the same field values as the provided PatchOptions. +func patchToCreateOptions(po *metav1.PatchOptions) *metav1.CreateOptions { + if po == nil { + return nil + } + co := &metav1.CreateOptions{ + DryRun: po.DryRun, + FieldManager: po.FieldManager, + } + co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + return co } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 4db0c067627..cc82a8df8d0 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -137,14 +137,14 @@ func ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admiss userInfo, _ := request.UserFrom(ctx) // TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { - err = mutatingAdmission.Admit(admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, false, userInfo), scope) + err = mutatingAdmission.Admit(admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope) if err != nil { scope.err(err, w, req) return } } if validatingAdmission, ok := admit.(admission.ValidationInterface); ok { - err = validatingAdmission.Validate(admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, false, userInfo), scope) + err = validatingAdmission.Validate(admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope) if err != nil { scope.err(err, w, req) return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go index 2c9dab5b94e..4c33f05e79d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go @@ -26,7 +26,8 @@ import ( "testing" "time" - "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch" + fuzz "github.com/google/gofuzz" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apiserver/pkg/admission" @@ -1000,3 +1002,89 @@ func (alwaysErrorTyper) ObjectKinds(runtime.Object) ([]schema.GroupVersionKind, func (alwaysErrorTyper) Recognizes(gvk schema.GroupVersionKind) bool { return false } + +func TestUpdateToCreateOptions(t *testing.T) { + f := fuzz.New() + for i := 0; i < 100; i++ { + 
t.Run(fmt.Sprintf("Run %d/100", i), func(t *testing.T) { + update := &metav1.UpdateOptions{} + f.Fuzz(update) + create := updateToCreateOptions(update) + + b, err := json.Marshal(create) + if err != nil { + t.Fatalf("failed to marshal CreateOptions (%v): %v", err, create) + } + got := &metav1.UpdateOptions{} + err = json.Unmarshal(b, &got) + if err != nil { + t.Fatalf("failed to unmarshal UpdateOptions: %v", err) + } + got.TypeMeta = metav1.TypeMeta{} + update.TypeMeta = metav1.TypeMeta{} + if !reflect.DeepEqual(*update, *got) { + t.Fatalf(`updateToCreateOptions round-trip failed: +got: %#+v +want: %#+v`, got, update) + } + + }) + } +} + +func TestPatchToUpdateOptions(t *testing.T) { + tests := []struct { + name string + converterFn func(po *metav1.PatchOptions) interface{} + }{ + { + name: "patchToUpdateOptions", + converterFn: func(patch *metav1.PatchOptions) interface{} { + return patchToUpdateOptions(patch) + }, + }, + { + name: "patchToCreateOptions", + converterFn: func(patch *metav1.PatchOptions) interface{} { + return patchToCreateOptions(patch) + }, + }, + } + + f := fuzz.New() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + for i := 0; i < 100; i++ { + t.Run(fmt.Sprintf("Run %d/100", i), func(t *testing.T) { + patch := &metav1.PatchOptions{} + f.Fuzz(patch) + converted := test.converterFn(patch) + + b, err := json.Marshal(converted) + if err != nil { + t.Fatalf("failed to marshal converted object (%v): %v", err, converted) + } + got := &metav1.PatchOptions{} + err = json.Unmarshal(b, &got) + if err != nil { + t.Fatalf("failed to unmarshal converted object: %v", err) + } + + // Clear TypeMeta because we expect it to be different between the original and converted type + got.TypeMeta = metav1.TypeMeta{} + patch.TypeMeta = metav1.TypeMeta{} + + // clear fields that we know belong in PatchOptions only + patch.Force = nil + + if !reflect.DeepEqual(*patch, *got) { + t.Fatalf(`round-trip failed: +got: %#+v +want: %#+v`, got, converted) + } + + }) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 4117c0e1683..f3aec87b396 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -87,6 +87,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa scope.err(err, w, req) return } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions")) s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { @@ -138,11 +139,11 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error()) } else if !isNotZeroObject { if mutatingAdmission.Handles(admission.Create) { - return newObj, mutatingAdmission.Admit(admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, dryrun.IsDryRun(options.DryRun), userInfo), scope) + return newObj, mutatingAdmission.Admit(admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope) } } else { if mutatingAdmission.Handles(admission.Update) { - return newObj, mutatingAdmission.Admit(admission.NewAttributesRecord(newObj, oldObj, scope.Kind, 
namespace, name, scope.Resource, scope.Subresource, admission.Update, dryrun.IsDryRun(options.DryRun), userInfo), scope) + return newObj, mutatingAdmission.Admit(admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope) } } return newObj, nil @@ -172,11 +173,11 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa rest.DefaultUpdatedObjectInfo(obj, transformers...), withAuthorization(rest.AdmissionToValidateObjectFunc( admit, - admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, dryrun.IsDryRun(options.DryRun), userInfo), scope), + admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope), scope.Authorizer, createAuthorizerAttributes), rest.AdmissionToValidateObjectUpdateFunc( admit, - admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, dryrun.IsDryRun(options.DryRun), userInfo), scope), + admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope), false, options, ) @@ -229,3 +230,16 @@ func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer return errors.NewForbidden(gr, name, err) } } + +// updateToCreateOptions creates a CreateOptions with the same field values as the provided UpdateOptions. +func updateToCreateOptions(uo *metav1.UpdateOptions) *metav1.CreateOptions { + if uo == nil { + return nil + } + co := &metav1.CreateOptions{ + DryRun: uo.DryRun, + FieldManager: uo.FieldManager, + } + co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + return co +} diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/create.go b/staging/src/k8s.io/apiserver/pkg/registry/rest/create.go index cc3c9ce4bc5..7750cb7bfd6 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/create.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/create.go @@ -175,6 +175,7 @@ func AdmissionToValidateObjectFunc(admit admission.Interface, staticAttributes a staticAttributes.GetResource(), staticAttributes.GetSubresource(), staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), staticAttributes.IsDryRun(), staticAttributes.GetUserInfo(), ) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go b/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go index d214e7e6a8f..21719b015eb 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go @@ -271,6 +271,7 @@ func AdmissionToValidateObjectUpdateFunc(admit admission.Interface, staticAttrib staticAttributes.GetResource(), staticAttributes.GetSubresource(), staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), staticAttributes.IsDryRun(), staticAttributes.GetUserInfo(), ) From 332d88db1aaddd0b68bfcc62a6a9dc3ce53eed34 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 7 May 2019 13:36:44 -0700 Subject: [PATCH 153/194] Generate proto bindings for: Pass {Operation}Option to Webhooks --- .../v1beta1/zz_generated.conversion.go | 6 + pkg/apis/admission/zz_generated.deepcopy.go | 3 + 
.../api/admission/v1beta1/generated.pb.go | 160 +++++++++++------- .../api/admission/v1beta1/generated.proto | 11 +- .../v1beta1/types_swagger_doc_generated.go | 3 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../apiextensions/v1beta1/generated.proto | 2 +- 7 files changed, 124 insertions(+), 62 deletions(-) diff --git a/pkg/apis/admission/v1beta1/zz_generated.conversion.go b/pkg/apis/admission/v1beta1/zz_generated.conversion.go index 15dbf75d937..d018d6f5d43 100644 --- a/pkg/apis/admission/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/admission/v1beta1/zz_generated.conversion.go @@ -90,6 +90,9 @@ func autoConvert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in *v1be return err } out.DryRun = (*bool)(unsafe.Pointer(in.DryRun)) + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Options, &out.Options, s); err != nil { + return err + } return nil } @@ -117,6 +120,9 @@ func autoConvert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in *admi return err } out.DryRun = (*bool)(unsafe.Pointer(in.DryRun)) + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Options, &out.Options, s); err != nil { + return err + } return nil } diff --git a/pkg/apis/admission/zz_generated.deepcopy.go b/pkg/apis/admission/zz_generated.deepcopy.go index 4c767f0dc7d..b68f0c67c22 100644 --- a/pkg/apis/admission/zz_generated.deepcopy.go +++ b/pkg/apis/admission/zz_generated.deepcopy.go @@ -42,6 +42,9 @@ func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { *out = new(bool) **out = **in } + if in.Options != nil { + out.Options = in.Options.DeepCopyObject() + } return } diff --git a/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go b/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go index 4082082ff94..2eb7b86a197 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go @@ -158,6 +158,14 @@ func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Options.Size())) + n6, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 return i, nil } @@ -192,11 +200,11 @@ func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n6, err := m.Result.MarshalTo(dAtA[i:]) + n7, err := m.Result.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } if m.Patch != nil { dAtA[i] = 0x22 @@ -254,21 +262,21 @@ func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) - n7, err := m.Request.MarshalTo(dAtA[i:]) + n8, err := m.Request.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } if m.Response != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) - n8, err := m.Response.MarshalTo(dAtA[i:]) + n9, err := m.Response.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } return i, nil } @@ -308,6 +316,8 @@ func (m *AdmissionRequest) Size() (n int) { if m.DryRun != nil { n += 2 } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -383,6 +393,7 @@ func (this *AdmissionRequest) String() string { `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `OldObject:` + 
strings.Replace(strings.Replace(this.OldObject.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(this.Options.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -776,6 +787,36 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1334,57 +1375,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 821 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x37, 0x69, 0x12, 0x4f, 0x2a, 0x36, 0x3b, 0x80, 0x64, 0x45, 0xc8, 0x09, 0x3d, 0xa0, - 0x20, 0x6d, 0xc7, 0xb4, 0x82, 0x55, 0xb5, 0xe2, 0x12, 0xd3, 0x08, 0x55, 0x48, 0xdb, 0x6a, 0x76, - 0x83, 0x80, 0x03, 0xd2, 0xc4, 0x9e, 0x4d, 0x4c, 0xe2, 0x19, 0xe3, 0x99, 0x49, 0xc9, 0x0d, 0x71, - 0xe5, 0x82, 0xc4, 0x9f, 0xc4, 0xa5, 0xc7, 0x3d, 0xee, 0x29, 0xa2, 0xe1, 0xbf, 0xe8, 0x09, 0x79, - 0x3c, 0x8e, 0x43, 0xba, 0x85, 0x5d, 0xb4, 0x27, 0xfb, 0xfd, 0xf8, 0xbe, 0x37, 0xf3, 0xbd, 0x37, - 0x0f, 0x0c, 0x67, 0x27, 0x02, 0x45, 0xdc, 0x9b, 0xa9, 0x31, 0x4d, 0x19, 0x95, 0x54, 0x78, 0x0b, - 0xca, 0x42, 0x9e, 0x7a, 0x26, 0x40, 0x92, 0xc8, 0x23, 0x61, 0x1c, 0x09, 0x11, 0x71, 0xe6, 0x2d, - 0x8e, 0xc6, 0x54, 0x92, 0x23, 0x6f, 0x42, 0x19, 0x4d, 0x89, 0xa4, 0x21, 0x4a, 0x52, 0x2e, 0x39, - 0xfc, 0x20, 0xcf, 0x46, 0x24, 0x89, 0xd0, 0x26, 0x1b, 0x99, 0xec, 0xce, 0xe1, 0x24, 0x92, 0x53, - 0x35, 0x46, 0x01, 0x8f, 0xbd, 0x09, 0x9f, 0x70, 0x4f, 0x83, 0xc6, 0xea, 0xb9, 0xb6, 0xb4, 0xa1, - 0xff, 0x72, 0xb2, 0xce, 0xc3, 0xed, 0xd2, 0x4a, 0x4e, 0x29, 0x93, 0x51, 0x40, 0x64, 0x5e, 0x7f, - 0xb7, 0x74, 0xe7, 0xd3, 0x32, 0x3b, 0x26, 0xc1, 0x34, 0x62, 0x34, 0x5d, 0x7a, 0xc9, 0x6c, 0x92, - 0x39, 0x84, 0x17, 0x53, 0x49, 0x5e, 0x85, 0xf2, 0xee, 0x42, 0xa5, 0x8a, 0xc9, 0x28, 0xa6, 0xb7, - 0x00, 0x8f, 0xfe, 0x0b, 0x20, 0x82, 0x29, 0x8d, 0xc9, 0x2e, 0xee, 0xe0, 0xf7, 0x3a, 0x68, 0x0f, - 0x0a, 0x45, 0x30, 0xfd, 0x51, 0x51, 0x21, 0xa1, 0x0f, 0xaa, 0x2a, 0x0a, 0x1d, 0xab, 0x67, 0xf5, - 0x6d, 0xff, 0x93, 0xab, 0x55, 0xb7, 0xb2, 0x5e, 0x75, 0xab, 0xa3, 0xb3, 0xd3, 0x9b, 0x55, 0xf7, - 0xc3, 0xbb, 0x0a, 0xc9, 0x65, 0x42, 0x05, 0x1a, 0x9d, 0x9d, 0xe2, 0x0c, 0x0c, 0xbf, 0x01, 0xb5, - 0x59, 0xc4, 0x42, 0xe7, 0x5e, 0xcf, 0xea, 0xb7, 0x8e, 0x1f, 0xa1, 0xb2, 0x03, 0x1b, 0x18, 0x4a, - 0x66, 0x93, 0xcc, 0x21, 0x50, 0x26, 0x03, 0x5a, 0x1c, 0xa1, 0x2f, 0x53, 0xae, 0x92, 0xaf, 0x69, - 0x9a, 0x1d, 0xe6, 0xab, 0x88, 0x85, 0xfe, 0xbe, 0x29, 0x5e, 0xcb, 0x2c, 0xac, 0x19, 0xe1, 0x14, - 0x34, 0x53, 0x2a, 0xb8, 0x4a, 0x03, 0xea, 0x54, 0x35, 0xfb, 0xe3, 0x37, 0x67, 0xc7, 0x86, 0xc1, - 0x6f, 0x9b, 0x0a, 0xcd, 0xc2, 0x83, 0x37, 0xec, 0xf0, 0x33, 0xd0, 0x12, 0x6a, 0x5c, 0x04, 
0x9c, - 0x9a, 0xd6, 0xe3, 0x5d, 0x03, 0x68, 0x3d, 0x2d, 0x43, 0x78, 0x3b, 0x0f, 0xf6, 0x40, 0x8d, 0x91, - 0x98, 0x3a, 0x7b, 0x3a, 0x7f, 0x73, 0x85, 0x27, 0x24, 0xa6, 0x58, 0x47, 0xa0, 0x07, 0xec, 0xec, - 0x2b, 0x12, 0x12, 0x50, 0xa7, 0xae, 0xd3, 0x1e, 0x98, 0x34, 0xfb, 0x49, 0x11, 0xc0, 0x65, 0x0e, - 0xfc, 0x1c, 0xd8, 0x3c, 0xc9, 0x1a, 0x17, 0x71, 0xe6, 0x34, 0x34, 0xc0, 0x2d, 0x00, 0xe7, 0x45, - 0xe0, 0x66, 0xdb, 0xc0, 0x25, 0x00, 0x3e, 0x03, 0x4d, 0x25, 0x68, 0x7a, 0xc6, 0x9e, 0x73, 0xa7, - 0xa9, 0x15, 0xfb, 0x08, 0x6d, 0xbf, 0x88, 0x7f, 0x0c, 0x71, 0xa6, 0xd4, 0xc8, 0x64, 0x97, 0xea, - 0x14, 0x1e, 0xbc, 0x61, 0x82, 0x23, 0x50, 0xe7, 0xe3, 0x1f, 0x68, 0x20, 0x1d, 0x5b, 0x73, 0x1e, - 0xde, 0xd9, 0x05, 0x33, 0x83, 0x08, 0x93, 0xcb, 0xe1, 0x4f, 0x92, 0xb2, 0xac, 0x01, 0xfe, 0x3b, - 0x86, 0xba, 0x7e, 0xae, 0x49, 0xb0, 0x21, 0x83, 0xdf, 0x03, 0x9b, 0xcf, 0xc3, 0xdc, 0xe9, 0x80, - 0xff, 0xc3, 0xbc, 0x91, 0xf2, 0xbc, 0xe0, 0xc1, 0x25, 0x25, 0x3c, 0x00, 0xf5, 0x30, 0x5d, 0x62, - 0xc5, 0x9c, 0x56, 0xcf, 0xea, 0x37, 0x7d, 0x90, 0x9d, 0xe1, 0x54, 0x7b, 0xb0, 0x89, 0x1c, 0xfc, - 0x52, 0x03, 0x0f, 0xb6, 0x5e, 0x85, 0x48, 0x38, 0x13, 0xf4, 0xad, 0x3c, 0x8b, 0x8f, 0x41, 0x83, - 0xcc, 0xe7, 0xfc, 0x92, 0xe6, 0x2f, 0xa3, 0xe9, 0xdf, 0x37, 0x3c, 0x8d, 0x41, 0xee, 0xc6, 0x45, - 0x1c, 0x5e, 0x80, 0xba, 0x90, 0x44, 0x2a, 0x61, 0xa6, 0xfc, 0xe1, 0xeb, 0x4d, 0xf9, 0x53, 0x8d, - 0xc9, 0xaf, 0x85, 0xa9, 0x50, 0x73, 0x89, 0x0d, 0x0f, 0xec, 0x82, 0xbd, 0x84, 0xc8, 0x60, 0xaa, - 0x27, 0x79, 0xdf, 0xb7, 0xd7, 0xab, 0xee, 0xde, 0x45, 0xe6, 0xc0, 0xb9, 0x1f, 0x9e, 0x00, 0x5b, - 0xff, 0x3c, 0x5b, 0x26, 0xc5, 0xf8, 0x76, 0x32, 0x21, 0x2f, 0x0a, 0xe7, 0xcd, 0xb6, 0x81, 0xcb, - 0x64, 0xf8, 0xab, 0x05, 0xda, 0x44, 0x85, 0x91, 0x1c, 0x30, 0xc6, 0xa5, 0x1e, 0x24, 0xe1, 0xd4, - 0x7b, 0xd5, 0x7e, 0xeb, 0x78, 0x88, 0xfe, 0x6d, 0xfb, 0xa2, 0x5b, 0x3a, 0xa3, 0xc1, 0x0e, 0xcf, - 0x90, 0xc9, 0x74, 0xe9, 0x3b, 0x46, 0xa8, 0xf6, 0x6e, 0x18, 0xdf, 0x2a, 0xdc, 0xf9, 0x02, 0xbc, - 0xff, 0x4a, 0x12, 0xd8, 0x06, 0xd5, 0x19, 0x5d, 0xe6, 0x2d, 0xc4, 0xd9, 0x2f, 0x7c, 0x0f, 0xec, - 0x2d, 0xc8, 0x5c, 0x51, 0xdd, 0x0e, 0x1b, 0xe7, 0xc6, 0xe3, 0x7b, 0x27, 0xd6, 0xc1, 0x1f, 0x16, - 0xb8, 0xbf, 0x75, 0xb8, 0x45, 0x44, 0x2f, 0xe1, 0x08, 0x34, 0xd2, 0x7c, 0x49, 0x6a, 0x8e, 0xd6, - 0x31, 0x7a, 0xed, 0xcb, 0x69, 0x94, 0xdf, 0xca, 0x5a, 0x6d, 0x0c, 0x5c, 0x70, 0xc1, 0x6f, 0xf5, - 0x4a, 0xd3, 0xb7, 0x37, 0x0b, 0xd3, 0x7b, 0x43, 0xd1, 0xfc, 0x7d, 0xb3, 0xc3, 0xb4, 0x85, 0x37, - 0x74, 0xfe, 0xe1, 0xd5, 0xb5, 0x5b, 0x79, 0x71, 0xed, 0x56, 0x5e, 0x5e, 0xbb, 0x95, 0x9f, 0xd7, - 0xae, 0x75, 0xb5, 0x76, 0xad, 0x17, 0x6b, 0xd7, 0x7a, 0xb9, 0x76, 0xad, 0x3f, 0xd7, 0xae, 0xf5, - 0xdb, 0x5f, 0x6e, 0xe5, 0xbb, 0x86, 0x21, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xc2, 0x6f, - 0x1b, 0x71, 0x07, 0x00, 0x00, + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xd6, 0x8e, 0xed, 0x1d, 0x47, 0xd4, 0x1d, 0x40, 0x5a, 0x59, 0x68, 0x6d, 0x72, 0x40, + 0x41, 0x6a, 0x66, 0x49, 0x04, 0x55, 0x54, 0x71, 0xc9, 0x92, 0x08, 0x45, 0x48, 0x4d, 0x34, 0xad, + 0x51, 0xe1, 0x80, 0x34, 0xde, 0x9d, 0xda, 0x8b, 0xbd, 0x33, 0xcb, 0xce, 0x8c, 0x83, 0x6f, 0x88, + 0x2b, 0x17, 0xfe, 0x27, 0x2e, 0x39, 0xf6, 0xd8, 0x53, 0x44, 0xcc, 0x99, 0x7f, 0x20, 0x27, 0x34, + 0xb3, 0xb3, 0x5e, 0x93, 0x34, 0xd0, 0x46, 0x3d, 0xed, 0xbc, 0x1f, 0xdf, 0xf7, 0xde, 0x7c, 0x6f, + 0xdf, 0x80, 0xa3, 0xe9, 0xbe, 0x40, 0x09, 0x0f, 0xa6, 0x6a, 0x44, 0x73, 0x46, 0x25, 0x15, 0xc1, + 0x9c, 0xb2, 0x98, 0xe7, 0x81, 0x0d, 
0x90, 0x2c, 0x09, 0x48, 0x9c, 0x26, 0x42, 0x24, 0x9c, 0x05, + 0xf3, 0xdd, 0x11, 0x95, 0x64, 0x37, 0x18, 0x53, 0x46, 0x73, 0x22, 0x69, 0x8c, 0xb2, 0x9c, 0x4b, + 0x0e, 0x3f, 0x2a, 0xb2, 0x11, 0xc9, 0x12, 0xb4, 0xca, 0x46, 0x36, 0xbb, 0xb7, 0x33, 0x4e, 0xe4, + 0x44, 0x8d, 0x50, 0xc4, 0xd3, 0x60, 0xcc, 0xc7, 0x3c, 0x30, 0xa0, 0x91, 0x7a, 0x61, 0x2c, 0x63, + 0x98, 0x53, 0x41, 0xd6, 0x7b, 0xb8, 0x5e, 0x5a, 0xc9, 0x09, 0x65, 0x32, 0x89, 0x88, 0x2c, 0xea, + 0x5f, 0x2f, 0xdd, 0xfb, 0xbc, 0xca, 0x4e, 0x49, 0x34, 0x49, 0x18, 0xcd, 0x17, 0x41, 0x36, 0x1d, + 0x6b, 0x87, 0x08, 0x52, 0x2a, 0xc9, 0xeb, 0x50, 0xc1, 0x6d, 0xa8, 0x5c, 0x31, 0x99, 0xa4, 0xf4, + 0x06, 0xe0, 0xd1, 0xff, 0x01, 0x44, 0x34, 0xa1, 0x29, 0xb9, 0x8e, 0xdb, 0xfa, 0xbb, 0x09, 0xba, + 0x07, 0xa5, 0x22, 0x98, 0xfe, 0xa4, 0xa8, 0x90, 0x30, 0x04, 0x75, 0x95, 0xc4, 0x9e, 0x33, 0x70, + 0xb6, 0xdd, 0xf0, 0xb3, 0xf3, 0x8b, 0x7e, 0x6d, 0x79, 0xd1, 0xaf, 0x0f, 0x8f, 0x0f, 0xaf, 0x2e, + 0xfa, 0x1f, 0xdf, 0x56, 0x48, 0x2e, 0x32, 0x2a, 0xd0, 0xf0, 0xf8, 0x10, 0x6b, 0x30, 0x7c, 0x0e, + 0x1a, 0xd3, 0x84, 0xc5, 0xde, 0xbd, 0x81, 0xb3, 0xdd, 0xd9, 0x7b, 0x84, 0xaa, 0x09, 0xac, 0x60, + 0x28, 0x9b, 0x8e, 0xb5, 0x43, 0x20, 0x2d, 0x03, 0x9a, 0xef, 0xa2, 0xaf, 0x73, 0xae, 0xb2, 0x6f, + 0x69, 0xae, 0x9b, 0xf9, 0x26, 0x61, 0x71, 0xb8, 0x69, 0x8b, 0x37, 0xb4, 0x85, 0x0d, 0x23, 0x9c, + 0x80, 0x76, 0x4e, 0x05, 0x57, 0x79, 0x44, 0xbd, 0xba, 0x61, 0x7f, 0xfc, 0xf6, 0xec, 0xd8, 0x32, + 0x84, 0x5d, 0x5b, 0xa1, 0x5d, 0x7a, 0xf0, 0x8a, 0x1d, 0x7e, 0x01, 0x3a, 0x42, 0x8d, 0xca, 0x80, + 0xd7, 0x30, 0x7a, 0xbc, 0x6f, 0x01, 0x9d, 0xa7, 0x55, 0x08, 0xaf, 0xe7, 0xc1, 0x01, 0x68, 0x30, + 0x92, 0x52, 0x6f, 0xc3, 0xe4, 0xaf, 0xae, 0xf0, 0x84, 0xa4, 0x14, 0x9b, 0x08, 0x0c, 0x80, 0xab, + 0xbf, 0x22, 0x23, 0x11, 0xf5, 0x9a, 0x26, 0xed, 0x81, 0x4d, 0x73, 0x9f, 0x94, 0x01, 0x5c, 0xe5, + 0xc0, 0x2f, 0x81, 0xcb, 0x33, 0x3d, 0xb8, 0x84, 0x33, 0xaf, 0x65, 0x00, 0x7e, 0x09, 0x38, 0x29, + 0x03, 0x57, 0xeb, 0x06, 0xae, 0x00, 0xf0, 0x19, 0x68, 0x2b, 0x41, 0xf3, 0x63, 0xf6, 0x82, 0x7b, + 0x6d, 0xa3, 0xd8, 0x27, 0x68, 0x7d, 0x23, 0xfe, 0xf5, 0x13, 0x6b, 0xa5, 0x86, 0x36, 0xbb, 0x52, + 0xa7, 0xf4, 0xe0, 0x15, 0x13, 0x1c, 0x82, 0x26, 0x1f, 0xfd, 0x48, 0x23, 0xe9, 0xb9, 0x86, 0x73, + 0xe7, 0xd6, 0x29, 0xd8, 0x7f, 0x10, 0x61, 0x72, 0x76, 0xf4, 0xb3, 0xa4, 0x4c, 0x0f, 0x20, 0x7c, + 0xcf, 0x52, 0x37, 0x4f, 0x0c, 0x09, 0xb6, 0x64, 0xf0, 0x07, 0xe0, 0xf2, 0x59, 0x5c, 0x38, 0x3d, + 0x70, 0x17, 0xe6, 0x95, 0x94, 0x27, 0x25, 0x0f, 0xae, 0x28, 0xe1, 0x16, 0x68, 0xc6, 0xf9, 0x02, + 0x2b, 0xe6, 0x75, 0x06, 0xce, 0x76, 0x3b, 0x04, 0xba, 0x87, 0x43, 0xe3, 0xc1, 0x36, 0x02, 0x9f, + 0x83, 0x16, 0xcf, 0xb4, 0x18, 0xc2, 0xdb, 0xbc, 0x4b, 0x07, 0xf7, 0x6d, 0x07, 0xad, 0x93, 0x82, + 0x05, 0x97, 0x74, 0x5b, 0xbf, 0x36, 0xc0, 0x83, 0xb5, 0x7d, 0x13, 0x19, 0x67, 0x82, 0xbe, 0x93, + 0x85, 0xfb, 0x14, 0xb4, 0xc8, 0x6c, 0xc6, 0xcf, 0x68, 0xb1, 0x73, 0xed, 0xaa, 0x89, 0x83, 0xc2, + 0x8d, 0xcb, 0x38, 0x3c, 0x05, 0x4d, 0x21, 0x89, 0x54, 0xc2, 0xee, 0xcf, 0xc3, 0x37, 0xdb, 0x9f, + 0xa7, 0x06, 0x53, 0x08, 0x86, 0xa9, 0x50, 0x33, 0x89, 0x2d, 0x0f, 0xec, 0x83, 0x8d, 0x8c, 0xc8, + 0x68, 0x62, 0x76, 0x64, 0x33, 0x74, 0x97, 0x17, 0xfd, 0x8d, 0x53, 0xed, 0xc0, 0x85, 0x1f, 0xee, + 0x03, 0xd7, 0x1c, 0x9e, 0x2d, 0xb2, 0x72, 0x31, 0x7a, 0x7a, 0x44, 0xa7, 0xa5, 0xf3, 0x6a, 0xdd, + 0xc0, 0x55, 0x32, 0xfc, 0xcd, 0x01, 0x5d, 0xa2, 0xe2, 0x44, 0x1e, 0x30, 0xc6, 0x25, 0x29, 0xa6, + 0xd2, 0x1c, 0xd4, 0xb7, 0x3b, 0x7b, 0x47, 0xe8, 0xbf, 0xde, 0x75, 0x74, 0x43, 0x67, 0x74, 0x70, + 0x8d, 0xe7, 0x88, 0xc9, 0x7c, 0x11, 0x7a, 0x56, 0xa8, 0xee, 
0xf5, 0x30, 0xbe, 0x51, 0xb8, 0xf7, + 0x15, 0xf8, 0xf0, 0xb5, 0x24, 0xb0, 0x0b, 0xea, 0x53, 0xba, 0x28, 0x46, 0x88, 0xf5, 0x11, 0x7e, + 0x00, 0x36, 0xe6, 0x64, 0xa6, 0xa8, 0x19, 0x87, 0x8b, 0x0b, 0xe3, 0xf1, 0xbd, 0x7d, 0x67, 0xeb, + 0x0f, 0x07, 0xdc, 0x5f, 0x6b, 0x6e, 0x9e, 0xd0, 0x33, 0x38, 0x04, 0xad, 0xbc, 0x78, 0x7e, 0x0d, + 0x47, 0x67, 0x0f, 0xbd, 0xf1, 0xe5, 0x0c, 0x2a, 0xec, 0xe8, 0x51, 0x5b, 0x03, 0x97, 0x5c, 0xf0, + 0x3b, 0xf3, 0x58, 0x9a, 0xdb, 0xdb, 0xa7, 0x38, 0x78, 0x4b, 0xd1, 0xc2, 0x4d, 0xfb, 0x3a, 0x1a, + 0x0b, 0xaf, 0xe8, 0xc2, 0x9d, 0xf3, 0x4b, 0xbf, 0xf6, 0xf2, 0xd2, 0xaf, 0xbd, 0xba, 0xf4, 0x6b, + 0xbf, 0x2c, 0x7d, 0xe7, 0x7c, 0xe9, 0x3b, 0x2f, 0x97, 0xbe, 0xf3, 0x6a, 0xe9, 0x3b, 0x7f, 0x2e, + 0x7d, 0xe7, 0xf7, 0xbf, 0xfc, 0xda, 0xf7, 0x2d, 0x4b, 0xfc, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x9a, 0x46, 0xfc, 0x70, 0xcb, 0x07, 0x00, 0x00, } diff --git a/staging/src/k8s.io/api/admission/v1beta1/generated.proto b/staging/src/k8s.io/api/admission/v1beta1/generated.proto index 451d4c9ad77..9bf414c1f47 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/admission/v1beta1/generated.proto @@ -60,7 +60,8 @@ message AdmissionRequest { // +optional optional string namespace = 6; - // Operation is the operation being performed + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. optional string operation = 7; // UserInfo is information about the requesting user @@ -78,6 +79,14 @@ message AdmissionRequest { // Defaults to false. // +optional optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; } // AdmissionResponse describes an admission response. diff --git a/staging/src/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 8a938db3b4a..700b5f9e4a1 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -35,11 +35,12 @@ var map_AdmissionRequest = map[string]string{ "subResource": "SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. For instance, /pods has the resource \"pods\" and the kind \"Pod\", while /pods/foo/status has the resource \"pods\", the sub resource \"status\", and the kind \"Pod\" (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource \"pods\", subresource \"binding\", and kind \"Binding\".", "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. 
If that is the case, this method will return the empty string.", "namespace": "Namespace is the namespace associated with the request (if any).", - "operation": "Operation is the operation being performed", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", "userInfo": "UserInfo is information about the requesting user", "object": "Object is the object from the incoming request prior to default values being applied", "oldObject": "OldObject is the existing object. Only populated for UPDATE requests.", "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", } func (AdmissionRequest) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go index 2b4352a9482..5bafcbdc5f9 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -38,6 +38,7 @@ func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { *out = new(bool) **out = **in } + in.Options.DeepCopyInto(&out.Options) return } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index a0c23a44f56..011cee45521 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -46,7 +46,7 @@ message ConversionRequest { // ConversionResponse describes a conversion response. message ConversionResponse { // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. + // This should be copied over from the corresponding ConversionRequest. optional string uid = 1; // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. 
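Taken together, PATCH 152 and PATCH 153 thread the performed operation's options (`CreateOptions`, `UpdateOptions`, or `DeleteOptions`) through `admission.Attributes` and serialize them into the new `AdmissionRequest.Options` field as a `runtime.RawExtension`. The fragment below is an illustrative sketch only and is not part of this patch series: it assumes a `k8s.io/api/admission/v1beta1` client library that already includes the `Options` field, and the handler name, endpoint path, and log messages are hypothetical.

```go
// Illustrative webhook fragment (not from these patches): reading the new
// AdmissionRequest.Options field. Handler name and messages are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// handleAdmission decodes an AdmissionReview and inspects Options, which now
// carries the option struct of the operation actually performed. A PATCH may
// surface as CREATE, in which case Options holds CreateOptions.
func handleAdmission(w http.ResponseWriter, r *http.Request) {
	var review admissionv1beta1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil || review.Request == nil {
		http.Error(w, "could not decode AdmissionReview", http.StatusBadRequest)
		return
	}
	req := review.Request

	switch req.Operation {
	case admissionv1beta1.Delete:
		var opts metav1.DeleteOptions
		if len(req.Options.Raw) > 0 {
			_ = json.Unmarshal(req.Options.Raw, &opts)
		}
		if opts.PropagationPolicy != nil {
			fmt.Printf("DELETE %s/%s with propagationPolicy=%s\n", req.Namespace, req.Name, *opts.PropagationPolicy)
		}
	case admissionv1beta1.Create:
		var opts metav1.CreateOptions
		if len(req.Options.Raw) > 0 {
			_ = json.Unmarshal(req.Options.Raw, &opts)
		}
		fmt.Printf("CREATE %s/%s with fieldManager=%q\n", req.Namespace, req.Name, opts.FieldManager)
	}

	// Allow everything; a real webhook would make a decision here.
	resp := admissionv1beta1.AdmissionReview{
		Response: &admissionv1beta1.AdmissionResponse{UID: req.UID, Allowed: true},
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(resp)
}

func main() {
	http.HandleFunc("/validate", handleAdmission)
	// A real webhook must serve TLS; plain HTTP keeps the sketch short.
	_ = http.ListenAndServe(":8443", nil)
}
```

A design point visible in the patch itself: the conversion helpers (`patchToUpdateOptions`, `patchToCreateOptions`, `updateToCreateOptions`) replace the earlier JSON round-trip with explicit copies of `DryRun` and `FieldManager`, which makes it obvious that fields such as `PatchOptions.Force` are intentionally dropped and lets each helper stamp the correct `TypeMeta` before the options reach a webhook.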
From 15a4342fe8cbdfeb357cba366107b66ccde20517 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 11:43:18 +0800 Subject: [PATCH 154/194] remove dot imports in e2e/scheduling --- .../equivalence_cache_predicates.go | 42 +++---- test/e2e/scheduling/limit_range.go | 90 +++++++-------- test/e2e/scheduling/nvidia-gpus.go | 12 +- test/e2e/scheduling/predicates.go | 108 +++++++++--------- test/e2e/scheduling/preemption.go | 78 ++++++------- test/e2e/scheduling/priorities.go | 76 ++++++------ test/e2e/scheduling/taint_based_evictions.go | 32 +++--- test/e2e/scheduling/taints.go | 58 +++++----- test/e2e/scheduling/ubernetes_lite.go | 44 +++---- test/e2e/scheduling/ubernetes_lite_volumes.go | 66 +++++------ 10 files changed, 303 insertions(+), 303 deletions(-) diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index c61dde9c851..56745293a7c 100644 --- a/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -31,8 +31,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" _ "github.com/stretchr/testify/assert" ) @@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { var ns string f := framework.NewDefaultFramework("equivalence-cache") - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name @@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { // cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) systemPodsNo = 0 for _, pod := range systemPods { if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil { @@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { } err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, node := range nodeList.Items { e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) @@ -83,15 +83,15 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { // When a replica pod (with HostPorts) is scheduled to a node, it will invalidate GeneralPredicates cache on this node, // so that subsequent replica pods with same host port claim will be rejected. // We enforce all replica pods bind to the same node so there will always be conflicts. 
- It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() { - By("Launching a RC with two replica pods with HostPorts") + ginkgo.It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() { + ginkgo.By("Launching a RC with two replica pods with HostPorts") nodeName := getNodeThatCanRunPodWithoutToleration(f) rcName := "host-port" // bind all replicas to same node nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName} - By("One pod should be scheduled, the other should be rejected") + ginkgo.By("One pod should be scheduled, the other should be rejected") // CreateNodeSelectorPods creates RC with host port 4312 WaitForSchedulerAfterAction(f, func() error { err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false) @@ -105,11 +105,11 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { // This test verifies that MatchInterPodAffinity works as expected. // In equivalence cache, it does not handle inter pod affinity (anti-affinity) specially (unless node label changed), // because current predicates algorithm will ensure newly scheduled pod does not break existing affinity in cluster. - It("validates pod affinity works properly when new replica pod is scheduled", func() { + ginkgo.It("validates pod affinity works properly when new replica pod is scheduled", func() { // create a pod running with label {security: S1}, and choose this node nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f) - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") // we need to use real failure domains, since scheduler only know them k := "failure-domain.beta.kubernetes.io/zone" v := "equivalence-e2e-test" @@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { // restore the node label defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue) - By("Trying to schedule RC with Pod Affinity should success.") + ginkgo.By("Trying to schedule RC with Pod Affinity should success.") framework.WaitForStableCluster(cs, masterNodes) affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID()) replica := 2 @@ -154,10 +154,10 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController"))) - By("Remove node failure domain label") + ginkgo.By("Remove node failure domain label") framework.RemoveLabelOffNode(cs, nodeName, k) - By("Trying to schedule another equivalent Pod should fail due to node label has been removed.") + ginkgo.By("Trying to schedule another equivalent Pod should fail due to node label has been removed.") // use scale to create another equivalent pod and wait for failure event WaitForSchedulerAfterAction(f, func() error { err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false) @@ -168,17 +168,17 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { }) // This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected. 
- It("validates pod anti-affinity works properly when new replica pod is scheduled", func() { - By("Launching two pods on two distinct nodes to get two node names") + ginkgo.It("validates pod anti-affinity works properly when new replica pod is scheduled", func() { + ginkgo.By("Launching two pods on two distinct nodes to get two node names") CreateHostPortPods(f, "host-port", 2, true) defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port") podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) - Expect(len(podList.Items)).To(Equal(2)) + gomega.Expect(len(podList.Items)).To(gomega.Equal(2)) nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName} - Expect(nodeNames[0]).ToNot(Equal(nodeNames[1])) + gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1])) - By("Applying a random label to both nodes.") + ginkgo.By("Applying a random label to both nodes.") k := "e2e.inter-pod-affinity.kubernetes.io/zone" v := "equivalence-e2etest" for _, nodeName := range nodeNames { @@ -187,7 +187,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { defer framework.RemoveLabelOffNode(cs, nodeName, k) } - By("Trying to launch a pod with the service label on the selected nodes.") + ginkgo.By("Trying to launch a pod with the service label on the selected nodes.") // run a pod with label {"service": "S1"} and expect it to be running runPausePod(f, pausePodConfig{ Name: "with-label-" + string(uuid.NewUUID()), @@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { NodeSelector: map[string]string{k: v}, // only launch on our two nodes }) - By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.") + ginkgo.By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.") labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID()) replica := 2 labelsMap := map[string]string{ @@ -270,7 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str } func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error { - By(fmt.Sprintf("Running RC which reserves host port and defines node selector")) + ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector")) config := &testutils.RCConfig{ Client: f.ClientSet, diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index 592d7319e82..b79034a8ea1 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -30,8 +30,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -41,8 +41,8 @@ const ( var _ = SIGDescribe("LimitRange", func() { f := framework.NewDefaultFramework("limitrange") - It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() { - By("Creating a LimitRange") + ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() { + ginkgo.By("Creating a LimitRange") min := getResourceList("50m", "100Mi", "100Gi") max := getResourceList("500m", "500Mi", "500Gi") @@ -54,24 +54,24 @@ var _ = SIGDescribe("LimitRange", func() { defaultLimit, defaultRequest, maxLimitRequestRatio) - By("Setting up watch") + ginkgo.By("Setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name})) options := metav1.ListOptions{LabelSelector: selector.String()} limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for limitRanges") - Expect(len(limitRanges.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges") + gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: limitRanges.ListMeta.ResourceVersion, } w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") - By("Submitting a LimitRange") + ginkgo.By("Submitting a LimitRange") limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Verifying LimitRange creation was observed") + ginkgo.By("Verifying LimitRange creation was observed") select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { @@ -81,39 +81,39 @@ var _ = SIGDescribe("LimitRange", func() { framework.Failf("Timeout while waiting for LimitRange creation") } - By("Fetching the LimitRange to ensure it has proper values") + ginkgo.By("Fetching the LimitRange to ensure it has proper values") limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} err = equalResourceRequirement(expected, actual) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Creating a Pod with no resource requirements") + ginkgo.By("Creating a Pod with no resource requirements") pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Ensuring Pod has resource requirements applied from LimitRange") + ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange") pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := range pod.Spec.Containers { err = 
equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) if err != nil { // Print the pod to help in debugging. e2elog.Logf("Pod %+v does not have the expected requirements", pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - By("Creating a Pod with partial resource requirements") + ginkgo.By("Creating a Pod with partial resource requirements") pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", "")) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Ensuring Pod has merged resource requirements applied from LimitRange") + ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange") pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // This is an interesting case, so it's worth a comment // If you specify a Limit, and no Request, the Limit will default to the Request // This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied @@ -123,49 +123,49 @@ var _ = SIGDescribe("LimitRange", func() { if err != nil { // Print the pod to help in debugging. e2elog.Logf("Pod %+v does not have the expected requirements", pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - By("Failing to create a Pod with less than min resources") + ginkgo.By("Failing to create a Pod with less than min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) - By("Failing to create a Pod with more than max resources") + ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) - By("Updating a LimitRange") + ginkgo.By("Updating a LimitRange") newMin := getResourceList("9m", "49Mi", "49Gi") limitRange.Spec.Limits[0].Min = newMin limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Verifying LimitRange updating is effective") - Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) { + ginkgo.By("Verifying LimitRange updating is effective") + gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) { limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil - })).NotTo(HaveOccurred()) + })).NotTo(gomega.HaveOccurred()) - By("Creating a Pod with less than former min resources") + ginkgo.By("Creating a Pod with less than former min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).NotTo(HaveOccurred()) +
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Failing to create a Pod with more than max resources") + ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) - By("Deleting a LimitRange") + ginkgo.By("Deleting a LimitRange") err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Verifying the LimitRange was deleted") - Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { + ginkgo.By("Verifying the LimitRange was deleted") + gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name})) options := metav1.ListOptions{LabelSelector: selector.String()} limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) @@ -190,12 +190,12 @@ var _ = SIGDescribe("LimitRange", func() { return false, nil - })).NotTo(HaveOccurred(), "kubelet never observed the termination notice") + })).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice") - By("Creating a Pod with more than former max resources") + ginkgo.By("Creating a Pod with more than former max resources") pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 1f667a3b8f6..503cf2b43f2 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -30,8 +30,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -130,7 +130,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra e2elog.Logf("Using %v", dsYamlUrl) // Creates the DaemonSet that installs Nvidia Drivers. 
ds, err := framework.DsFromManifest(dsYamlUrl) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ds.Namespace = f.Namespace.Name _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") @@ -155,9 +155,9 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra // Wait for Nvidia GPUs to be available on nodes e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...") - Eventually(func() bool { + gomega.Eventually(func() bool { return areGPUsAvailableOnAllSchedulableNodes(f) - }, driverInstallTimeout, time.Second).Should(BeTrue()) + }, driverInstallTimeout, time.Second).Should(gomega.BeTrue()) return rsgather } @@ -185,7 +185,7 @@ func testNvidiaGPUs(f *framework.Framework) { var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() { f := framework.NewDefaultFramework("device-plugin-gpus") - It("run Nvidia GPU Device Plugin tests", func() { + ginkgo.It("run Nvidia GPU Device Plugin tests", func() { testNvidiaGPUs(f) }) }) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 7a6040f94f8..8818346a8b0 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -34,8 +34,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" _ "github.com/stretchr/testify/assert" ) @@ -68,16 +68,16 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { var ns string f := framework.NewDefaultFramework("sched-pred") - AfterEach(func() { + ginkgo.AfterEach(func() { rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{}) if err == nil && *(rc.Spec.Replicas) != 0 { - By("Cleaning up the replication controller") + ginkgo.By("Cleaning up the replication controller") err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName) framework.ExpectNoError(err) } }) - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name nodeList = &v1.NodeList{} @@ -100,20 +100,20 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // if max-pods is working we need to fully saturate the cluster and keep it in this state for few seconds. 
// // Slow PR #13315 (8 min) - It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() { + ginkgo.It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() { totalPodCapacity = 0 for _, node := range nodeList.Items { e2elog.Logf("Node: %v", node) podCapacity, found := node.Status.Capacity[v1.ResourcePods] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) totalPodCapacity += podCapacity.Value() } currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes) podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods - By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) + ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) // As the pods are distributed randomly among nodes, // it can easily happen that all nodes are saturated @@ -137,7 +137,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // This test verifies we don't allow scheduling of pods in a way that sum of local ephemeral storage limits of pods is greater than machines capacity. // It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. - It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() { + ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() { framework.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery()) @@ -146,7 +146,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeToAllocatableMap := make(map[string]int64) for _, node := range nodeList.Items { allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) nodeToAllocatableMap[node.Name] = allocatable.MilliValue() if nodeMaxAllocatable < allocatable.MilliValue() { nodeMaxAllocatable = allocatable.MilliValue() @@ -174,7 +174,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod) } - By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation)) + ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation)) // As the pods are distributed randomly among nodes, // it can easily happen that all nodes are saturated @@ -245,7 +245,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name) // Find allocatable amount of CPU. 
allocatable, found := node.Status.Allocatable[v1.ResourceCPU] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) nodeToAllocatableMap[node.Name] = allocatable.MilliValue() if nodeMaxAllocatable < allocatable.MilliValue() { nodeMaxAllocatable = allocatable.MilliValue() @@ -268,7 +268,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } } - By("Starting Pods to consume most of the cluster CPU.") + ginkgo.By("Starting Pods to consume most of the cluster CPU.") // Create one pod per node that requires 70% of the node remaining CPU. fillerPods := []*v1.Pod{} for nodeName, cpu := range nodeToAllocatableMap { @@ -306,7 +306,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { for _, pod := range fillerPods { framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) } - By("Creating another pod that requires unavailable amount of CPU.") + ginkgo.By("Creating another pod that requires unavailable amount of CPU.") // Create another pod that requires 50% of the largest node CPU resources. // This pod should remain pending as at least 70% of CPU of other nodes in // the cluster are already consumed. @@ -332,7 +332,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled. */ framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() { - By("Trying to schedule Pod with nonempty NodeSelector.") + ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" framework.WaitForStableCluster(cs, masterNodes) @@ -357,14 +357,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() { nodeName := GetNodeThatCanRunPod(f) - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "42" framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v) defer framework.RemoveLabelOffNode(cs, nodeName, k) - By("Trying to relaunch the pod, now with labels.") + ginkgo.By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" createPausePod(f, pausePodConfig{ Name: labelPodName, @@ -381,13 +381,13 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName)) labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) + gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName)) }) // Test Nodes does not have any label, hence it should be impossible to schedule Pod with // non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution. 
- It("validates that NodeAffinity is respected if not matching", func() { - By("Trying to schedule Pod with nonempty NodeSelector.") + ginkgo.It("validates that NodeAffinity is respected if not matching", func() { + ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" framework.WaitForStableCluster(cs, masterNodes) @@ -427,17 +427,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // Keep the same steps with the test on NodeSelector, // but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector. - It("validates that required NodeAffinity setting is respected if matching", func() { + ginkgo.It("validates that required NodeAffinity setting is respected if matching", func() { nodeName := GetNodeThatCanRunPod(f) - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "42" framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v) defer framework.RemoveLabelOffNode(cs, nodeName, k) - By("Trying to relaunch the pod, now with labels.") + ginkgo.By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" createPausePod(f, pausePodConfig{ Name: labelPodName, @@ -468,17 +468,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName)) labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) + gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName)) }) // 1. Run a pod to get an available node, then delete the pod // 2. Taint the node with a random taint // 3. 
Try to relaunch the pod with tolerations tolerate the taints on node, // and the pod's nodeName specified to the name of node found in step 1 - It("validates that taints-tolerations is respected if matching", func() { + ginkgo.It("validates that taints-tolerations is respected if matching", func() { nodeName := getNodeThatCanRunPodWithoutToleration(f) - By("Trying to apply a random taint on the found node.") + ginkgo.By("Trying to apply a random taint on the found node.") testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -488,14 +488,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelValue := "testing-label-value" framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) defer framework.RemoveLabelOffNode(cs, nodeName, labelKey) - By("Trying to relaunch the pod, now with tolerations.") + ginkgo.By("Trying to relaunch the pod, now with tolerations.") tolerationPodName := "with-tolerations" createPausePod(f, pausePodConfig{ Name: tolerationPodName, @@ -511,17 +511,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName)) deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(deployedPod.Spec.NodeName).To(Equal(nodeName)) + gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName)) }) // 1. Run a pod to get an available node, then delete the pod // 2. Taint the node with a random taint // 3. 
Try to relaunch the pod still no tolerations, // and the pod's nodeName specified to the name of node found in step 1 - It("validates that taints-tolerations is respected if not matching", func() { + ginkgo.It("validates that taints-tolerations is respected if not matching", func() { nodeName := getNodeThatCanRunPodWithoutToleration(f) - By("Trying to apply a random taint on the found node.") + ginkgo.By("Trying to apply a random taint on the found node.") testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -531,14 +531,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelValue := "testing-label-value" framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) defer framework.RemoveLabelOffNode(cs, nodeName, labelKey) - By("Trying to relaunch the pod, still no tolerations.") + ginkgo.By("Trying to relaunch the pod, still no tolerations.") podNameNoTolerations := "still-no-tolerations" conf := pausePodConfig{ Name: podNameNoTolerations, @@ -548,17 +548,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false) verifyResult(cs, 0, 1, ns) - By("Removing taint off the node") + ginkgo.By("Removing taint off the node") WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true) verifyResult(cs, 1, 0, ns) }) - It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() { + ginkgo.It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() { nodeName := GetNodeThatCanRunPod(f) // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "90" @@ -570,21 +570,21 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { defer framework.RemoveLabelOffNode(cs, nodeName, k) port := int32(54321) - By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port)) + ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port)) createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true) - By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port)) + ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port)) createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true) - By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port)) + ginkgo.By(fmt.Sprintf("Trying 
to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port)) createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true) }) - It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() { + ginkgo.It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() { nodeName := GetNodeThatCanRunPod(f) // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not - By("Trying to apply a random label on the found node.") + ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "95" @@ -596,10 +596,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { defer framework.RemoveLabelOffNode(cs, nodeName, k) port := int32(54322) - By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port)) + ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port)) createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true) - By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port)) + ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port)) createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false) }) }) @@ -664,7 +664,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string { // scheduled onto it. 
pod := runPausePod(f, conf) - By("Explicitly delete pod here to free the resource it takes.") + ginkgo.By("Explicitly delete pod here to free the resource it takes.") err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) @@ -712,8 +712,8 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n predicate = scheduleSuccessEvent(ns, podName, "" /* any node */) } success, err := common.ObserveEventAfterAction(f, predicate, action) - Expect(err).NotTo(HaveOccurred()) - Expect(success).To(Equal(true)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(success).To(gomega.Equal(true)) } // TODO: upgrade calls in PodAffinity tests when we're able to run them @@ -732,8 +732,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched } } - Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))) - Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))) + gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))) + gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))) } // verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC @@ -751,8 +751,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected } } - Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))) - Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))) + gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))) + gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))) } func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList { @@ -767,7 +767,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin // not just take the node list and choose the first of them. Depending on the // cluster and the scheduler it might be that a "normal" pod cannot be // scheduled onto it. 
- By("Trying to launch a pod with a label to get a node which can launch it.") + ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.") pod := runPausePod(f, pausePodConfig{ Name: "with-label-" + string(uuid.NewUUID()), Labels: map[string]string{"security": "S1"}, @@ -776,17 +776,17 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin } func GetNodeThatCanRunPod(f *framework.Framework) string { - By("Trying to launch a pod without a label to get a node which can launch it.") + ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.") return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"}) } func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string { - By("Trying to launch a pod without a toleration to get a node which can launch it.") + ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.") return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"}) } func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) { - By(fmt.Sprintf("Running RC which reserves host port")) + ginkgo.By(fmt.Sprintf("Running RC which reserves host port")) config := &testutils.RCConfig{ Client: f.ClientSet, Name: id, diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 0efed3588a7..2a929723da5 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -37,8 +37,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/replicaset" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" _ "github.com/stretchr/testify/assert" ) @@ -63,19 +63,19 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { {name: highPriorityClassName, value: highPriority}, } - AfterEach(func() { + ginkgo.AfterEach(func() { for _, pair := range priorityPairs { cs.SchedulingV1().PriorityClasses().Delete(pair.name, metav1.NewDeleteOptions(0)) } }) - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name nodeList = &corev1.NodeList{} for _, pair := range priorityPairs { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) - Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) + gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true)) } framework.WaitForAllNodesHealthy(cs, time.Minute) @@ -88,17 +88,17 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // This test verifies that when a higher priority pod is created and no node with // enough resources is found, scheduler preempts a lower priority pod to schedule // the high priority pod. - It("validates basic preemption works", func() { + ginkgo.It("validates basic preemption works", func() { var podRes corev1.ResourceList // Create one pod per node that uses a lot of the node's resources. 
- By("Create pods that use 60% of node resources.") + ginkgo.By("Create pods that use 60% of node resources.") pods := make([]*corev1.Pod, len(nodeList.Items)) for i, node := range nodeList.Items { cpuAllocatable, found := node.Status.Allocatable["cpu"] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) milliCPU := cpuAllocatable.MilliValue() * 40 / 100 memAllocatable, found := node.Status.Allocatable["memory"] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) memory := memAllocatable.Value() * 60 / 100 podRes = corev1.ResourceList{} podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI) @@ -118,12 +118,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) e2elog.Logf("Created pod: %v", pods[i].Name) } - By("Wait for pods to be scheduled.") + ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) } - By("Run a high priority pod that use 60% of a node resources.") + ginkgo.By("Run a high priority pod that use 60% of a node resources.") // Create a high priority pod and make sure it is scheduled. runPausePod(f, pausePodConfig{ Name: "preemptor-pod", @@ -136,29 +136,29 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) podDeleted := (err != nil && errors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) - Expect(podDeleted).To(BeTrue()) + gomega.Expect(podDeleted).To(gomega.BeTrue()) // Other pods (mid priority ones) should be present. for i := 1; i < len(pods); i++ { livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(livePod.DeletionTimestamp).To(BeNil()) + gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } }) // This test verifies that when a critical pod is created and no node with // enough resources is found, scheduler preempts a lower priority pod to schedule // this critical pod. - It("validates lower priority pod preemption by critical pod", func() { + ginkgo.It("validates lower priority pod preemption by critical pod", func() { var podRes corev1.ResourceList // Create one pod per node that uses a lot of the node's resources. 
- By("Create pods that use 60% of node resources.") + ginkgo.By("Create pods that use 60% of node resources.") pods := make([]*corev1.Pod, len(nodeList.Items)) for i, node := range nodeList.Items { cpuAllocatable, found := node.Status.Allocatable["cpu"] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) milliCPU := cpuAllocatable.MilliValue() * 40 / 100 memAllocatable, found := node.Status.Allocatable["memory"] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) memory := memAllocatable.Value() * 60 / 100 podRes = corev1.ResourceList{} podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI) @@ -178,12 +178,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) e2elog.Logf("Created pod: %v", pods[i].Name) } - By("Wait for pods to be scheduled.") + ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) } - By("Run a critical pod that use 60% of a node resources.") + ginkgo.By("Run a critical pod that use 60% of a node resources.") // Create a critical pod and make sure it is scheduled. runPausePod(f, pausePodConfig{ Name: "critical-pod", @@ -202,12 +202,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }() podDeleted := (err != nil && errors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) - Expect(podDeleted).To(BeTrue()) + gomega.Expect(podDeleted).To(gomega.BeTrue()) // Other pods (mid priority ones) should be present. for i := 1; i < len(pods); i++ { livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(livePod.DeletionTimestamp).To(BeNil()) + gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } }) @@ -216,10 +216,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // pod is preempted to allow the higher priority pod schedule. // It also verifies that existing low priority pods are not preempted as their // preemption wouldn't help. - It("validates pod anti-affinity works in preemption", func() { + ginkgo.It("validates pod anti-affinity works in preemption", func() { var podRes corev1.ResourceList // Create a few pods that uses a small amount of resources. 
- By("Create pods that use 10% of node resources.") + ginkgo.By("Create pods that use 10% of node resources.") numPods := 4 if len(nodeList.Items) < numPods { numPods = len(nodeList.Items) @@ -228,10 +228,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { for i := 0; i < numPods; i++ { node := nodeList.Items[i] cpuAllocatable, found := node.Status.Allocatable["cpu"] - Expect(found).To(BeTrue()) + gomega.Expect(found).To(gomega.BeTrue()) milliCPU := cpuAllocatable.MilliValue() * 10 / 100 memAllocatable, found := node.Status.Allocatable["memory"] - Expect(found).To(BeTrue()) + gomega.Expect(found).To(gomega.BeTrue()) memory := memAllocatable.Value() * 10 / 100 podRes = corev1.ResourceList{} podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI) @@ -294,12 +294,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } }() - By("Wait for pods to be scheduled.") + ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) } - By("Run a high priority pod with node affinity to the first node.") + ginkgo.By("Run a high priority pod with node affinity to the first node.") // Create a high priority pod and make sure it is scheduled. runPausePod(f, pausePodConfig{ Name: "preemptor-pod", @@ -327,12 +327,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) podDeleted := (err != nil && errors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) - Expect(podDeleted).To(BeTrue()) + gomega.Expect(podDeleted).To(gomega.BeTrue()) // Other pods (low priority ones) should be present. for i := 1; i < len(pods); i++ { livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(livePod.DeletionTimestamp).To(BeNil()) + gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } }) }) @@ -342,7 +342,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() { var ns string f := framework.NewDefaultFramework("sched-pod-priority") - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name @@ -351,9 +351,9 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() { }) // This test verifies that system critical priorities are created automatically and resolved properly. 
- It("validates critical system priorities are created and resolved", func() { + ginkgo.It("validates critical system priorities are created and resolved", func() { // Create pods that use system critical priorities and - By("Create pods that use critical system priorities.") + ginkgo.By("Create pods that use critical system priorities.") systemPriorityClasses := []string{ scheduling.SystemNodeCritical, scheduling.SystemClusterCritical, } @@ -368,7 +368,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() { err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }() - Expect(pod.Spec.Priority).NotTo(BeNil()) + gomega.Expect(pod.Spec.Priority).NotTo(gomega.BeNil()) e2elog.Logf("Created pod: %v", pod.Name) } }) @@ -386,9 +386,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { priorityPairs := make([]priorityPair, 0) - AfterEach(func() { + ginkgo.AfterEach(func() { // print out additional info if tests failed - if CurrentGinkgoTestDescription().Failed { + if ginkgo.CurrentGinkgoTestDescription().Failed { // list existing priorities priorityList, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{}) if err != nil { @@ -414,12 +414,12 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { } }) - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name // find an available node - By("Finding an available node") + ginkgo.By("Finding an available node") nodeName := GetNodeThatCanRunPod(f) e2elog.Logf("found a healthy node: %s", nodeName) @@ -453,11 +453,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) e2elog.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err) } - Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) + gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true)) } }) - It("runs ReplicaSets to verify preemption running path", func() { + ginkgo.It("runs ReplicaSets to verify preemption running path", func() { podNamesSeen := make(map[string]struct{}) stopCh := make(chan struct{}) diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index d460f937ffd..0c55f2e3e28 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -22,8 +22,8 @@ import ( "math" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" _ "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" @@ -66,10 +66,10 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { var ns string f := framework.NewDefaultFramework("sched-priority") - AfterEach(func() { + ginkgo.AfterEach(func() { }) - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name nodeList = &v1.NodeList{} @@ -80,18 +80,18 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { err := framework.CheckTestingNSDeletedExcept(cs, ns) framework.ExpectNoError(err) err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() { - By("Trying to launch a pod with a label to get a node which can launch it.") + ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() { + ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.") pod := runPausePod(f, pausePodConfig{ Name: "pod-with-label-security-s1", Labels: map[string]string{"security": "S1"}, }) nodeName := pod.Spec.NodeName - By("Trying to apply a label on the found node.") + ginkgo.By("Trying to apply a label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", "node-topologyKey") v := "topologyvalue" framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) @@ -100,7 +100,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { // make the nodes have balanced cpu,mem usage err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6) framework.ExpectNoError(err) - By("Trying to launch the pod with podAntiAffinity.") + ginkgo.By("Trying to launch the pod with podAntiAffinity.") labelPodName := "pod-with-pod-antiaffinity" pod = createPausePod(f, pausePodConfig{ Resources: podRequestedResource, @@ -136,20 +136,20 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { }, }, }) - By("Wait the pod becomes running") + ginkgo.By("Wait the pod becomes running") framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) - By("Verify the pod was scheduled to the expected node.") - Expect(labelPod.Spec.NodeName).NotTo(Equal(nodeName)) + ginkgo.By("Verify the pod was scheduled to the expected node.") + gomega.Expect(labelPod.Spec.NodeName).NotTo(gomega.Equal(nodeName)) }) - It("Pod should avoid nodes that have avoidPod annotation", func() { + ginkgo.It("Pod should avoid nodes that have avoidPod annotation", func() { nodeName := nodeList.Items[0].Name // make the nodes have balanced cpu,mem usage err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5) framework.ExpectNoError(err) - By("Create a RC, with 0 replicas") + ginkgo.By("Create a RC, with 0 replicas") rc := createRC(ns, "scheduler-priority-avoid-pod", int32(0), map[string]string{"name": "scheduler-priority-avoid-pod"}, f, podRequestedResource) // Cleanup the replication controller when we are done. 
defer func() { @@ -159,7 +159,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { } }() - By("Trying to apply avoidPod annotations on the first node.") + ginkgo.By("Trying to apply avoidPod annotations on the first node.") avoidPod := v1.AvoidPods{ PreferAvoidPods: []v1.PreferAvoidPodsEntry{ { @@ -189,30 +189,30 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { return node.Annotations[v1.PreferAvoidPodsAnnotationKey] == string(val) } success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action) - Expect(err).NotTo(HaveOccurred()) - Expect(success).To(Equal(true)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(success).To(gomega.Equal(true)) defer framework.RemoveAvoidPodsOffNode(cs, nodeName) - By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1)) + ginkgo.By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1)) framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true) testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{ LabelSelector: "name=scheduler-priority-avoid-pod", }) - Expect(err).NotTo(HaveOccurred()) - By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName)) for _, pod := range testPods.Items { - Expect(pod.Spec.NodeName).NotTo(Equal(nodeName)) + gomega.Expect(pod.Spec.NodeName).NotTo(gomega.Equal(nodeName)) } }) - It("Pod should be preferably scheduled to nodes pod can tolerate", func() { + ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func() { // make the nodes have balanced cpu,mem usage ratio err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5) framework.ExpectNoError(err) //we need apply more taints on a node, because one match toleration only count 1 - By("Trying to apply 10 taint on the nodes except first one.") + ginkgo.By("Trying to apply 10 taint on the nodes except first one.") nodeName := nodeList.Items[0].Name for index, node := range nodeList.Items { @@ -224,19 +224,19 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { defer framework.RemoveTaintOffNode(cs, node.Name, *testTaint) } } - By("Create a pod without any tolerations") + ginkgo.By("Create a pod without any tolerations") tolerationPodName := "without-tolerations" pod := createPausePod(f, pausePodConfig{ Name: tolerationPodName, }) framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("Pod should prefer scheduled to the node don't have the taint.") + ginkgo.By("Pod should prefer scheduled to the node don't have the taint.") tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(tolePod.Spec.NodeName).To(Equal(nodeName)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName)) - By("Trying to apply 10 taint on the first node.") + ginkgo.By("Trying to apply 10 taint on the first node.") var tolerations []v1.Toleration for i := 0; i < 10; i++ { testTaint := addRandomTaitToNode(cs, nodeName) @@ -244,17 +244,17 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { defer framework.RemoveTaintOffNode(cs, nodeName, *testTaint) } tolerationPodName = "with-tolerations" - By("Create a pod that tolerates all the taints of the first node.") + 
ginkgo.By("Create a pod that tolerates all the taints of the first node.") pod = createPausePod(f, pausePodConfig{ Name: tolerationPodName, Tolerations: tolerations, }) framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("Pod should prefer scheduled to the node that pod can tolerate.") + ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.") tolePod, err = cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(tolePod.Spec.NodeName).To(Equal(nodeName)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName)) }) }) @@ -279,11 +279,11 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n ratio = math.Max(maxCPUFraction, maxMemFraction) for _, node := range nodes { memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) memAllocatableVal := memAllocatable.Value() cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) cpuAllocatableMil := cpuAllocatable.MilliValue() needCreateResource := v1.ResourceList{} @@ -310,7 +310,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n } for _, node := range nodes { - By("Compute Cpu, Mem Fraction after create balanced pods.") + ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.") computeCpuMemFraction(cs, node, requestedResource) } @@ -337,7 +337,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re } } cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) cpuAllocatableMil := cpuAllocatable.MilliValue() floatOne := float64(1) @@ -346,7 +346,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re cpuFraction = floatOne } memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory] - Expect(found).To(Equal(true)) + gomega.Expect(found).To(gomega.Equal(true)) memAllocatableVal := memAllocatable.Value() memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal) if memFraction > floatOne { @@ -398,7 +398,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string, }, } rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(rc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return rc } diff --git a/test/e2e/scheduling/taint_based_evictions.go b/test/e2e/scheduling/taint_based_evictions.go index 686cfc7b8d8..556b9c45284 100644 --- a/test/e2e/scheduling/taint_based_evictions.go +++ b/test/e2e/scheduling/taint_based_evictions.go @@ -28,7 +28,7 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) func newUnreachableNoExecuteTaint() *v1.Taint { @@ -52,7 +52,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { var cs clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name // skip if TaintBasedEvictions is not enabled @@ -72,10 +72,10 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { // When network issue recovers, it's expected to see: // 5. 
node lifecycle manager generate a status change: [NodeReady=true, status=ConditionTrue] // 6. node.kubernetes.io/unreachable=:NoExecute taint is taken off the node - It("Checks that the node becomes unreachable", func() { + ginkgo.It("Checks that the node becomes unreachable", func() { // find an available node nodeName := GetNodeThatCanRunPod(f) - By("Finding an available node " + nodeName) + ginkgo.By("Finding an available node " + nodeName) // pod0 is a pod with unschedulable=:NoExecute toleration, and tolerationSeconds=0s // pod1 is a pod with unschedulable=:NoExecute toleration, and tolerationSeconds=200s @@ -83,7 +83,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { base := "taint-based-eviction" tolerationSeconds := []int64{0, 200} numPods := len(tolerationSeconds) + 1 - By(fmt.Sprintf("Preparing %v pods", numPods)) + ginkgo.By(fmt.Sprintf("Preparing %v pods", numPods)) pods := make([]*v1.Pod, numPods) zero := int64(0) // build pod0, pod1 @@ -108,7 +108,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { NodeName: nodeName, }) - By("Verifying all pods are running properly") + ginkgo.By("Verifying all pods are running properly") for _, pod := range pods { framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) } @@ -121,7 +121,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { } node := nodeList.Items[0] - By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName)) + ginkgo.By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName)) host, err := framework.GetNodeExternalIP(&node) // TODO(Huang-Wei): make this case work for local provider // if err != nil { @@ -132,19 +132,19 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { taint := newUnreachableNoExecuteTaint() defer func() { - By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name)) for _, masterAddress := range masterAddresses { framework.UnblockNetwork(host, masterAddress) } - if CurrentGinkgoTestDescription().Failed { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.Failf("Current e2e test has failed, so return from here.") return } - By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName)) + ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName)) framework.WaitForNodeToBeReady(cs, nodeName, time.Minute*1) - By("Expecting to see unreachable=:NoExecute taint is taken off") + ginkgo.By("Expecting to see unreachable=:NoExecute taint is taken off") err := framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, false, time.Second*30) framework.ExpectNoError(err) }() @@ -153,15 +153,15 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { framework.BlockNetwork(host, masterAddress) } - By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName)) + ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName)) if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) { framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName) } - By("Expecting to see unreachable=:NoExecute taint is applied") + ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied") err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30) framework.ExpectNoError(err) - By("Expecting pod0 to be evicted immediately") + ginkgo.By("Expecting pod0 to be evicted immediately") err = framework.WaitForPodCondition(cs, ns, 
pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) { // as node is unreachable, pod0 is expected to be in Terminating status // rather than getting deleted @@ -172,7 +172,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { }) framework.ExpectNoError(err) - By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300") + ginkgo.By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300") err = framework.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) { if seconds, err := getTolerationSeconds(pod.Spec.Tolerations); err == nil { return seconds == 300, nil @@ -181,7 +181,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { }) framework.ExpectNoError(err) - By("Expecting pod1 to be unchanged") + ginkgo.By("Expecting pod1 to be unchanged") livePod1, err := cs.CoreV1().Pods(pods[1].Namespace).Get(pods[1].Name, metav1.GetOptions{}) framework.ExpectNoError(err) seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations) diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go index 111349f7ff2..532be82dae9 100644 --- a/test/e2e/scheduling/taints.go +++ b/test/e2e/scheduling/taints.go @@ -19,7 +19,7 @@ package scheduling import ( "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" _ "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" @@ -155,7 +155,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { var ns string f := framework.NewDefaultFramework("taint-single-pod") - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name @@ -168,26 +168,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 1. Run a pod // 2. Taint the node running this pod with a no-execute taint // 3. See if pod will get evicted - It("evicts pods from tainted nodes", func() { + ginkgo.It("evicts pods from tainted nodes", func() { podName := "taint-eviction-1" pod := createPodForTaintsTest(false, 0, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podName, ns) - By("Starting pod...") + ginkgo.By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) - By("Trying to apply a taint on the Node") + ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit - By("Waiting for Pod to be deleted") + ginkgo.By("Waiting for Pod to be deleted") timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: @@ -200,26 +200,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 1. Run a pod with toleration // 2. Taint the node running this pod with a no-execute taint // 3. 
See if pod won't get evicted - It("doesn't evict pod with tolerations from tainted nodes", func() { + ginkgo.It("doesn't evict pod with tolerations from tainted nodes", func() { podName := "taint-eviction-2" pod := createPodForTaintsTest(true, 0, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podName, ns) - By("Starting pod...") + ginkgo.By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) - By("Trying to apply a taint on the Node") + ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit - By("Waiting for Pod to be deleted") + ginkgo.By("Waiting for Pod to be deleted") timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: @@ -233,26 +233,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 2. Taint the node running this pod with a no-execute taint // 3. See if pod won't get evicted before toleration time runs out // 4. See if pod will get evicted after toleration time runs out - It("eventually evict pod with finite tolerations from tainted nodes", func() { + ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func() { podName := "taint-eviction-3" pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podName, ns) - By("Starting pod...") + ginkgo.By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) - By("Trying to apply a taint on the Node") + ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit - By("Waiting to see if a Pod won't be deleted") + ginkgo.By("Waiting to see if a Pod won't be deleted") timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: @@ -261,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { framework.Failf("Pod was evicted despite toleration") return } - By("Waiting for Pod to be deleted") + ginkgo.By("Waiting for Pod to be deleted") timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: @@ -277,19 +277,19 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 3. Wait some time // 4. Remove the taint // 5. See if Pod won't be evicted. 
- It("removing taint cancels eviction", func() { + ginkgo.It("removing taint cancels eviction", func() { podName := "taint-eviction-4" pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podName, ns) - By("Starting pod...") + ginkgo.By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) - By("Trying to apply a taint on the Node") + ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) @@ -301,7 +301,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { }() // Wait a bit - By("Waiting short time to make sure Pod is queued for deletion") + ginkgo.By("Waiting short time to make sure Pod is queued for deletion") timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C select { case <-timeoutChannel: @@ -313,7 +313,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { e2elog.Logf("Removing taint from Node") framework.RemoveTaintOffNode(cs, nodeName, testTaint) taintRemoved = true - By("Waiting some time to make sure that toleration time passed.") + ginkgo.By("Waiting some time to make sure that toleration time passed.") timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: @@ -329,7 +329,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { var ns string f := framework.NewDefaultFramework("taint-multiple-pods") - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name @@ -342,7 +342,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { // 1. Run two pods; one with toleration, one without toleration // 2. Taint the nodes running those pods with a no-execute taint // 3. See if pod-without-toleration get evicted, and pod-with-toleration is kept - It("only evicts pods without tolerations from tainted nodes", func() { + ginkgo.It("only evicts pods without tolerations from tainted nodes", func() { podGroup := "taint-eviction-a" observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) @@ -351,7 +351,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { pod1 := createPodForTaintsTest(false, 0, podGroup+"1", podGroup, ns) pod2 := createPodForTaintsTest(true, 0, podGroup+"2", podGroup, ns) - By("Starting pods...") + ginkgo.By("Starting pods...") nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) framework.ExpectNoError(err) e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName1) @@ -359,7 +359,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { framework.ExpectNoError(err) e2elog.Logf("Pod2 is running on %v. 
Tainting Node", nodeName2) - By("Trying to apply a taint on the Nodes") + ginkgo.By("Trying to apply a taint on the Nodes") testTaint := getTestTaint() framework.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint) framework.ExpectNodeHasTaint(cs, nodeName1, &testTaint) @@ -371,7 +371,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { } // Wait a bit - By("Waiting for Pod1 to be deleted") + ginkgo.By("Waiting for Pod1 to be deleted") timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C var evicted int for { @@ -398,7 +398,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { // 1. Run two pods both with toleration; one with tolerationSeconds=5, the other with 25 // 2. Taint the nodes running those pods with a no-execute taint // 3. See if both pods get evicted in between [5, 25] seconds - It("evicts pods with minTolerationSeconds", func() { + ginkgo.It("evicts pods with minTolerationSeconds", func() { podGroup := "taint-eviction-b" observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) @@ -407,7 +407,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { pod1 := createPodForTaintsTest(true, AdditionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns) pod2 := createPodForTaintsTest(true, 5*AdditionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns) - By("Starting pods...") + ginkgo.By("Starting pods...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -423,14 +423,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { framework.ExpectNoError(err) e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName) - By("Trying to apply a taint on the Node") + ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit - By("Waiting for Pod1 and Pod2 to be deleted") + ginkgo.By("Waiting for Pod1 and Pod2 to be deleted") timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C var evicted int for evicted != 2 { diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index b0bae6d4222..246605d2872 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -20,8 +20,8 @@ import ( "fmt" "math" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -39,22 +39,22 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { var zoneCount int var err error image := framework.ServeHostnameImage - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke", "aws") if zoneCount <= 0 { zoneCount, err = getZoneCount(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) + ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. 
Zone count = %d", zoneCount)) msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount) framework.SkipUnlessAtLeast(zoneCount, 2, msg) // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread }) - It("should spread the pods of a service across zones", func() { + ginkgo.It("should spread the pods of a service across zones", func() { SpreadServiceOrFail(f, (2*zoneCount)+1, image) }) - It("should spread the pods of a replication controller across zones", func() { + ginkgo.It("should spread the pods of a replication controller across zones", func() { SpreadRCOrFail(f, int32((2*zoneCount)+1), image) }) }) @@ -79,7 +79,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) }, } _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Now create some pods behind the service podSpec := &v1.Pod{ @@ -106,12 +106,12 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Now make sure they're spread across zones zoneNames, err := framework.GetClusterZones(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) - Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true)) } // Find the name of the zone in which a Node is running @@ -136,9 +136,9 @@ func getZoneCount(c clientset.Interface) (int, error) { // Find the name of the zone in which the pod is scheduled func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) { - By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName)) + ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName)) node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return getZoneNameForNode(*node) } @@ -154,7 +154,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str continue } zoneName, err := getZoneNameForPod(c, pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) podsPerZone[zoneName] = podsPerZone[zoneName] + 1 } minPodsPerZone := math.MaxInt32 @@ -167,7 +167,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str maxPodsPerZone = podCount } } - Expect(minPodsPerZone).To(BeNumerically("~", maxPodsPerZone, 1), + gomega.Expect(minPodsPerZone).To(gomega.BeNumerically("~", maxPodsPerZone, 1), "Pods were not evenly spread across zones. 
%d in one zone and %d in another zone", minPodsPerZone, maxPodsPerZone) return true, nil @@ -176,7 +176,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str // Check that the pods comprising a replication controller get spread evenly across available zones func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { name := "ubelite-spread-rc-" + string(uuid.NewUUID()) - By(fmt.Sprintf("Creating replication controller %s", name)) + ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ Namespace: f.Namespace.Name, @@ -203,7 +203,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { }, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. @@ -214,15 +214,15 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { // List the pods, making sure we observe all the replicas. selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for all of them to be scheduled - By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector)) + ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector)) pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Now make sure they're spread across zones zoneNames, err := framework.GetClusterZones(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) - Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true)) } diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index d282b002475..a29251638f3 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -20,8 +20,8 @@ import ( "fmt" "strconv" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -38,22 +38,22 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() { var zoneCount int var err error image := framework.ServeHostnameImage - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") if zoneCount <= 0 { zoneCount, err = getZoneCount(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) + ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. 
Zone count = %d", zoneCount)) msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount) framework.SkipUnlessAtLeast(zoneCount, 2, msg) // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread }) - It("should schedule pods in the same zones as statically provisioned PVs", func() { + ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() { PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image) }) - It("should only be allowed to provision PDs in zones where nodes exist", func() { + ginkgo.It("should only be allowed to provision PDs in zones where nodes exist", func() { OnlyAllowNodeZones(f, zoneCount, image) }) }) @@ -61,17 +61,17 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() { // OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Get all the zones that the nodes are in expectedZones, err := gceCloud.GetAllZonesFromCloudProvider() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Expected zones: %v", expectedZones) // Get all the zones in this current region region := gceCloud.Region() allZonesInRegion, err := gceCloud.ListZonesInRegion(region) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) var extraZone string for _, zone := range allZonesInRegion { @@ -80,9 +80,9 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { break } } - Expect(extraZone).NotTo(Equal(""), fmt.Sprintf("No extra zones available in region %s", region)) + gomega.Expect(extraZone).NotTo(gomega.Equal(""), fmt.Sprintf("No extra zones available in region %s", region)) - By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone)) + ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone)) project := framework.TestContext.CloudConfig.ProjectID zone := extraZone myuuid := string(uuid.NewUUID()) @@ -117,16 +117,16 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { } err = gceCloud.InsertInstance(project, zone, rb) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { // Teardown of the compute instance e2elog.Logf("Deleting compute resource: %v", name) err := gceCloud.DeleteInstance(project, zone, name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes") + ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes") // Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1 // This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees" var pvcList []*v1.PersistentVolumeClaim @@ -136,7 +136,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { for index := 1; index <= zoneCount+1; index++ { pvc := newNamedDefaultClaim(ns, index) pvc, err = framework.CreatePVC(c, ns, pvc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcList = append(pvcList, pvc) // Defer the cleanup @@ -152,25 +152,25 @@ func OnlyAllowNodeZones(f *framework.Framework, 
zoneCount int, image string) { // Wait for all claims bound for _, claim := range pvcList { err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } pvZones := sets.NewString() - By("Checking that PDs have been provisioned in only the expected zones") + ginkgo.By("Checking that PDs have been provisioned in only the expected zones") for _, claim := range pvcList { // Get a new copy of the claim to have all fields populated claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Get the related PV pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain] - Expect(ok).To(BeTrue(), "PV has no LabelZone to be found") + gomega.Expect(ok).To(gomega.BeTrue(), "PV has no LabelZone to be found") pvZones.Insert(pvZone) } - Expect(pvZones.Equal(expectedZones)).To(BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones)) + gomega.Expect(pvZones.Equal(expectedZones)).To(gomega.BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones)) } type staticPVTestConfig struct { @@ -187,16 +187,16 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) ns := f.Namespace.Name zones, err := framework.GetClusterZones(c) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) zonelist := zones.List() - By("Creating static PVs across zones") + ginkgo.By("Creating static PVs across zones") configs := make([]*staticPVTestConfig, podCount) for i := range configs { configs[i] = &staticPVTestConfig{} } defer func() { - By("Cleaning up pods and PVs") + ginkgo.By("Cleaning up pods and PVs") for _, config := range configs { framework.DeletePodOrFail(c, ns, config.pod.Name) } @@ -204,14 +204,14 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns) framework.PVPVCCleanup(c, ns, config.pv, config.pvc) err = framework.DeletePVSource(config.pvSource) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() for i, config := range configs { zone := zonelist[i%len(zones)] config.pvSource, err = framework.CreatePVSource(zone) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvConfig := framework.PersistentVolumeConfig{ NamePrefix: "multizone-pv", @@ -222,25 +222,25 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className} config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By("Waiting for all PVCs to be bound") + ginkgo.By("Waiting for all PVCs to be bound") for _, config := range configs { framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc) } - By("Creating pods for each static PV") + ginkgo.By("Creating pods for each static PV") for _, config := range configs { podConfig := 
framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") config.pod, err = c.CoreV1().Pods(ns).Create(podConfig) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By("Waiting for all pods to be running") + ginkgo.By("Waiting for all pods to be running") for _, config := range configs { err = framework.WaitForPodRunningInNamespace(c, config.pod) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } From 4cb48644874878ad7ae8418e9c95c1e0a95b820c Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 10 May 2019 11:46:30 +0800 Subject: [PATCH 155/194] replace test error checking with more readable way --- test/e2e/scheduling/limit_range.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index b79034a8ea1..37b38d9ebff 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -130,12 +130,12 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Failing to create a Pod with less than min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Updating a LimitRange") newMin := getResourceList("9m", "49Mi", "49Gi") @@ -158,7 +158,7 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - gomega.Expect(err).To(gomega.HaveOccurred()) + framework.ExpectError(err) ginkgo.By("Deleting a LimitRange") err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30)) From ccecc67a5b8a6da41a4ce7c9dbc018e79bbdc9a4 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Tue, 14 May 2019 14:18:52 +0800 Subject: [PATCH 156/194] fix golint error in test/e2e/scheduling --- hack/.golint_failures | 1 - .../equivalence_cache_predicates.go | 4 +- test/e2e/scheduling/framework.go | 1 + test/e2e/scheduling/nvidia-gpus.go | 15 +-- test/e2e/scheduling/predicates.go | 9 +- test/e2e/scheduling/preemption.go | 1 + test/e2e/scheduling/priorities.go | 22 ++-- test/e2e/scheduling/taints.go | 103 +++++++++--------- test/e2e/scheduling/ubernetes_lite.go | 6 +- test/e2e/scheduling/ubernetes_lite_volumes.go | 3 +- 10 files changed, 87 insertions(+), 78 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 786e60789cc..0e0efe4e94e 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -605,7 +605,6 @@ test/e2e/common test/e2e/framework test/e2e/lifecycle/bootstrap test/e2e/scalability -test/e2e/scheduling test/e2e/storage/drivers test/e2e/storage/testsuites test/e2e/storage/utils diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index 56745293a7c..d7f6b0cf557 100644 --- a/test/e2e/scheduling/equivalence_cache_predicates.go +++ 
b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -33,6 +33,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + // ensure libs have a chance to initialize _ "github.com/stretchr/testify/assert" ) @@ -92,7 +93,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName} ginkgo.By("One pod should be scheduled, the other should be rejected") - // CreateNodeSelectorPods creates RC with host port 4312 + // CreateNodeSelectorPods creates RC with host port 4321 WaitForSchedulerAfterAction(f, func() error { err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false) return err @@ -269,6 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str } } +// CreateNodeSelectorPods creates RC with host port 4321 and defines node selector func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error { ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector")) diff --git a/test/e2e/scheduling/framework.go b/test/e2e/scheduling/framework.go index 8b732791105..b5a32c66b2d 100644 --- a/test/e2e/scheduling/framework.go +++ b/test/e2e/scheduling/framework.go @@ -18,6 +18,7 @@ package scheduling import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-scheduling] "+text, body) } diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 503cf2b43f2..8bc36ea0ffe 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -42,7 +42,7 @@ const ( var ( gpuResourceName v1.ResourceName - dsYamlUrl string + dsYamlURL string ) func makeCudaAdditionDevicePluginTestPod() *v1.Pod { @@ -116,20 +116,21 @@ func getGPUsAvailable(f *framework.Framework) int64 { return gpusAvailable } +// SetupNVIDIAGPUNode install Nvidia Drivers and wait for Nvidia GPUs to be available on nodes func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer { logOSImages(f) - dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET") - if dsYamlUrlFromEnv != "" { - dsYamlUrl = dsYamlUrlFromEnv + dsYamlURLFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET") + if dsYamlURLFromEnv != "" { + dsYamlURL = dsYamlURLFromEnv } else { - dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml" + dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml" } gpuResourceName = gpu.NVIDIAGPUResourceName - e2elog.Logf("Using %v", dsYamlUrl) + e2elog.Logf("Using %v", dsYamlURL) // Creates the DaemonSet that installs Nvidia Drivers. 
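The rename from dsYamlUrl to dsYamlURL in this hunk follows golint's initialism rule: abbreviations such as URL, CPU and ID keep consistent capitalization in identifiers, and exported names carry a doc comment that begins with the name itself. A small sketch of both conventions (the identifiers and URL value below are placeholders, not taken from the patch):

package main

import "fmt"

// driverDaemonSetURL keeps the URL initialism fully capitalized
// (dsYamlURL, not dsYamlUrl); the value is a placeholder.
const driverDaemonSetURL = "https://example.invalid/daemonset.yaml"

// InstallerConfig is exported, so golint expects this comment to begin
// with the identifier name.
type InstallerConfig struct {
	// DaemonSetURL is the manifest location used by the installer.
	DaemonSetURL string
}

func main() {
	cfg := InstallerConfig{DaemonSetURL: driverDaemonSetURL}
	fmt.Println("installing drivers from", cfg.DaemonSetURL)
}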
- ds, err := framework.DsFromManifest(dsYamlUrl) + ds, err := framework.DsFromManifest(dsYamlURL) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ds.Namespace = f.Namespace.Name _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 8818346a8b0..24420b85468 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -36,6 +36,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + // ensure libs have a chance to initialize _ "github.com/stretchr/testify/assert" ) @@ -727,9 +728,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched if !printed { printed = true return msg - } else { - return "" } + return "" } gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))) @@ -746,9 +746,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected if !printed { printed = true return msg - } else { - return "" } + return "" } gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))) @@ -775,6 +774,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin return pod.Spec.NodeName, pod.Name } +// GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it func GetNodeThatCanRunPod(f *framework.Framework) string { ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.") return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"}) @@ -785,6 +785,7 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string { return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"}) } +// CreateHostPortPods creates RC with host port 4321 func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) { ginkgo.By(fmt.Sprintf("Running RC which reserves host port")) config := &testutils.RCConfig{ diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 2a929723da5..8c75df34293 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -39,6 +39,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + // ensure libs have a chance to initialize _ "github.com/stretchr/testify/assert" ) diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 0c55f2e3e28..838caa754f5 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -24,6 +24,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + // ensure libs have a chance to initialize _ "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" @@ -40,14 +41,15 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" ) +// Resource is a collection of compute resource. 
type Resource struct { MilliCPU int64 Memory int64 } -var balancePodLabel map[string]string = map[string]string{"name": "priority-balanced-memory"} +var balancePodLabel = map[string]string{"name": "priority-balanced-memory"} -var podRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{ +var podRequestedResource = &v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("100Mi"), v1.ResourceCPU: resource.MustParse("100m"), @@ -265,7 +267,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n var cpuFractionMap = make(map[string]float64) var memFractionMap = make(map[string]float64) for _, node := range nodes { - cpuFraction, memFraction := computeCpuMemFraction(cs, node, requestedResource) + cpuFraction, memFraction := computeCPUMemFraction(cs, node, requestedResource) cpuFractionMap[node.Name] = cpuFraction memFractionMap[node.Name] = memFraction if cpuFraction > maxCPUFraction { @@ -311,15 +313,15 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n for _, node := range nodes { ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.") - computeCpuMemFraction(cs, node, requestedResource) + computeCPUMemFraction(cs, node, requestedResource) } return nil } -func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) { - e2elog.Logf("ComputeCpuMemFraction for node: %v", node.Name) - totalRequestedCpuResource := resource.Requests.Cpu().MilliValue() +func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) { + e2elog.Logf("ComputeCPUMemFraction for node: %v", node.Name) + totalRequestedCPUResource := resource.Requests.Cpu().MilliValue() totalRequestedMemResource := resource.Requests.Memory().Value() allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { @@ -332,7 +334,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort { continue } - totalRequestedCpuResource += getNonZeroRequests(&pod).MilliCPU + totalRequestedCPUResource += getNonZeroRequests(&pod).MilliCPU totalRequestedMemResource += getNonZeroRequests(&pod).Memory } } @@ -341,7 +343,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re cpuAllocatableMil := cpuAllocatable.MilliValue() floatOne := float64(1) - cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil) + cpuFraction := float64(totalRequestedCPUResource) / float64(cpuAllocatableMil) if cpuFraction > floatOne { cpuFraction = floatOne } @@ -353,7 +355,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re memFraction = floatOne } - e2elog.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction) + e2elog.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction) e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction) return cpuFraction, memFraction diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go index 532be82dae9..b4837de429a 100644 --- a/test/e2e/scheduling/taints.go +++ b/test/e2e/scheduling/taints.go @@ 
-20,6 +20,7 @@ import ( "time" "github.com/onsi/ginkgo" + // ensure libs have a chance to initialize _ "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" @@ -65,48 +66,46 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, }, }, } - } else { - if tolerationSeconds <= 0 { - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: ns, - Labels: map[string]string{"group": podLabel}, - DeletionGracePeriodSeconds: &grace, - // default - tolerate forever - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "pause", - Image: "k8s.gcr.io/pause:3.1", - }, + } + if tolerationSeconds <= 0 { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: ns, + Labels: map[string]string{"group": podLabel}, + DeletionGracePeriodSeconds: &grace, + // default - tolerate forever + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "pause", + Image: "k8s.gcr.io/pause:3.1", }, - Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}}, }, - } - } else { - ts := int64(tolerationSeconds) - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: ns, - Labels: map[string]string{"group": podLabel}, - DeletionGracePeriodSeconds: &grace, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "pause", - Image: "k8s.gcr.io/pause:3.1", - }, - }, - // default - tolerate forever - Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}}, - }, - } + Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}}, + }, } } + ts := int64(tolerationSeconds) + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: ns, + Labels: map[string]string{"group": podLabel}, + DeletionGracePeriodSeconds: &grace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "pause", + Image: "k8s.gcr.io/pause:3.1", + }, + }, + // default - tolerate forever + Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}}, + }, + } } // Creates and starts a controller (informer) that watches updates on a pod in given namespace with given name. It puts a new @@ -141,8 +140,8 @@ func createTestController(cs clientset.Interface, observedDeletions chan string, } const ( - KubeletPodDeletionDelaySeconds = 60 - AdditionalWaitPerDeleteSeconds = 5 + kubeletPodDeletionDelaySeconds = 60 + additionalWaitPerDeleteSeconds = 5 ) // Tests the behavior of NoExecuteTaintManager. 
Following scenarios are included: @@ -188,7 +187,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // Wait a bit ginkgo.By("Waiting for Pod to be deleted") - timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: framework.Failf("Failed to evict Pod") @@ -220,7 +219,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // Wait a bit ginkgo.By("Waiting for Pod to be deleted") - timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted. Test successful") @@ -235,7 +234,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 4. See if pod will get evicted after toleration time runs out ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func() { podName := "taint-eviction-3" - pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, podName, ns) + pod := createPodForTaintsTest(true, kubeletPodDeletionDelaySeconds+2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podName, ns) @@ -253,7 +252,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // Wait a bit ginkgo.By("Waiting to see if a Pod won't be deleted") - timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted") @@ -262,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { return } ginkgo.By("Waiting for Pod to be deleted") - timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: framework.Failf("Pod wasn't evicted") @@ -279,7 +278,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 5. See if Pod won't be evicted. ginkgo.It("removing taint cancels eviction", func() { podName := "taint-eviction-4" - pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, podName, ns) + pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podName, ns) @@ -302,7 +301,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // Wait a bit ginkgo.By("Waiting short time to make sure Pod is queued for deletion") - timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C + timeoutChannel := time.NewTimer(additionalWaitPerDeleteSeconds).C select { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted. 
Proceeding") @@ -314,7 +313,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { framework.RemoveTaintOffNode(cs, nodeName, testTaint) taintRemoved = true ginkgo.By("Waiting some time to make sure that toleration time passed.") - timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted. Test successful") @@ -372,7 +371,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { // Wait a bit ginkgo.By("Waiting for Pod1 to be deleted") - timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C var evicted int for { select { @@ -404,8 +403,8 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { stopCh := make(chan struct{}) createTestController(cs, observedDeletions, stopCh, podGroup, ns) - pod1 := createPodForTaintsTest(true, AdditionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns) - pod2 := createPodForTaintsTest(true, 5*AdditionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns) + pod1 := createPodForTaintsTest(true, additionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns) + pod2 := createPodForTaintsTest(true, 5*additionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns) ginkgo.By("Starting pods...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) @@ -431,7 +430,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { // Wait a bit ginkgo.By("Waiting for Pod1 and Pod2 to be deleted") - timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C + timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C var evicted int for evicted != 2 { select { diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index 246605d2872..c907155c94d 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -59,7 +59,8 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { }) }) -// Check that the pods comprising a service get spread evenly across available zones +// SpreadServiceOrFail check that the pods comprising a service +// get spread evenly across available zones func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) { // First create the service serviceName := "test-service" @@ -173,7 +174,8 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str return true, nil } -// Check that the pods comprising a replication controller get spread evenly across available zones +// SpreadRCOrFail Check that the pods comprising a replication +// controller get spread evenly across available zones func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { name := "ubelite-spread-rc-" + string(uuid.NewUUID()) ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index a29251638f3..38ef8d9b3f2 100644 --- 
a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -180,7 +180,8 @@ type staticPVTestConfig struct { pod *v1.Pod } -// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in. +// PodsUseStaticPVsOrFail Check that the pods using statically +// created PVs get scheduled to the same zone that the PV is in. func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) { var err error c := f.ClientSet From 28f88c91ee15fca2e2329b0a871f875d87bb11b3 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 13 May 2019 11:07:02 +0200 Subject: [PATCH 157/194] integration: Start{RealMasterOrDie->TestServer} in admissionwebhook tests --- .../apiserver/admissionwebhook/BUILD | 3 +- .../admissionwebhook/admission_test.go | 29 ++++++++++--------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/test/integration/apiserver/admissionwebhook/BUILD b/test/integration/apiserver/admissionwebhook/BUILD index 30236402e85..c59a47eabd8 100644 --- a/test/integration/apiserver/admissionwebhook/BUILD +++ b/test/integration/apiserver/admissionwebhook/BUILD @@ -12,7 +12,6 @@ go_test( "integration", ], deps = [ - "//cmd/kube-apiserver/app/options:go_default_library", "//cmd/kube-apiserver/app/testing:go_default_library", "//staging/src/k8s.io/api/admission/v1beta1:go_default_library", "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", @@ -21,6 +20,7 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", @@ -31,6 +31,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//test/integration/etcd:go_default_library", "//test/integration/framework:go_default_library", diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index bd506e4dcf1..aab139efa90 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -36,6 +36,7 @@ import ( corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -46,9 +47,11 @@ import ( "k8s.io/apimachinery/pkg/util/wait" dynamic "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration/etcd" + 
"k8s.io/kubernetes/test/integration/framework" ) const ( @@ -323,25 +326,23 @@ func TestWebhookV1beta1(t *testing.T) { defer webhookServer.Close() // start API server - master := etcd.StartRealMasterOrDie(t, func(opts *options.ServerRunOptions) { - // turn off admission plugins that add finalizers - opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "StorageObjectInUseProtection"} + s, err := kubeapiservertesting.StartTestServer(t, kubeapiservertesting.NewDefaultTestServerOptions(), []string{ + "--disable-admission-plugins=ServiceAccount,StorageObjectInUseProtection", + "--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true,extensions/v1beta1/replicasets=true,extensions/v1beta1/podsecuritypolicies=true,extensions/v1beta1/networkpolicies=true", + }, framework.SharedEtcd()) + if err != nil { + t.Fatal(err) + } + defer s.TearDownFn() - // force enable all resources so we can check storage. - // TODO: drop these once we stop allowing them to be served. - opts.APIEnablement.RuntimeConfig["extensions/v1beta1/deployments"] = "true" - opts.APIEnablement.RuntimeConfig["extensions/v1beta1/daemonsets"] = "true" - opts.APIEnablement.RuntimeConfig["extensions/v1beta1/replicasets"] = "true" - opts.APIEnablement.RuntimeConfig["extensions/v1beta1/podsecuritypolicies"] = "true" - opts.APIEnablement.RuntimeConfig["extensions/v1beta1/networkpolicies"] = "true" - }) - defer master.Cleanup() + // create CRDs so we can make sure that custom resources do not get lost + etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(s.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...) // Configure a client with a distinct user name so that it is easy to distinguish requests // made by the client from requests made by controllers. We use this to filter out requests // before recording them to ensure we don't accidentally mistake requests from controllers // as requests made by the client. - clientConfig := master.Config + clientConfig := rest.CopyConfig(s.ClientConfig) clientConfig.Impersonate.UserName = testClientUsername clientConfig.Impersonate.Groups = []string{"system:masters", "system:authenticated"} client, err := clientset.NewForConfig(clientConfig) From 76b5bee48ca4605fb9ff2be74cdfaa630c743308 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Mon, 13 May 2019 11:29:40 +0200 Subject: [PATCH 158/194] integration: Start{RealMasterOrDie->TestServer} in dryrun tests --- test/integration/dryrun/BUILD | 3 ++ test/integration/dryrun/dryrun_test.go | 41 ++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 6 deletions(-) diff --git a/test/integration/dryrun/BUILD b/test/integration/dryrun/BUILD index e847eda66c2..5bfe9dc37ed 100644 --- a/test/integration/dryrun/BUILD +++ b/test/integration/dryrun/BUILD @@ -17,7 +17,9 @@ go_test( "integration", ], deps = [ + "//cmd/kube-apiserver/app/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", @@ -27,6 +29,7 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//test/integration/etcd:go_default_library", "//test/integration/framework:go_default_library", diff --git a/test/integration/dryrun/dryrun_test.go b/test/integration/dryrun/dryrun_test.go index d487ef91b84..38e2e254929 100644 --- a/test/integration/dryrun/dryrun_test.go +++ b/test/integration/dryrun/dryrun_test.go @@ -19,7 +19,8 @@ package dryrun import ( "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -29,8 +30,11 @@ import ( "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" featuregatetesting "k8s.io/component-base/featuregate/testing" + kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration/etcd" + "k8s.io/kubernetes/test/integration/framework" ) // Only add kinds to this list when this a virtual resource with get and create verbs that doesn't actually @@ -206,10 +210,29 @@ func DryRunDeleteTest(t *testing.T, rsc dynamic.ResourceInterface, name string) func TestDryRun(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DryRun, true)() - master := etcd.StartRealMasterOrDie(t) - defer master.Cleanup() + // start API server + s, err := kubeapiservertesting.StartTestServer(t, kubeapiservertesting.NewDefaultTestServerOptions(), []string{ + "--disable-admission-plugins=ServiceAccount,StorageObjectInUseProtection", + "--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true,extensions/v1beta1/replicasets=true,extensions/v1beta1/podsecuritypolicies=true,extensions/v1beta1/networkpolicies=true", + }, framework.SharedEtcd()) + if err != nil { + t.Fatal(err) + } + defer s.TearDownFn() - if _, err := master.Client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { + client, err := kubernetes.NewForConfig(s.ClientConfig) + if err != nil { + 
t.Fatal(err) + } + dynamicClient, err := dynamic.NewForConfig(s.ClientConfig) + if err != nil { + t.Fatal(err) + } + + // create CRDs so we can make sure that custom resources do not get lost + etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(s.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...) + + if _, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { t.Fatal(err) } @@ -225,7 +248,13 @@ func TestDryRun(t *testing.T) { dryrunData[resource] = data } - for _, resourceToTest := range master.Resources { + // gather resources to test + _, resources, err := client.Discovery().ServerGroupsAndResources() + if err != nil { + t.Fatalf("Failed to get ServerGroupsAndResources with error: %+v", err) + } + + for _, resourceToTest := range etcd.GetResources(t, resources) { t.Run(resourceToTest.Mapping.Resource.String(), func(t *testing.T) { mapping := resourceToTest.Mapping gvk := resourceToTest.Mapping.GroupVersionKind @@ -242,7 +271,7 @@ func TestDryRun(t *testing.T) { t.Fatalf("no test data for %s. Please add a test for your new type to etcd.GetEtcdStorageData().", gvResource) } - rsc, obj, err := etcd.JSONToUnstructured(testData.Stub, testNamespace, mapping, master.Dynamic) + rsc, obj, err := etcd.JSONToUnstructured(testData.Stub, testNamespace, mapping, dynamicClient) if err != nil { t.Fatalf("failed to unmarshal stub (%v): %v", testData.Stub, err) } From 5b9e4f1e8830b51a266ab8cb9138655e2a29545b Mon Sep 17 00:00:00 2001 From: David McCormick Date: Tue, 16 Oct 2018 11:15:34 +0100 Subject: [PATCH 159/194] Rebase allow updates to pdbs to latest upstream master --- pkg/apis/policy/validation/validation.go | 7 +- pkg/apis/policy/validation/validation_test.go | 15 +-- .../poddisruptionbudget/strategy_test.go | 18 +-- test/e2e/apps/BUILD | 1 + test/e2e/apps/disruption.go | 110 ++++++++++++++---- 5 files changed, 105 insertions(+), 46 deletions(-) diff --git a/pkg/apis/policy/validation/validation.go b/pkg/apis/policy/validation/validation.go index b3c90ade846..386d809231a 100644 --- a/pkg/apis/policy/validation/validation.go +++ b/pkg/apis/policy/validation/validation.go @@ -19,7 +19,6 @@ package validation import ( "fmt" "path/filepath" - "reflect" "regexp" "strings" @@ -43,14 +42,10 @@ func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorLis } func ValidatePodDisruptionBudgetUpdate(pdb, oldPdb *policy.PodDisruptionBudget) field.ErrorList { - allErrs := field.ErrorList{} - restoreGeneration := pdb.Generation pdb.Generation = oldPdb.Generation - if !reflect.DeepEqual(pdb.Spec, oldPdb.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to poddisruptionbudget spec are forbidden.")) - } + allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec")) allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...) 
pdb.Generation = restoreGeneration diff --git a/pkg/apis/policy/validation/validation_test.go b/pkg/apis/policy/validation/validation_test.go index 82b52aaaa5c..259f929b275 100644 --- a/pkg/apis/policy/validation/validation_test.go +++ b/pkg/apis/policy/validation/validation_test.go @@ -140,12 +140,10 @@ func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { generations: []int64{int64(2), int64(3)}, specs: []policy.PodDisruptionBudgetSpec{ { - MinAvailable: &c1, - MaxUnavailable: &c2, + MinAvailable: &c1, }, { - MinAvailable: &c1, - MaxUnavailable: &c2, + MinAvailable: &c1, }, }, status: []policy.PodDisruptionBudgetStatus{ @@ -163,7 +161,7 @@ func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { ok: true, }, { - name: "only update pdb spec", + name: "update pdb spec causing clash", generations: []int64{int64(2), int64(3)}, specs: []policy.PodDisruptionBudgetSpec{ { @@ -192,7 +190,6 @@ func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { MaxUnavailable: &c2, }, { - MinAvailable: &c1, MaxUnavailable: &c3, }, }, @@ -208,7 +205,7 @@ func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { DesiredHealthy: 3, }, }, - ok: false, + ok: true, }, } @@ -219,9 +216,9 @@ func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { pdb.Spec = tc.specs[1] pdb.Generation = tc.generations[1] - oldPdb.Status = tc.status[1] + pdb.Status = tc.status[1] - errs := ValidatePodDisruptionBudgetUpdate(oldPdb, pdb) + errs := ValidatePodDisruptionBudgetUpdate(pdb, oldPdb) if tc.ok && len(errs) > 0 { t.Errorf("[%d:%s] unexpected errors: %v", i, tc.name, errs) } else if !tc.ok && len(errs) == 0 { diff --git a/pkg/registry/policy/poddisruptionbudget/strategy_test.go b/pkg/registry/policy/poddisruptionbudget/strategy_test.go index 579e28c9690..f8ff2c7ef63 100644 --- a/pkg/registry/policy/poddisruptionbudget/strategy_test.go +++ b/pkg/registry/policy/poddisruptionbudget/strategy_test.go @@ -68,30 +68,32 @@ func TestPodDisruptionBudgetStrategy(t *testing.T) { t.Errorf("Unexpected error updating PodDisruptionBudget.") } - // Changing the selector? No. + // Changing the selector? OK newPdb.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"a": "bar"}} Strategy.PrepareForUpdate(ctx, newPdb, pdb) errs = Strategy.ValidateUpdate(ctx, newPdb, pdb) - if len(errs) == 0 { - t.Errorf("Expected a validation error since updates are disallowed on poddisruptionbudgets.") + if len(errs) != 0 { + t.Errorf("Expected no error on changing selector on poddisruptionbudgets.") } newPdb.Spec.Selector = pdb.Spec.Selector - // Changing MinAvailable? Also no. + // Changing MinAvailable? OK newMinAvailable := intstr.FromString("28%") newPdb.Spec.MinAvailable = &newMinAvailable Strategy.PrepareForUpdate(ctx, newPdb, pdb) errs = Strategy.ValidateUpdate(ctx, newPdb, pdb) - if len(errs) == 0 { - t.Errorf("Expected a validation error since updates are disallowed on poddisruptionbudgets.") + if len(errs) != 0 { + t.Errorf("Expected no error updating MinAvailable on poddisruptionbudgets.") } + // Changing MinAvailable to MaxAvailable? 
OK maxUnavailable := intstr.FromString("28%") newPdb.Spec.MaxUnavailable = &maxUnavailable + newPdb.Spec.MinAvailable = nil Strategy.PrepareForUpdate(ctx, newPdb, pdb) errs = Strategy.ValidateUpdate(ctx, newPdb, pdb) - if len(errs) == 0 { - t.Errorf("Expected a validation error since updates are disallowed on poddisruptionbudgets.") + if len(errs) != 0 { + t.Errorf("Expected no error updating replacing MinAvailable with MaxUnavailable on poddisruptionbudgets.") } } diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 2b1d22ce6ca..1b4190f9762 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -59,6 +59,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", + "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/deployment:go_default_library", diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 645b0b87cd5..61245742e76 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" @@ -164,23 +165,8 @@ var _ = SIGDescribe("DisruptionController", func() { } // Locate a running pod. - var pod v1.Pod - err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{}) - if err != nil { - return false, err - } - - for i := range podList.Items { - if podList.Items[i].Status.Phase == v1.PodRunning { - pod = podList.Items[i] - return true, nil - } - } - - return false, nil - }) - framework.ExpectNoError(err) + pod, err := locateRunningPod(cs, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e := &policy.Eviction{ ObjectMeta: metav1.ObjectMeta{ @@ -190,10 +176,6 @@ var _ = SIGDescribe("DisruptionController", func() { } if c.shouldDeny { - // Since disruptionAllowed starts out false, wait at least 60s hoping that - // this gives the controller enough time to have truly set the status. 
- time.Sleep(timeout) - err = cs.CoreV1().Pods(ns).Evict(e) gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget.")) } else { @@ -215,6 +197,34 @@ var _ = SIGDescribe("DisruptionController", func() { } }) } + + ginkgo.It("should block an eviction until the PDB is updated to allow it", func() { + ginkgo.By("Creating a pdb that targets all three pods in a test replica set") + createPDBMinAvailableOrDie(cs, ns, intstr.FromInt(3)) + createReplicaSetOrDie(cs, ns, 3, false) + + ginkgo.By("First trying to evict a pod which shouldn't be evictable") + pod, err := locateRunningPod(cs, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb + e := &policy.Eviction{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: ns, + }, + } + err = cs.CoreV1().Pods(ns).Evict(e) + gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget.")) + + ginkgo.By("Updating the pdb to allow a pod to be evicted") + updatePDBMinAvailableOrDie(cs, ns, intstr.FromInt(2)) + + ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable") + waitForPodsOrDie(cs, ns, 3) + err = cs.CoreV1().Pods(ns).Evict(e) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // the eviction is now allowed + }) }) func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) { @@ -229,7 +239,8 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable }, } _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) - framework.ExpectNoError(err) + framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns) + waitForPdbToBeProcessed(cs, ns) } func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) { @@ -244,7 +255,25 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail }, } _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) - framework.ExpectNoError(err) + framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns) + waitForPdbToBeProcessed(cs, ns) +} + +func updatePDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + if err != nil { + return err + } + old.Spec.MinAvailable = &minAvailable + if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(old); err != nil { + return err + } + return nil + }) + + framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns) + waitForPdbToBeProcessed(cs, ns) } func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { @@ -335,3 +364,38 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu _, err := cs.AppsV1().ReplicaSets(ns).Create(rs) framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns) } + +func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) { + ginkgo.By("locating a running pod") + err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { + podList, err := 
cs.CoreV1().Pods(ns).List(metav1.ListOptions{}) + if err != nil { + return false, err + } + + for i := range podList.Items { + if podList.Items[i].Status.Phase == v1.PodRunning { + pod = &podList.Items[i] + return true, nil + } + } + + return false, nil + }) + return pod, err +} + +func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string) { + ginkgo.By("Waiting for the pdb to be processed") + err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { + pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + if err != nil { + return false, err + } + if pdb.Status.ObservedGeneration < pdb.Generation { + return false, nil + } + return true, nil + }) + framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns) +} From 3537eed826c5e3d4b329d73e156e45ec165fe513 Mon Sep 17 00:00:00 2001 From: David McCormick Date: Fri, 26 Apr 2019 10:36:54 +0100 Subject: [PATCH 160/194] Remove the generation altering code - validate an update for a PDB by running ValidatePodDisruptionBudget only. --- pkg/apis/policy/validation/validation.go | 11 -- pkg/apis/policy/validation/validation_test.go | 105 ------------------ .../policy/poddisruptionbudget/strategy.go | 4 +- 3 files changed, 1 insertion(+), 119 deletions(-) diff --git a/pkg/apis/policy/validation/validation.go b/pkg/apis/policy/validation/validation.go index 386d809231a..2d880a20968 100644 --- a/pkg/apis/policy/validation/validation.go +++ b/pkg/apis/policy/validation/validation.go @@ -41,17 +41,6 @@ func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorLis return allErrs } -func ValidatePodDisruptionBudgetUpdate(pdb, oldPdb *policy.PodDisruptionBudget) field.ErrorList { - restoreGeneration := pdb.Generation - pdb.Generation = oldPdb.Generation - - allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec")) - allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...) 
- - pdb.Generation = restoreGeneration - return allErrs -} - func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/pkg/apis/policy/validation/validation_test.go b/pkg/apis/policy/validation/validation_test.go index 259f929b275..c390097bcc3 100644 --- a/pkg/apis/policy/validation/validation_test.go +++ b/pkg/apis/policy/validation/validation_test.go @@ -122,111 +122,6 @@ func TestValidatePodDisruptionBudgetStatus(t *testing.T) { } } -func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { - c1 := intstr.FromString("10%") - c2 := intstr.FromInt(1) - c3 := intstr.FromInt(2) - oldPdb := &policy.PodDisruptionBudget{} - pdb := &policy.PodDisruptionBudget{} - testCases := []struct { - generations []int64 - name string - specs []policy.PodDisruptionBudgetSpec - status []policy.PodDisruptionBudgetStatus - ok bool - }{ - { - name: "only update status", - generations: []int64{int64(2), int64(3)}, - specs: []policy.PodDisruptionBudgetSpec{ - { - MinAvailable: &c1, - }, - { - MinAvailable: &c1, - }, - }, - status: []policy.PodDisruptionBudgetStatus{ - { - PodDisruptionsAllowed: 10, - CurrentHealthy: 5, - ExpectedPods: 2, - }, - { - PodDisruptionsAllowed: 8, - CurrentHealthy: 5, - DesiredHealthy: 3, - }, - }, - ok: true, - }, - { - name: "update pdb spec causing clash", - generations: []int64{int64(2), int64(3)}, - specs: []policy.PodDisruptionBudgetSpec{ - { - MaxUnavailable: &c2, - }, - { - MinAvailable: &c1, - MaxUnavailable: &c3, - }, - }, - status: []policy.PodDisruptionBudgetStatus{ - { - PodDisruptionsAllowed: 10, - }, - { - PodDisruptionsAllowed: 10, - }, - }, - ok: false, - }, - { - name: "update spec and status", - generations: []int64{int64(2), int64(3)}, - specs: []policy.PodDisruptionBudgetSpec{ - { - MaxUnavailable: &c2, - }, - { - MaxUnavailable: &c3, - }, - }, - status: []policy.PodDisruptionBudgetStatus{ - { - PodDisruptionsAllowed: 10, - CurrentHealthy: 5, - ExpectedPods: 2, - }, - { - PodDisruptionsAllowed: 8, - CurrentHealthy: 5, - DesiredHealthy: 3, - }, - }, - ok: true, - }, - } - - for i, tc := range testCases { - oldPdb.Spec = tc.specs[0] - oldPdb.Generation = tc.generations[0] - oldPdb.Status = tc.status[0] - - pdb.Spec = tc.specs[1] - pdb.Generation = tc.generations[1] - pdb.Status = tc.status[1] - - errs := ValidatePodDisruptionBudgetUpdate(pdb, oldPdb) - if tc.ok && len(errs) > 0 { - t.Errorf("[%d:%s] unexpected errors: %v", i, tc.name, errs) - } else if !tc.ok && len(errs) == 0 { - t.Errorf("[%d:%s] expected errors: %v", i, tc.name, errs) - } - } -} - func TestValidatePodSecurityPolicy(t *testing.T) { validPSP := func() *policy.PodSecurityPolicy { return &policy.PodSecurityPolicy{ diff --git a/pkg/registry/policy/poddisruptionbudget/strategy.go b/pkg/registry/policy/poddisruptionbudget/strategy.go index 0ee30f3b9d7..5a4f8dc2122 100644 --- a/pkg/registry/policy/poddisruptionbudget/strategy.go +++ b/pkg/registry/policy/poddisruptionbudget/strategy.go @@ -83,9 +83,7 @@ func (podDisruptionBudgetStrategy) AllowCreateOnUpdate() bool { // ValidateUpdate is the default update validation for an end user. 
func (podDisruptionBudgetStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - validationErrorList := validation.ValidatePodDisruptionBudget(obj.(*policy.PodDisruptionBudget)) - updateErrorList := validation.ValidatePodDisruptionBudgetUpdate(obj.(*policy.PodDisruptionBudget), old.(*policy.PodDisruptionBudget)) - return append(validationErrorList, updateErrorList...) + return validation.ValidatePodDisruptionBudget(obj.(*policy.PodDisruptionBudget)) } // AllowUnconditionalUpdate is the default update policy for PodDisruptionBudget objects. Status update should From cd3aac34eaad6eefc9d0ea2f4f06e1393a88c4f7 Mon Sep 17 00:00:00 2001 From: draveness Date: Mon, 6 May 2019 17:52:08 +0800 Subject: [PATCH 161/194] feat: cache pod limits as part of metadata in priority functions --- .../algorithm/priorities/metadata.go | 2 ++ .../algorithm/priorities/metadata_test.go | 13 ++++++++++ .../algorithm/priorities/resource_limits.go | 10 ++++++-- .../priorities/resource_limits_test.go | 24 +++++++++++++------ 4 files changed, 40 insertions(+), 9 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/metadata.go b/pkg/scheduler/algorithm/priorities/metadata.go index c80fc7d0358..9f34962f716 100644 --- a/pkg/scheduler/algorithm/priorities/metadata.go +++ b/pkg/scheduler/algorithm/priorities/metadata.go @@ -46,6 +46,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle // priorityMetadata is a type that is passed as metadata for priority functions type priorityMetadata struct { nonZeroRequest *schedulernodeinfo.Resource + podLimits *schedulernodeinfo.Resource podTolerations []v1.Toleration affinity *v1.Affinity podSelectors []labels.Selector @@ -62,6 +63,7 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo } return &priorityMetadata{ nonZeroRequest: getNonZeroRequests(pod), + podLimits: getResourceLimits(pod), podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations), affinity: pod.Spec.Affinity, podSelectors: getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister), diff --git a/pkg/scheduler/algorithm/priorities/metadata_test.go b/pkg/scheduler/algorithm/priorities/metadata_test.go index 9a992b5797f..bef089b37ff 100644 --- a/pkg/scheduler/algorithm/priorities/metadata_test.go +++ b/pkg/scheduler/algorithm/priorities/metadata_test.go @@ -38,6 +38,12 @@ func TestPriorityMetadata(t *testing.T) { specifiedReqs.MilliCPU = 200 specifiedReqs.Memory = 2000 + nonPodLimits := &schedulernodeinfo.Resource{} + + specifiedPodLimits := &schedulernodeinfo.Resource{} + specifiedPodLimits.MilliCPU = 200 + specifiedPodLimits.Memory = 2000 + tolerations := []v1.Toleration{{ Key: "foo", Operator: v1.TolerationOpEqual, @@ -104,6 +110,10 @@ func TestPriorityMetadata(t *testing.T) { Image: "image", ImagePullPolicy: "Always", Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("2000"), + }, Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("2000"), @@ -128,6 +138,7 @@ func TestPriorityMetadata(t *testing.T) { pod: podWithTolerationsAndAffinity, expected: &priorityMetadata{ nonZeroRequest: nonZeroReqs, + podLimits: nonPodLimits, podTolerations: tolerations, affinity: podAffinity, }, @@ -137,6 +148,7 @@ func TestPriorityMetadata(t *testing.T) { pod: podWithTolerationsAndRequests, expected: &priorityMetadata{ 
nonZeroRequest: specifiedReqs, + podLimits: nonPodLimits, podTolerations: tolerations, affinity: nil, }, @@ -146,6 +158,7 @@ func TestPriorityMetadata(t *testing.T) { pod: podWithAffinityAndRequests, expected: &priorityMetadata{ nonZeroRequest: specifiedReqs, + podLimits: specifiedPodLimits, podTolerations: nil, affinity: podAffinity, }, diff --git a/pkg/scheduler/algorithm/priorities/resource_limits.go b/pkg/scheduler/algorithm/priorities/resource_limits.go index 1344dc1eec4..8922007b1bb 100644 --- a/pkg/scheduler/algorithm/priorities/resource_limits.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -42,7 +42,14 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule allocatableResources := nodeInfo.AllocatableResource() // compute pod limits - podLimits := getResourceLimits(pod) + var podLimits *schedulernodeinfo.Resource + if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil { + // We were able to parse metadata, use podLimits from there. + podLimits = priorityMeta.podLimits + } else { + // We couldn't parse metadata - fallback to computing it. + podLimits = getResourceLimits(pod) + } cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU) memScore := computeScore(podLimits.Memory, allocatableResources.Memory) @@ -83,7 +90,6 @@ func computeScore(limit, allocatable int64) int64 { // The reason to create this new function is to be consistent with other // priority functions because most or perhaps all priority functions work // with schedulernodeinfo.Resource. -// TODO: cache it as part of metadata passed to priority functions. func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource { result := &schedulernodeinfo.Resource{} for _, container := range pod.Spec.Containers { diff --git a/pkg/scheduler/algorithm/priorities/resource_limits_test.go b/pkg/scheduler/algorithm/priorities/resource_limits_test.go index 1e3782b014c..96bf356da32 100644 --- a/pkg/scheduler/algorithm/priorities/resource_limits_test.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits_test.go @@ -27,7 +27,7 @@ import ( schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) -func TestResourceLimistPriority(t *testing.T) { +func TestResourceLimitsPriority(t *testing.T) { noResources := v1.PodSpec{ Containers: []v1.Container{}, } @@ -140,12 +140,22 @@ func TestResourceLimistPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes) - list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("expected %#v, got %#v", test.expectedList, list) + + for _, hasMeta := range []bool{true, false} { + var metadata *priorityMetadata + if hasMeta { + metadata = &priorityMetadata{ + podLimits: getResourceLimits(test.pod), + } + } + + list, err := priorityFunction(ResourceLimitsPriorityMap, nil, metadata)(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } } }) } From 23fb942ed7272b5aed77aff32655873032874fe8 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Tue, 14 May 2019 14:00:05 +0000 Subject: [PATCH 162/194] Kubernetes version v1.16.0-alpha.0 openapi-spec file updates --- 
api/openapi-spec/swagger.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index c3b09593344..e2185d97442 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -18406,7 +18406,7 @@ }, "info": { "title": "Kubernetes", - "version": "v1.15.0" + "version": "v1.16.0" }, "paths": { "/api/": { From 4198f28855cbda6dac61408fcba6f2d576a9347c Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 May 2019 09:29:16 -0700 Subject: [PATCH 163/194] BoundServiceAccountTokenVolume: fix InClusterConfig --- .../k8s.io/apiserver/pkg/util/webhook/authentication.go | 1 + staging/src/k8s.io/client-go/rest/transport.go | 7 ++++--- .../src/k8s.io/client-go/tools/clientcmd/client_config.go | 2 ++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go b/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go index fb6a7fa3ba6..dda51b60d2f 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -171,6 +171,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { config.BearerToken = configAuthInfo.Token + config.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { diff --git a/staging/src/k8s.io/client-go/rest/transport.go b/staging/src/k8s.io/client-go/rest/transport.go index bd5749dc623..de33ecbfc36 100644 --- a/staging/src/k8s.io/client-go/rest/transport.go +++ b/staging/src/k8s.io/client-go/rest/transport.go @@ -74,9 +74,10 @@ func (c *Config) TransportConfig() (*transport.Config, error) { KeyFile: c.KeyFile, KeyData: c.KeyData, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index 878e0df79ff..c62ee03c77e 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { @@ -491,6 +492,7 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) } if token := config.overrides.AuthInfo.Token; len(token) > 0 { icc.BearerToken = token + icc.BearerTokenFile = "" } if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { icc.TLSClientConfig.CAFile = certificateAuthorityFile From 96ed93d8897a50b0ce5f22f294baa4c324d8a16a Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 May 2019 09:29:34 -0700 Subject: [PATCH 164/194] 
BoundServiceAccountTokenVolume: add e2e test --- test/e2e/auth/service_accounts.go | 137 +++++++++++++++++++++++++ test/images/BUILD | 1 + test/images/inclusterclient/BUILD | 34 ++++++ test/images/inclusterclient/Dockerfile | 18 ++++ test/images/inclusterclient/Makefile | 25 +++++ test/images/inclusterclient/VERSION | 1 + test/images/inclusterclient/main.go | 84 +++++++++++++++ test/utils/image/manifest.go | 3 + 8 files changed, 303 insertions(+) create mode 100644 test/images/inclusterclient/BUILD create mode 100644 test/images/inclusterclient/Dockerfile create mode 100644 test/images/inclusterclient/Makefile create mode 100644 test/images/inclusterclient/VERSION create mode 100644 test/images/inclusterclient/main.go diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index e3ca96000aa..088b21a8e9d 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -19,6 +19,8 @@ package auth import ( "fmt" "path" + "regexp" + "strings" "time" authenticationv1 "k8s.io/api/authentication/v1" @@ -38,6 +40,7 @@ import ( ) var mountImage = imageutils.GetE2EImage(imageutils.Mounttest) +var inClusterClientImage = imageutils.GetE2EImage(imageutils.InClusterClient) var _ = SIGDescribe("ServiceAccounts", func() { f := framework.NewDefaultFramework("svcaccounts") @@ -410,4 +413,138 @@ var _ = SIGDescribe("ServiceAccounts", func() { } } }) + + ginkgo.It("should support InClusterConfig with token rotation [Slow] [Feature:TokenRequestProjection]", func() { + cfg, err := framework.LoadConfig() + framework.ExpectNoError(err) + + if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-root-ca.crt", + }, + Data: map[string]string{ + "ca.crt": string(cfg.TLSClientConfig.CAData), + }, + }); err != nil && !apierrors.IsAlreadyExists(err) { + framework.Failf("Unexpected err creating kube-ca-crt: %v", err) + } + + tenMin := int64(10 * 60) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Name: "inclusterclient", + Image: inClusterClientImage, + VolumeMounts: []v1.VolumeMount{{ + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + Name: "kube-api-access-e2e", + ReadOnly: true, + }}, + }}, + RestartPolicy: v1.RestartPolicyNever, + ServiceAccountName: "default", + Volumes: []v1.Volume{{ + Name: "kube-api-access-e2e", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ServiceAccountToken: &v1.ServiceAccountTokenProjection{ + Path: "token", + ExpirationSeconds: &tenMin, + }, + }, + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "kube-root-ca.crt", + }, + Items: []v1.KeyToPath{ + { + Key: "ca.crt", + Path: "ca.crt", + }, + }, + }, + }, + { + DownwardAPI: &v1.DownwardAPIProjection{ + Items: []v1.DownwardAPIVolumeFile{ + { + Path: "namespace", + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + }, + }, + }, + }, + }, + }}, + }, + } + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + framework.ExpectNoError(err) + + framework.Logf("created pod") + if !framework.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { + framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) + } + + framework.Logf("pod is ready") + + var logs string + if err := 
wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) { + framework.Logf("polling logs") + logs, err = framework.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient") + if err != nil { + framework.Logf("Error pulling logs: %v", err) + return false, nil + } + tokenCount, err := parseInClusterClientLogs(logs) + if err != nil { + return false, fmt.Errorf("inclusterclient reported an error: %v", err) + } + if tokenCount < 2 { + framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount) + return false, nil + } + return true, nil + }); err != nil { + framework.Failf("Unexpected error: %v\n%s", err, logs) + } + }) }) + +var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$") + +func parseInClusterClientLogs(logs string) (int, error) { + seenTokens := map[string]struct{}{} + + lines := strings.Split(logs, "\n") + for _, line := range lines { + parts := reportLogsParser.FindStringSubmatch(line) + if len(parts) != 3 { + continue + } + + key, value := parts[1], parts[2] + switch key { + case "authz_header": + if value == "" { + return 0, fmt.Errorf("saw empty Authorization header") + } + seenTokens[value] = struct{}{} + case "status": + if value == "failed" { + return 0, fmt.Errorf("saw status=failed") + } + } + } + + return len(seenTokens), nil +} diff --git a/test/images/BUILD b/test/images/BUILD index de05303960e..3516f67e89e 100644 --- a/test/images/BUILD +++ b/test/images/BUILD @@ -18,6 +18,7 @@ filegroup( "//test/images/echoserver:all-srcs", "//test/images/entrypoint-tester:all-srcs", "//test/images/fakegitserver:all-srcs", + "//test/images/inclusterclient:all-srcs", "//test/images/liveness:all-srcs", "//test/images/logs-generator:all-srcs", "//test/images/metadata-concealment:all-srcs", diff --git a/test/images/inclusterclient/BUILD b/test/images/inclusterclient/BUILD new file mode 100644 index 00000000000..07cfc21064b --- /dev/null +++ b/test/images/inclusterclient/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = ["main.go"], + importpath = "k8s.io/kubernetes/test/images/inclusterclient", + visibility = ["//visibility:private"], + deps = [ + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/component-base/logs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_binary( + name = "inclusterconfig", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/images/inclusterclient/Dockerfile b/test/images/inclusterclient/Dockerfile new file mode 100644 index 00000000000..478ccddefed --- /dev/null +++ b/test/images/inclusterclient/Dockerfile @@ -0,0 +1,18 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM gcr.io/distroless/static:latest + +ADD inclusterclient /inclusterclient +ENTRYPOINT ["/inclusterclient"] diff --git a/test/images/inclusterclient/Makefile b/test/images/inclusterclient/Makefile new file mode 100644 index 00000000000..0ce0b522c41 --- /dev/null +++ b/test/images/inclusterclient/Makefile @@ -0,0 +1,25 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SRCS = inclusterclient +ARCH ?= amd64 +TARGET ?= $(CURDIR) +GOLANG_VERSION ?= latest +SRC_DIR = $(notdir $(shell pwd)) +export + +bin: + ../image-util.sh bin $(SRCS) + +.PHONY: bin diff --git a/test/images/inclusterclient/VERSION b/test/images/inclusterclient/VERSION new file mode 100644 index 00000000000..d3827e75a5c --- /dev/null +++ b/test/images/inclusterclient/VERSION @@ -0,0 +1 @@ +1.0 diff --git a/test/images/inclusterclient/main.go b/test/images/inclusterclient/main.go new file mode 100644 index 00000000000..bf7a0ac6944 --- /dev/null +++ b/test/images/inclusterclient/main.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "crypto/sha256" + "encoding/base64" + "flag" + "fmt" + "log" + "net/http" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/component-base/logs" + "k8s.io/klog" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + + pollInterval := flag.Int("poll-interval", 30, "poll interval of call to /healhtz in seconds") + flag.Set("logtostderr", "true") + flag.Parse() + + klog.Infof("started") + + cfg, err := rest.InClusterConfig() + if err != nil { + log.Fatalf("err: %v", err) + } + + cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper { + return &debugRt{ + rt: rt, + } + }) + + c := kubernetes.NewForConfigOrDie(cfg).RESTClient() + + t := time.Tick(time.Duration(*pollInterval) * time.Second) + for { + <-t + klog.Infof("calling /healthz") + b, err := c.Get().AbsPath("/healthz").Do().Raw() + if err != nil { + klog.Errorf("status=failed") + klog.Errorf("error checking /healthz: %v\n%s\n", err, string(b)) + } + } +} + +type debugRt struct { + rt http.RoundTripper +} + +func (rt *debugRt) RoundTrip(req *http.Request) (*http.Response, error) { + authHeader := req.Header.Get("Authorization") + if len(authHeader) != 0 { + authHash := sha256.Sum256([]byte(fmt.Sprintf("%s|%s", "salt", authHeader))) + klog.Infof("authz_header=%s", base64.RawURLEncoding.EncodeToString(authHash[:])) + } else { + klog.Errorf("authz_header=") + } + return rt.rt.RoundTrip(req) +} + +func (rt *debugRt) WrappedRoundTripper() http.RoundTripper { return rt.rt } diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index ca79a708825..e1eea1a98b8 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -130,6 +130,8 @@ const ( GBRedisSlave // Hostexec image Hostexec + // InClusterClient image + InClusterClient // IpcUtils image IpcUtils // Iperf image @@ -211,6 +213,7 @@ func initImageConfigs() map[int]Config { configs[GBFrontend] = Config{sampleRegistry, "gb-frontend", "v6"} configs[GBRedisSlave] = Config{sampleRegistry, "gb-redisslave", "v3"} configs[Hostexec] = Config{e2eRegistry, "hostexec", "1.1"} + configs[InClusterClient] = Config{e2eRegistry, "inclusterclient", "1.0"} configs[IpcUtils] = Config{e2eRegistry, "ipc-utils", "1.0"} configs[Iperf] = Config{e2eRegistry, "iperf", "1.0"} configs[JessieDnsutils] = Config{e2eRegistry, "jessie-dnsutils", "1.0"} From f228ae3364729caed59087e23c42868454bc3ff4 Mon Sep 17 00:00:00 2001 From: Yucheng Wu Date: Tue, 14 May 2019 14:49:38 +0800 Subject: [PATCH 165/194] fix CVE-2019-11244: `kubectl --http-cache=` creates world-writeable cached schema files --- .../discovery/cached/disk/cached_discovery.go | 4 +- .../cached/disk/cached_discovery_test.go | 27 ++++++++++ .../discovery/cached/disk/round_tripper.go | 3 ++ .../cached/disk/round_tripper_test.go | 52 +++++++++++++++++++ 4 files changed, 84 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery.go b/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery.go index 9ede5016bc7..fd8b61d158d 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery.go @@ -172,7 +172,7 @@ func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { } func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(filename), 0750); err 
!= nil { return err } @@ -191,7 +191,7 @@ func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Obj return err } - err = os.Chmod(f.Name(), 0755) + err = os.Chmod(f.Name(), 0660) if err != nil { return err } diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery_test.go b/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery_test.go index 3ddd4a98b19..3bb7fb89474 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery_test.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/cached_discovery_test.go @@ -19,6 +19,7 @@ package disk import ( "io/ioutil" "os" + "path/filepath" "testing" "time" @@ -96,6 +97,32 @@ func TestNewCachedDiscoveryClient_TTL(t *testing.T) { assert.Equal(c.groupCalls, 2) } +func TestNewCachedDiscoveryClient_PathPerm(t *testing.T) { + assert := assert.New(t) + + d, err := ioutil.TempDir("", "") + assert.NoError(err) + os.RemoveAll(d) + defer os.RemoveAll(d) + + c := fakeDiscoveryClient{} + cdc := newCachedDiscoveryClient(&c, d, 1*time.Nanosecond) + cdc.ServerGroups() + + err = filepath.Walk(d, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + assert.Equal(os.FileMode(0750), info.Mode().Perm()) + } else { + assert.Equal(os.FileMode(0660), info.Mode().Perm()) + } + return nil + }) + assert.NoError(err) +} + type fakeDiscoveryClient struct { groupCalls int resourceCalls int diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go index 7e2a537a9a2..1dfb8297d90 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go @@ -18,6 +18,7 @@ package disk import ( "net/http" + "os" "path/filepath" "github.com/gregjones/httpcache" @@ -35,6 +36,8 @@ type cacheRoundTripper struct { // corresponding requests. 
func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { d := diskv.New(diskv.Options{ + PathPerm: os.FileMode(0750), + FilePerm: os.FileMode(0660), BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp"), }) diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go index 329be42a5d3..13002c63d22 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go @@ -22,7 +22,10 @@ import ( "net/http" "net/url" "os" + "path/filepath" "testing" + + "github.com/stretchr/testify/assert" ) // copied from k8s.io/client-go/transport/round_trippers_test.go @@ -93,3 +96,52 @@ func TestCacheRoundTripper(t *testing.T) { t.Errorf("Invalid content read from cache %q", string(content)) } } + +func TestCacheRoundTripperPathPerm(t *testing.T) { + assert := assert.New(t) + + rt := &testRoundTripper{} + cacheDir, err := ioutil.TempDir("", "cache-rt") + os.RemoveAll(cacheDir) + defer os.RemoveAll(cacheDir) + + if err != nil { + t.Fatal(err) + } + cache := newCacheRoundTripper(cacheDir, rt) + + // First call, caches the response + req := &http.Request{ + Method: http.MethodGet, + URL: &url.URL{Host: "localhost"}, + } + rt.Response = &http.Response{ + Header: http.Header{"ETag": []string{`"123456"`}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Content"))), + StatusCode: http.StatusOK, + } + resp, err := cache.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "Content" { + t.Errorf(`Expected Body to be "Content", got %q`, string(content)) + } + + err = filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + assert.Equal(os.FileMode(0750), info.Mode().Perm()) + } else { + assert.Equal(os.FileMode(0660), info.Mode().Perm()) + } + return nil + }) + assert.NoError(err) +} From 900d652a9ac11e53293950b3d191295c21430215 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 7 May 2019 13:37:07 -0700 Subject: [PATCH 166/194] Update tests for: Pass {Operation}Option to Webhooks --- plugin/pkg/admission/admit/BUILD | 1 + plugin/pkg/admission/admit/admission_test.go | 3 +- .../alwayspullimages/admission_test.go | 6 +- .../admission/antiaffinity/admission_test.go | 6 +- .../admission_test.go | 2 +- plugin/pkg/admission/deny/BUILD | 1 + plugin/pkg/admission/deny/admission_test.go | 3 +- plugin/pkg/admission/eventratelimit/BUILD | 1 + .../eventratelimit/admission_test.go | 2 + plugin/pkg/admission/exec/admission_test.go | 4 +- .../admission_test.go | 2 +- plugin/pkg/admission/gc/gc_admission_test.go | 8 +- plugin/pkg/admission/imagepolicy/BUILD | 1 + .../admission/imagepolicy/admission_test.go | 17 +- .../admission/limitranger/admission_test.go | 20 +- .../namespace/autoprovision/admission_test.go | 10 +- .../namespace/exists/admission_test.go | 4 +- .../noderestriction/admission_test.go | 268 +++++++++--------- plugin/pkg/admission/nodetaint/BUILD | 1 + .../pkg/admission/nodetaint/admission_test.go | 8 +- .../podnodeselector/admission_test.go | 4 +- .../pkg/admission/podpreset/admission_test.go | 1 + .../admission_test.go | 4 +- .../pkg/admission/priority/admission_test.go | 12 +- .../admission/resourcequota/admission_test.go | 58 ++-- .../podsecuritypolicy/admission_test.go | 10 +- 
.../securitycontext/scdeny/admission_test.go | 6 +- .../serviceaccount/admission_test.go | 56 ++-- .../persistentvolume/label/admission_test.go | 4 +- .../persistentvolume/resize/admission_test.go | 3 +- .../storageclass/setdefault/admission_test.go | 1 + .../admission_test.go | 1 + .../apiserver/pkg/admission/audit_test.go | 2 +- .../apiserver/pkg/admission/chain_test.go | 10 +- .../apiserver/pkg/admission/errors_test.go | 1 + .../apiserver/pkg/admission/metrics/BUILD | 2 + .../pkg/admission/metrics/metrics_test.go | 16 +- .../namespace/lifecycle/admission_test.go | 34 +-- .../webhook/mutating/dispatcher_test.go | 2 +- .../plugin/webhook/namespace/matcher_test.go | 12 +- .../pkg/admission/plugin/webhook/rules/BUILD | 2 + .../plugin/webhook/rules/rules_test.go | 163 +++++------ .../plugin/webhook/testing/testcase.go | 3 +- .../plugin/banflunder/admission_test.go | 1 + .../admissionwebhook/admission_test.go | 75 +++-- 45 files changed, 473 insertions(+), 378 deletions(-) diff --git a/plugin/pkg/admission/admit/BUILD b/plugin/pkg/admission/admit/BUILD index bd93027e33c..388dfd03d7e 100644 --- a/plugin/pkg/admission/admit/BUILD +++ b/plugin/pkg/admission/admit/BUILD @@ -22,6 +22,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", ], ) diff --git a/plugin/pkg/admission/admit/admission_test.go b/plugin/pkg/admission/admit/admission_test.go index 6721aaf4eb5..ff4bb8a66d8 100644 --- a/plugin/pkg/admission/admit/admission_test.go +++ b/plugin/pkg/admission/admit/admission_test.go @@ -19,13 +19,14 @@ package admit import ( "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" ) func TestAdmissionNonNilAttribute(t *testing.T) { handler := NewAlwaysAdmit() - err := handler.(*alwaysAdmit).Admit(admission.NewAttributesRecord(nil, nil, api.Kind("kind").WithVersion("version"), "namespace", "name", api.Resource("resource").WithVersion("version"), "subresource", admission.Create, false, nil), nil) + err := handler.(*alwaysAdmit).Admit(admission.NewAttributesRecord(nil, nil, api.Kind("kind").WithVersion("version"), "namespace", "name", api.Resource("resource").WithVersion("version"), "subresource", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error returned from admission handler") } diff --git a/plugin/pkg/admission/alwayspullimages/admission_test.go b/plugin/pkg/admission/alwayspullimages/admission_test.go index 6f0756e30e8..7dab5611061 100644 --- a/plugin/pkg/admission/alwayspullimages/admission_test.go +++ b/plugin/pkg/admission/alwayspullimages/admission_test.go @@ -47,7 +47,7 @@ func TestAdmission(t *testing.T) { }, }, } - err := handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error returned from admission handler") } @@ -84,7 +84,7 @@ func TestValidate(t *testing.T) { }, } expectedError := `pods "123" is forbidden: spec.initContainers[0].imagePullPolicy: Unsupported 
value: "": supported values: "Always"` - err := handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Fatal("missing expected error") } @@ -139,7 +139,7 @@ func TestOtherResources(t *testing.T) { for _, tc := range tests { handler := &AlwaysPullImages{} - err := handler.Admit(admission.NewAttributesRecord(tc.object, nil, api.Kind(tc.kind).WithVersion("version"), namespace, name, api.Resource(tc.resource).WithVersion("version"), tc.subresource, admission.Create, false, nil), nil) + err := handler.Admit(admission.NewAttributesRecord(tc.object, nil, api.Kind(tc.kind).WithVersion("version"), namespace, name, api.Resource(tc.resource).WithVersion("version"), tc.subresource, admission.Create, &metav1.CreateOptions{}, false, nil), nil) if tc.expectError { if err == nil { diff --git a/plugin/pkg/admission/antiaffinity/admission_test.go b/plugin/pkg/admission/antiaffinity/admission_test.go index 9772716c255..b531b6a6aeb 100644 --- a/plugin/pkg/admission/antiaffinity/admission_test.go +++ b/plugin/pkg/admission/antiaffinity/admission_test.go @@ -19,7 +19,7 @@ package antiaffinity import ( "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" @@ -199,7 +199,7 @@ func TestInterPodAffinityAdmission(t *testing.T) { } for _, test := range tests { pod.Spec.Affinity = test.affinity - err := handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", nil, false, nil), nil) if test.errorExpected && err == nil { t.Errorf("Expected error for Anti Affinity %+v but did not get an error", test.affinity) @@ -267,7 +267,7 @@ func TestOtherResources(t *testing.T) { for _, tc := range tests { handler := &Plugin{} - err := handler.Validate(admission.NewAttributesRecord(tc.object, nil, api.Kind(tc.kind).WithVersion("version"), namespace, name, api.Resource(tc.resource).WithVersion("version"), tc.subresource, admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(tc.object, nil, api.Kind(tc.kind).WithVersion("version"), namespace, name, api.Resource(tc.resource).WithVersion("version"), tc.subresource, admission.Create, &metav1.CreateOptions{}, false, nil), nil) if tc.expectError { if err == nil { diff --git a/plugin/pkg/admission/defaulttolerationseconds/admission_test.go b/plugin/pkg/admission/defaulttolerationseconds/admission_test.go index 09f0fcd944a..4bf22839d83 100644 --- a/plugin/pkg/admission/defaulttolerationseconds/admission_test.go +++ b/plugin/pkg/admission/defaulttolerationseconds/admission_test.go @@ -263,7 +263,7 @@ func TestForgivenessAdmission(t *testing.T) { } for _, test := range tests { - err := handler.Admit(admission.NewAttributesRecord(&test.requestedPod, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", 
api.Resource("pods").WithVersion("version"), "", "ignored", false, nil), nil) + err := handler.Admit(admission.NewAttributesRecord(&test.requestedPod, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", nil, false, nil), nil) if err != nil { t.Errorf("[%s]: unexpected error %v for pod %+v", test.description, err, test.requestedPod) } diff --git a/plugin/pkg/admission/deny/BUILD b/plugin/pkg/admission/deny/BUILD index cdbe0e929ad..524c8b90693 100644 --- a/plugin/pkg/admission/deny/BUILD +++ b/plugin/pkg/admission/deny/BUILD @@ -22,6 +22,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", ], ) diff --git a/plugin/pkg/admission/deny/admission_test.go b/plugin/pkg/admission/deny/admission_test.go index 1eb7e9928ae..0d431f4d76a 100644 --- a/plugin/pkg/admission/deny/admission_test.go +++ b/plugin/pkg/admission/deny/admission_test.go @@ -19,13 +19,14 @@ package deny import ( "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" ) func TestAdmission(t *testing.T) { handler := NewAlwaysDeny() - err := handler.(*alwaysDeny).Admit(admission.NewAttributesRecord(nil, nil, api.Kind("kind").WithVersion("version"), "namespace", "name", api.Resource("resource").WithVersion("version"), "subresource", admission.Create, false, nil), nil) + err := handler.(*alwaysDeny).Admit(admission.NewAttributesRecord(nil, nil, api.Kind("kind").WithVersion("version"), "namespace", "name", api.Resource("resource").WithVersion("version"), "subresource", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Error("Expected error returned from admission handler") } diff --git a/plugin/pkg/admission/eventratelimit/BUILD b/plugin/pkg/admission/eventratelimit/BUILD index 97b33f9bbe3..1f154fc3b66 100644 --- a/plugin/pkg/admission/eventratelimit/BUILD +++ b/plugin/pkg/admission/eventratelimit/BUILD @@ -17,6 +17,7 @@ go_test( "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/plugin/pkg/admission/eventratelimit/admission_test.go b/plugin/pkg/admission/eventratelimit/admission_test.go index 468c3ad5c45..82566eea799 100644 --- a/plugin/pkg/admission/eventratelimit/admission_test.go +++ b/plugin/pkg/admission/eventratelimit/admission_test.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/admission" @@ -46,6 +47,7 @@ func attributesForRequest(rq request) admission.Attributes { api.Resource("resource").WithVersion("version"), "", admission.Create, + &metav1.CreateOptions{}, rq.dryRun, &user.DefaultInfo{Name: rq.username}) } diff --git a/plugin/pkg/admission/exec/admission_test.go b/plugin/pkg/admission/exec/admission_test.go index 14f29abf4af..4d48e8c11dc 100644 --- a/plugin/pkg/admission/exec/admission_test.go 
+++ b/plugin/pkg/admission/exec/admission_test.go @@ -120,7 +120,7 @@ func testAdmission(t *testing.T, pod *corev1.Pod, handler *DenyExec, shouldAccep // pods/exec { - err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), "test", pod.Name, api.Resource("pods").WithVersion("version"), "exec", admission.Connect, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), "test", pod.Name, api.Resource("pods").WithVersion("version"), "exec", admission.Connect, nil, false, nil), nil) if shouldAccept && err != nil { t.Errorf("Unexpected error returned from admission handler: %v", err) } @@ -131,7 +131,7 @@ func testAdmission(t *testing.T, pod *corev1.Pod, handler *DenyExec, shouldAccep // pods/attach { - err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), "test", pod.Name, api.Resource("pods").WithVersion("version"), "attach", admission.Connect, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), "test", pod.Name, api.Resource("pods").WithVersion("version"), "attach", admission.Connect, nil, false, nil), nil) if shouldAccept && err != nil { t.Errorf("Unexpected error returned from admission handler: %v", err) } diff --git a/plugin/pkg/admission/extendedresourcetoleration/admission_test.go b/plugin/pkg/admission/extendedresourcetoleration/admission_test.go index b177950b225..fdb3aa5b46c 100644 --- a/plugin/pkg/admission/extendedresourcetoleration/admission_test.go +++ b/plugin/pkg/admission/extendedresourcetoleration/admission_test.go @@ -354,7 +354,7 @@ func TestAdmit(t *testing.T) { }, } for i, test := range tests { - err := plugin.Admit(admission.NewAttributesRecord(&test.requestedPod, nil, core.Kind("Pod").WithVersion("version"), "foo", "name", core.Resource("pods").WithVersion("version"), "", "ignored", false, nil), nil) + err := plugin.Admit(admission.NewAttributesRecord(&test.requestedPod, nil, core.Kind("Pod").WithVersion("version"), "foo", "name", core.Resource("pods").WithVersion("version"), "", "ignored", nil, false, nil), nil) if err != nil { t.Errorf("[%d: %s] unexpected error %v for pod %+v", i, test.description, err, test.requestedPod) } diff --git a/plugin/pkg/admission/gc/gc_admission_test.go b/plugin/pkg/admission/gc/gc_admission_test.go index 8e0ef132ed7..501e13a4154 100644 --- a/plugin/pkg/admission/gc/gc_admission_test.go +++ b/plugin/pkg/admission/gc/gc_admission_test.go @@ -302,11 +302,13 @@ func TestGCAdmission(t *testing.T) { } operation := admission.Create + var options runtime.Object = &metav1.CreateOptions{} if tc.oldObj != nil { operation = admission.Update + options = &metav1.UpdateOptions{} } user := &user.DefaultInfo{Name: tc.username} - attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, false, user) + attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, options, false, user) err = gcAdmit.Validate(attributes, nil) if !tc.checkError(err) { @@ -605,11 +607,13 @@ func TestBlockOwnerDeletionAdmission(t *testing.T) { for _, tc := range tests { operation := admission.Create + var options runtime.Object = &metav1.CreateOptions{} if tc.oldObj != nil { operation = admission.Update + options = &metav1.UpdateOptions{} 
} user := &user.DefaultInfo{Name: tc.username} - attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, false, user) + attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, options, false, user) err := gcAdmit.Validate(attributes, nil) if !tc.checkError(err) { diff --git a/plugin/pkg/admission/imagepolicy/BUILD b/plugin/pkg/admission/imagepolicy/BUILD index fb8c1c55e54..20630ea3644 100644 --- a/plugin/pkg/admission/imagepolicy/BUILD +++ b/plugin/pkg/admission/imagepolicy/BUILD @@ -42,6 +42,7 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/apis/imagepolicy/install:go_default_library", "//staging/src/k8s.io/api/imagepolicy/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", diff --git a/plugin/pkg/admission/imagepolicy/admission_test.go b/plugin/pkg/admission/imagepolicy/admission_test.go index 3d748082a15..cfa9d0c1174 100644 --- a/plugin/pkg/admission/imagepolicy/admission_test.go +++ b/plugin/pkg/admission/imagepolicy/admission_test.go @@ -29,9 +29,10 @@ import ( "time" "k8s.io/api/imagepolicy/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/client-go/tools/clientcmd/api/v1" + v1 "k8s.io/client-go/tools/clientcmd/api/v1" api "k8s.io/kubernetes/pkg/apis/core" "fmt" @@ -482,7 +483,7 @@ func TestTLSConfig(t *testing.T) { return } pod := goodPod(strconv.Itoa(rand.Intn(1000))) - attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) // Allow all and see if we get an error. 
service.Allow() @@ -571,7 +572,7 @@ func TestWebhookCache(t *testing.T) { {statusCode: 500, expectedErr: false, expectedAuthorized: true, expectedCached: true}, } - attr := admission.NewAttributesRecord(goodPod("test"), nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(goodPod("test"), nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) serv.allow = true @@ -583,7 +584,7 @@ func TestWebhookCache(t *testing.T) { {statusCode: 200, expectedErr: false, expectedAuthorized: true, expectedCached: false}, {statusCode: 500, expectedErr: false, expectedAuthorized: true, expectedCached: true}, } - attr = admission.NewAttributesRecord(goodPod("test2"), nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr = admission.NewAttributesRecord(goodPod("test2"), nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) testWebhookCacheCases(t, serv, wh, attr, tests) } @@ -757,7 +758,7 @@ func TestContainerCombinations(t *testing.T) { return } - attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) err = wh.Validate(attr, nil) if tt.wantAllowed { @@ -851,7 +852,7 @@ func TestDefaultAllow(t *testing.T) { return } - attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) annotations := make(map[string]string) attr = &fakeAttributes{attr, annotations} @@ -961,7 +962,7 @@ func TestAnnotationFiltering(t *testing.T) { pod := goodPod("test") pod.Annotations = tt.annotations - attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) err = wh.Validate(attr, nil) if err != nil { @@ -1051,7 +1052,7 @@ func TestReturnedAnnotationAdd(t *testing.T) { pod := tt.pod - attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", 
admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) annotations := make(map[string]string) attr = &fakeAttributes{attr, annotations} diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index ec184e4dfc7..acec3436b6f 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -35,7 +35,7 @@ import ( "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) func getComputeResourceList(cpu, memory string) api.ResourceList { @@ -705,20 +705,20 @@ func TestLimitRangerIgnoresSubresource(t *testing.T) { informerFactory.Start(wait.NeverStop) testPod := validPod("testPod", 1, api.ResourceRequirements{}) - err = handler.Admit(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Fatal(err) } - err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error since the pod did not specify resource limits in its create call") } - err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Expected not to call limitranger actions on pod updates") } - err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Should have ignored calls to any subresource of pod %v", err) } @@ -735,20 +735,20 @@ func TestLimitRangerAdmitPod(t *testing.T) { informerFactory.Start(wait.NeverStop) testPod := validPod("testPod", 1, api.ResourceRequirements{}) - err = handler.Admit(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = 
handler.Admit(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Fatal(err) } - err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error since the pod did not specify resource limits in its create call") } - err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Expected not to call limitranger actions on pod updates") } - err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&testPod, nil, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Should have ignored calls to any subresource of pod %v", err) } @@ -757,7 +757,7 @@ func TestLimitRangerAdmitPod(t *testing.T) { terminatingPod := validPod("terminatingPod", 1, api.ResourceRequirements{}) now := metav1.Now() terminatingPod.DeletionTimestamp = &now - err = handler.Validate(admission.NewAttributesRecord(&terminatingPod, &terminatingPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "terminatingPod", api.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&terminatingPod, &terminatingPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "terminatingPod", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("LimitRange should ignore a pod marked for termination") } diff --git a/plugin/pkg/admission/namespace/autoprovision/admission_test.go b/plugin/pkg/admission/namespace/autoprovision/admission_test.go index cde6f315bf8..63b508f77cf 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission_test.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission_test.go @@ -99,7 +99,7 @@ func TestAdmission(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, 
api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } @@ -119,7 +119,7 @@ func TestAdmissionNamespaceExists(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } @@ -139,7 +139,7 @@ func TestAdmissionDryRun(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, true, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, true, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } @@ -160,7 +160,7 @@ func TestIgnoreAdmission(t *testing.T) { chainHandler := admission.NewChainHandler(handler) pod := newPod(namespace) - err = chainHandler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = chainHandler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } @@ -182,7 +182,7 @@ func TestAdmissionWithLatentCache(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } diff --git a/plugin/pkg/admission/namespace/exists/admission_test.go b/plugin/pkg/admission/namespace/exists/admission_test.go index c1cbfa5b188..7ba55539e88 100644 --- a/plugin/pkg/admission/namespace/exists/admission_test.go +++ b/plugin/pkg/admission/namespace/exists/admission_test.go @@ -88,7 +88,7 @@ func TestAdmissionNamespaceExists(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = 
handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } @@ -108,7 +108,7 @@ func TestAdmissionNamespaceDoesNotExist(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { actions := "" for _, action := range mockClient.Actions() { diff --git a/plugin/pkg/admission/noderestriction/admission_test.go b/plugin/pkg/admission/noderestriction/admission_test.go index bca668fd37b..a053d9e83d1 100644 --- a/plugin/pkg/admission/noderestriction/admission_test.go +++ b/plugin/pkg/admission/noderestriction/admission_test.go @@ -374,61 +374,61 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "allow creating a mirror pod bound to self", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coremymirrorpod, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coremymirrorpod, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { name: "forbid update of mirror pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coremymirrorpod, coremymirrorpod, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coremymirrorpod, coremymirrorpod, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow delete of mirror pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "", }, { name: "forbid create of mirror pod status bound to self", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coremymirrorpod, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "status", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coremymirrorpod, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow update of mirror pod status bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coremymirrorpod, coremymirrorpod, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "status", admission.Update, false, 
mynode), + attributes: admission.NewAttributesRecord(coremymirrorpod, coremymirrorpod, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "forbid delete of mirror pod status bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "status", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow create of eviction for mirror pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { name: "forbid update of eviction for mirror pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for mirror pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow create of unnamed eviction for mirror pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coremymirrorpod.Namespace, coremymirrorpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, @@ -436,61 +436,61 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid creating a mirror pod bound to another", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreothermirrorpod, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreothermirrorpod, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid update of mirror pod bound to another", podsGetter: existingPods, - attributes: 
admission.NewAttributesRecord(coreothermirrorpod, coreothermirrorpod, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreothermirrorpod, coreothermirrorpod, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of mirror pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid create of mirror pod status bound to another", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreothermirrorpod, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "status", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreothermirrorpod, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid update of mirror pod status bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreothermirrorpod, coreothermirrorpod, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreothermirrorpod, coreothermirrorpod, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid delete of mirror pod status bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "status", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of eviction for mirror pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid update of eviction for mirror pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, 
podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for mirror pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of unnamed eviction for mirror pod to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreothermirrorpod.Namespace, coreothermirrorpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, @@ -498,61 +498,61 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid creating a mirror pod unbound", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreunboundmirrorpod, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundmirrorpod, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid update of mirror pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreunboundmirrorpod, coreunboundmirrorpod, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundmirrorpod, coreunboundmirrorpod, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of mirror pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid create of mirror pod status unbound", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreunboundmirrorpod, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "status", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundmirrorpod, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid update of mirror pod status unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreunboundmirrorpod, 
coreunboundmirrorpod, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundmirrorpod, coreunboundmirrorpod, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid delete of mirror pod status unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "status", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of eviction for mirror pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid update of eviction for mirror pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for mirror pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of unnamed eviction for mirror pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreunboundmirrorpod.Namespace, coreunboundmirrorpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, @@ -560,55 +560,55 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid creating a normal pod bound to self", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coremypod, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coremypod, nil, 
podKind, coremypod.Namespace, coremypod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "can only create mirror pods", }, { name: "forbid update of normal pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coremypod, coremypod, podKind, coremypod.Namespace, coremypod.Name, podResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coremypod, coremypod, podKind, coremypod.Namespace, coremypod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow delete of normal pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "", }, { name: "forbid create of normal pod status bound to self", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coremypod, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "status", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coremypod, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow update of normal pod status bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coremypod, coremypod, podKind, coremypod.Namespace, coremypod.Name, podResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coremypod, coremypod, podKind, coremypod.Namespace, coremypod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "forbid delete of normal pod status bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "status", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coremypod.Namespace, coremypod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid update of eviction for normal pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for normal pod bound to self", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "allow create of unnamed eviction for normal pod bound to self", 
podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, @@ -616,61 +616,61 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid creating a normal pod bound to another", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreotherpod, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreotherpod, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "can only create mirror pods", }, { name: "forbid update of normal pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreotherpod, coreotherpod, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreotherpod, coreotherpod, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of normal pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid create of normal pod status bound to another", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreotherpod, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "status", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreotherpod, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid update of normal pod status bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreotherpod, coreotherpod, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreotherpod, coreotherpod, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid delete of normal pod status bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "status", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of eviction for normal pod bound to another", podsGetter: existingPods, - attributes:
admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid update of eviction for normal pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for normal pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of unnamed eviction for normal pod bound to another", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreotherpod.Namespace, coreotherpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, @@ -678,61 +678,61 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid creating a normal pod unbound", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "can only create mirror pods", }, { name: "forbid update of normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "spec.nodeName set
to itself", }, { name: "forbid create of normal pod status unbound", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid update of normal pod status unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid delete of normal pod status unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of eviction for normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, { name: "forbid update of eviction for normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of unnamed eviction for normal unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "eviction", 
admission.Create, &metav1.CreateOptions{}, false, mynode), err: "spec.nodeName set to itself", }, @@ -740,31 +740,31 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid delete of unknown pod", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "not found", }, { name: "forbid create of eviction for unknown pod", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "not found", }, { name: "forbid update of eviction for unknown pod", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for unknown pod", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of unnamed eviction for unknown pod", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coremypod.Namespace, coremypod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "not found", }, @@ -772,26 +772,26 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "allow create of eviction for unnamed pod", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), // use the submitted eviction resource name as the pod name err: "", }, { name: "forbid update of eviction for unnamed pod", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, 
podResource, "eviction", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid delete of eviction for unnamed pod", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "forbidden: unexpected operation", }, { name: "forbid create of unnamed eviction for unnamed pod", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, coreunnamedpod.Namespace, coreunnamedpod.Name, podResource, "eviction", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "could not determine pod from request data", }, @@ -799,25 +799,25 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid create of pod referencing service account", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(sapod, nil, podKind, sapod.Namespace, sapod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(sapod, nil, podKind, sapod.Namespace, sapod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "reference a service account", }, { name: "forbid create of pod referencing secret", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(secretpod, nil, podKind, secretpod.Namespace, secretpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(secretpod, nil, podKind, secretpod.Namespace, secretpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "reference secrets", }, { name: "forbid create of pod referencing configmap", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(configmappod, nil, podKind, configmappod.Namespace, configmappod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(configmappod, nil, podKind, configmappod.Namespace, configmappod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "reference configmaps", }, { name: "forbid create of pod referencing persistentvolumeclaim", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(pvcpod, nil, podKind, pvcpod.Namespace, pvcpod.Name, podResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(pvcpod, nil, podKind, pvcpod.Namespace, pvcpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "reference persistentvolumeclaims", }, @@ -825,157 +825,157 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "allow create of my node", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mynodeObj, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { 
name: "allow create of my node pulling name from object", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mynodeObj, nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { name: "allow create of my node with taints", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mynodeObjTaintA, nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjTaintA, nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { name: "allow create of my node with labels", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(setAllowedCreateLabels(mynodeObj, ""), nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(setAllowedCreateLabels(mynodeObj, ""), nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { name: "forbid create of my node with forbidden labels", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(setForbiddenCreateLabels(mynodeObj, ""), nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(setForbiddenCreateLabels(mynodeObj, ""), nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: `is not allowed to set the following labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo`, }, { name: "allow update of my node", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObj, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow delete of my node", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "", }, { name: "allow update of my node status", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObj, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "forbid create of my node with non-nil configSource", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(mynodeObjConfigA, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjConfigA, nil, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, 
"", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "is not allowed to create pods with a non-nil configSource", }, { name: "forbid update of my node: nil configSource to new non-nil configSource", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObjConfigA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjConfigA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "update configSource to a new non-nil configSource", }, { name: "forbid update of my node: non-nil configSource to new non-nil configSource", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObjConfigB, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjConfigB, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "update configSource to a new non-nil configSource", }, { name: "allow update of my node: non-nil configSource unchanged", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObjConfigA, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjConfigA, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: non-nil configSource to nil configSource", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: no change to taints", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: add allowed labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setAllowedUpdateLabels(mynodeObj, ""), mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setAllowedUpdateLabels(mynodeObj, ""), mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: remove allowed labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObj, setAllowedUpdateLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, setAllowedUpdateLabels(mynodeObj, ""), 
nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: modify allowed labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setAllowedUpdateLabels(mynodeObj, "b"), setAllowedUpdateLabels(mynodeObj, "a"), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setAllowedUpdateLabels(mynodeObj, "b"), setAllowedUpdateLabels(mynodeObj, "a"), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: no change to labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setAllLabels(mynodeObj, ""), setAllLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setAllLabels(mynodeObj, ""), setAllLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: add allowed labels while forbidden labels exist unmodified", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setAllLabels(mynodeObj, ""), setForbiddenUpdateLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setAllLabels(mynodeObj, ""), setForbiddenUpdateLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow update of my node: remove allowed labels while forbidden labels exist unmodified", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, ""), setAllLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, ""), setAllLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "forbid update of my node: add taints", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "is not allowed to modify taints", }, { name: "forbid update of my node: remove taints", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "is not allowed to modify taints", }, { name: "forbid update of my node: change taints", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintB, nodeKind, mynodeObj.Namespace, 
mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintB, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "is not allowed to modify taints", }, { name: "forbid update of my node: add labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, ""), mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, ""), mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, { name: "forbid update of my node: remove labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(mynodeObj, setForbiddenUpdateLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(mynodeObj, setForbiddenUpdateLabels(mynodeObj, ""), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, { name: "forbid update of my node: change labels", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, "new"), setForbiddenUpdateLabels(mynodeObj, "old"), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, "new"), setForbiddenUpdateLabels(mynodeObj, "old"), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, @@ -983,31 +983,31 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "forbid create of other node", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(othernodeObj, nil, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(othernodeObj, nil, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "is not allowed to modify node", }, { name: "forbid create of other node pulling name from object", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(othernodeObj, nil, nodeKind, othernodeObj.Namespace, "", nodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(othernodeObj, nil, nodeKind, othernodeObj.Namespace, "", nodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "is not allowed to modify node", }, { name: "forbid update of other node", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(othernodeObj, othernodeObj, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", 
admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(othernodeObj, othernodeObj, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "is not allowed to modify node", }, { name: "forbid delete of other node", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "is not allowed to modify node", }, { name: "forbid update of other node status", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(othernodeObj, othernodeObj, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "status", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(othernodeObj, othernodeObj, nodeKind, othernodeObj.Namespace, othernodeObj.Name, nodeResource, "status", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "is not allowed to modify node", }, @@ -1016,54 +1016,54 @@ func Test_nodePlugin_Admit(t *testing.T) { name: "forbid create of unbound token", podsGetter: noExistingPods, features: trEnabledFeature, - attributes: admission.NewAttributesRecord(makeTokenRequest("", ""), nil, tokenrequestKind, "ns", "mysa", svcacctResource, "token", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(makeTokenRequest("", ""), nil, tokenrequestKind, "ns", "mysa", svcacctResource, "token", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "not bound to a pod", }, { name: "forbid create of token bound to nonexistant pod", podsGetter: noExistingPods, features: trEnabledFeature, - attributes: admission.NewAttributesRecord(makeTokenRequest("nopod", "someuid"), nil, tokenrequestKind, "ns", "mysa", svcacctResource, "token", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(makeTokenRequest("nopod", "someuid"), nil, tokenrequestKind, "ns", "mysa", svcacctResource, "token", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "not found", }, { name: "forbid create of token bound to pod without uid", podsGetter: existingPods, features: trEnabledFeature, - attributes: admission.NewAttributesRecord(makeTokenRequest(coremypod.Name, ""), nil, tokenrequestKind, "ns", "mysa", svcacctResource, "token", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(makeTokenRequest(coremypod.Name, ""), nil, tokenrequestKind, "ns", "mysa", svcacctResource, "token", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "pod binding without a uid", }, { name: "forbid create of token bound to pod scheduled on another node", podsGetter: existingPods, features: trEnabledFeature, - attributes: admission.NewAttributesRecord(makeTokenRequest(coreotherpod.Name, coreotherpod.UID), nil, tokenrequestKind, coreotherpod.Namespace, "mysa", svcacctResource, "token", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(makeTokenRequest(coreotherpod.Name, coreotherpod.UID), nil, tokenrequestKind, coreotherpod.Namespace, "mysa", svcacctResource, "token", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "pod scheduled on a different node", }, { name: "allow create of token bound to pod scheduled this node", podsGetter: existingPods, 
features: trEnabledFeature, - attributes: admission.NewAttributesRecord(makeTokenRequest(coremypod.Name, coremypod.UID), nil, tokenrequestKind, coremypod.Namespace, "mysa", svcacctResource, "token", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(makeTokenRequest(coremypod.Name, coremypod.UID), nil, tokenrequestKind, coremypod.Namespace, "mysa", svcacctResource, "token", admission.Create, &metav1.CreateOptions{}, false, mynode), }, // Unrelated objects { name: "allow create of unrelated object", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(&api.ConfigMap{}, nil, configmapKind, "myns", "mycm", configmapResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(&api.ConfigMap{}, nil, configmapKind, "myns", "mycm", configmapResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), err: "", }, { name: "allow update of unrelated object", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(&api.ConfigMap{}, &api.ConfigMap{}, configmapKind, "myns", "mycm", configmapResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(&api.ConfigMap{}, &api.ConfigMap{}, configmapKind, "myns", "mycm", configmapResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: "", }, { name: "allow delete of unrelated object", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, configmapKind, "myns", "mycm", configmapResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, configmapKind, "myns", "mycm", configmapResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), err: "", }, @@ -1071,140 +1071,140 @@ func Test_nodePlugin_Admit(t *testing.T) { { name: "allow unrelated user creating a normal pod unbound", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Create, false, bob), + attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, bob), err: "", }, { name: "allow unrelated user update of normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Update, false, bob), + attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Update, &metav1.UpdateOptions{}, false, bob), err: "", }, { name: "allow unrelated user delete of normal pod unbound", podsGetter: existingPods, - attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Delete, false, bob), + attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "", admission.Delete, &metav1.DeleteOptions{}, false, bob), err: "", }, { name: "allow unrelated user create of normal pod status unbound", podsGetter: noExistingPods, - attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Create, false, bob), + attributes: admission.NewAttributesRecord(coreunboundpod, nil, podKind, 
coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Create, &metav1.CreateOptions{}, false, bob),
 			err:        "",
 		},
 		{
 			name:       "allow unrelated user update of normal pod status unbound",
 			podsGetter: existingPods,
-			attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Update, false, bob),
+			attributes: admission.NewAttributesRecord(coreunboundpod, coreunboundpod, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Update, &metav1.UpdateOptions{}, false, bob),
 			err:        "",
 		},
 		{
 			name:       "allow unrelated user delete of normal pod status unbound",
 			podsGetter: existingPods,
-			attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Delete, false, bob),
+			attributes: admission.NewAttributesRecord(nil, nil, podKind, coreunboundpod.Namespace, coreunboundpod.Name, podResource, "status", admission.Delete, &metav1.DeleteOptions{}, false, bob),
 			err:        "",
 		},
 		// Node leases
 		{
 			name:       "disallowed create lease - feature disabled",
-			attributes: admission.NewAttributesRecord(lease, nil, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Create, false, mynode),
+			attributes: admission.NewAttributesRecord(lease, nil, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode),
 			features:   leaseDisabledFeature,
 			err:        "forbidden: disabled by feature gate NodeLease",
 		},
 		{
 			name:       "disallowed create lease in namespace other than kube-node-lease - feature enabled",
-			attributes: admission.NewAttributesRecord(leaseWrongNS, nil, leaseKind, leaseWrongNS.Namespace, leaseWrongNS.Name, leaseResource, "", admission.Create, false, mynode),
+			attributes: admission.NewAttributesRecord(leaseWrongNS, nil, leaseKind, leaseWrongNS.Namespace, leaseWrongNS.Name, leaseResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode),
 			features:   leaseEnabledFeature,
 			err:        "forbidden: ",
 		},
 		{
 			name:       "disallowed update lease in namespace other than kube-node-lease - feature enabled",
-			attributes: admission.NewAttributesRecord(leaseWrongNS, leaseWrongNS, leaseKind, leaseWrongNS.Namespace, leaseWrongNS.Name, leaseResource, "", admission.Update, false, mynode),
+			attributes: admission.NewAttributesRecord(leaseWrongNS, leaseWrongNS, leaseKind, leaseWrongNS.Namespace, leaseWrongNS.Name, leaseResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode),
 			features:   leaseEnabledFeature,
 			err:        "forbidden: ",
 		},
 		{
 			name:       "disallowed delete lease in namespace other than kube-node-lease - feature enabled",
-			attributes: admission.NewAttributesRecord(nil, nil, leaseKind, leaseWrongNS.Namespace, leaseWrongNS.Name, leaseResource, "", admission.Delete, false, mynode),
+			attributes: admission.NewAttributesRecord(nil, nil, leaseKind, leaseWrongNS.Namespace, leaseWrongNS.Name, leaseResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode),
 			features:   leaseEnabledFeature,
 			err:        "forbidden: ",
 		},
 		{
 			name:       "disallowed create another node's lease - feature enabled",
-			attributes: admission.NewAttributesRecord(leaseWrongName, nil, leaseKind, leaseWrongName.Namespace, leaseWrongName.Name, leaseResource, "", admission.Create, false, mynode),
+			attributes: admission.NewAttributesRecord(leaseWrongName, nil, leaseKind, leaseWrongName.Namespace, leaseWrongName.Name, leaseResource, "", admission.Create, &metav1.CreateOptions{}, false,
mynode), features: leaseEnabledFeature, err: "forbidden: ", }, { name: "disallowed update another node's lease - feature enabled", - attributes: admission.NewAttributesRecord(leaseWrongName, leaseWrongName, leaseKind, leaseWrongName.Namespace, leaseWrongName.Name, leaseResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(leaseWrongName, leaseWrongName, leaseKind, leaseWrongName.Namespace, leaseWrongName.Name, leaseResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), features: leaseEnabledFeature, err: "forbidden: ", }, { name: "disallowed delete another node's lease - feature enabled", - attributes: admission.NewAttributesRecord(nil, nil, leaseKind, leaseWrongName.Namespace, leaseWrongName.Name, leaseResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, leaseKind, leaseWrongName.Namespace, leaseWrongName.Name, leaseResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), features: leaseEnabledFeature, err: "forbidden: ", }, { name: "allowed create node lease - feature enabled", - attributes: admission.NewAttributesRecord(lease, nil, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(lease, nil, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), features: leaseEnabledFeature, err: "", }, { name: "allowed update node lease - feature enabled", - attributes: admission.NewAttributesRecord(lease, lease, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Update, false, mynode), + attributes: admission.NewAttributesRecord(lease, lease, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), features: leaseEnabledFeature, err: "", }, { name: "allowed delete node lease - feature enabled", - attributes: admission.NewAttributesRecord(nil, nil, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Delete, false, mynode), + attributes: admission.NewAttributesRecord(nil, nil, leaseKind, lease.Namespace, lease.Name, leaseResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode), features: leaseEnabledFeature, err: "", }, // CSINode { name: "disallowed create CSINode - feature disabled", - attributes: admission.NewAttributesRecord(nodeInfo, nil, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(nodeInfo, nil, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), features: csiNodeInfoDisabledFeature, err: fmt.Sprintf("forbidden: disabled by feature gates %s and %s", features.KubeletPluginsWatcher, features.CSINodeInfo), }, { name: "disallowed create another node's CSINode - feature enabled", - attributes: admission.NewAttributesRecord(nodeInfoWrongName, nil, csiNodeKind, nodeInfoWrongName.Namespace, nodeInfoWrongName.Name, csiNodeResource, "", admission.Create, false, mynode), + attributes: admission.NewAttributesRecord(nodeInfoWrongName, nil, csiNodeKind, nodeInfoWrongName.Namespace, nodeInfoWrongName.Name, csiNodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode), features: csiNodeInfoEnabledFeature, err: "forbidden: ", }, { name: "disallowed update another node's CSINode - feature enabled", - attributes: 
admission.NewAttributesRecord(nodeInfoWrongName, nodeInfoWrongName, csiNodeKind, nodeInfoWrongName.Namespace, nodeInfoWrongName.Name, csiNodeResource, "", admission.Update, false, mynode),
+			attributes: admission.NewAttributesRecord(nodeInfoWrongName, nodeInfoWrongName, csiNodeKind, nodeInfoWrongName.Namespace, nodeInfoWrongName.Name, csiNodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode),
 			features:   csiNodeInfoEnabledFeature,
 			err:        "forbidden: ",
 		},
 		{
 			name:       "disallowed delete another node's CSINode - feature enabled",
-			attributes: admission.NewAttributesRecord(nil, nil, csiNodeKind, nodeInfoWrongName.Namespace, nodeInfoWrongName.Name, csiNodeResource, "", admission.Delete, false, mynode),
+			attributes: admission.NewAttributesRecord(nil, nil, csiNodeKind, nodeInfoWrongName.Namespace, nodeInfoWrongName.Name, csiNodeResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode),
 			features:   csiNodeInfoEnabledFeature,
 			err:        "forbidden: ",
 		},
 		{
 			name:       "allowed create node CSINode - feature enabled",
-			attributes: admission.NewAttributesRecord(nodeInfo, nil, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Create, false, mynode),
+			attributes: admission.NewAttributesRecord(nodeInfo, nil, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Create, &metav1.CreateOptions{}, false, mynode),
 			features:   csiNodeInfoEnabledFeature,
 			err:        "",
 		},
 		{
 			name:       "allowed update node CSINode - feature enabled",
-			attributes: admission.NewAttributesRecord(nodeInfo, nodeInfo, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Update, false, mynode),
+			attributes: admission.NewAttributesRecord(nodeInfo, nodeInfo, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode),
 			features:   csiNodeInfoEnabledFeature,
 			err:        "",
 		},
 		{
 			name:       "allowed delete node CSINode - feature enabled",
-			attributes: admission.NewAttributesRecord(nil, nil, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Delete, false, mynode),
+			attributes: admission.NewAttributesRecord(nil, nil, csiNodeKind, nodeInfo.Namespace, nodeInfo.Name, csiNodeResource, "", admission.Delete, &metav1.DeleteOptions{}, false, mynode),
 			features:   csiNodeInfoEnabledFeature,
 			err:        "",
 		},
diff --git a/plugin/pkg/admission/nodetaint/BUILD b/plugin/pkg/admission/nodetaint/BUILD
index 03a745fb27e..c01864b455d 100644
--- a/plugin/pkg/admission/nodetaint/BUILD
+++ b/plugin/pkg/admission/nodetaint/BUILD
@@ -22,6 +22,7 @@ go_test(
         "//pkg/apis/core:go_default_library",
         "//pkg/features:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate:go_default_library",
diff --git a/plugin/pkg/admission/nodetaint/admission_test.go b/plugin/pkg/admission/nodetaint/admission_test.go
index 6dad6f0dff9..6dd56c506d6 100644
--- a/plugin/pkg/admission/nodetaint/admission_test.go
+++ b/plugin/pkg/admission/nodetaint/admission_test.go
@@ -21,6 +21,7 @@ import (
 	"testing"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apiserver/pkg/admission"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/component-base/featuregate"
@@ -62,6 +63,7 @@ func Test_nodeTaints(t
*testing.T) { oldNode api.Node features featuregate.FeatureGate operation admission.Operation + options runtime.Object expectedTaints []api.Taint }{ { @@ -69,6 +71,7 @@ func Test_nodeTaints(t *testing.T) { node: myNodeObj, features: enableTaintNodesByCondition, operation: admission.Create, + options: &metav1.CreateOptions{}, expectedTaints: []api.Taint{notReadyTaint}, }, { @@ -76,6 +79,7 @@ func Test_nodeTaints(t *testing.T) { node: myNodeObj, features: disableTaintNodesByCondition, operation: admission.Create, + options: &metav1.CreateOptions{}, expectedTaints: nil, }, { @@ -83,6 +87,7 @@ func Test_nodeTaints(t *testing.T) { node: myTaintedNodeObj, features: enableTaintNodesByCondition, operation: admission.Create, + options: &metav1.CreateOptions{}, expectedTaints: []api.Taint{notReadyTaint}, }, { @@ -90,12 +95,13 @@ func Test_nodeTaints(t *testing.T) { node: myUnreadyNodeObj, features: enableTaintNodesByCondition, operation: admission.Create, + options: &metav1.CreateOptions{}, expectedTaints: []api.Taint{notReadyTaint}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - attributes := admission.NewAttributesRecord(&tt.node, &tt.oldNode, nodeKind, myNodeObj.Namespace, myNodeObj.Name, resource, "", tt.operation, false, mynode) + attributes := admission.NewAttributesRecord(&tt.node, &tt.oldNode, nodeKind, myNodeObj.Namespace, myNodeObj.Name, resource, "", tt.operation, tt.options, false, mynode) c := NewPlugin() if tt.features != nil { c.features = tt.features diff --git a/plugin/pkg/admission/podnodeselector/admission_test.go b/plugin/pkg/admission/podnodeselector/admission_test.go index 73f1da09834..b7596ed7645 100644 --- a/plugin/pkg/admission/podnodeselector/admission_test.go +++ b/plugin/pkg/admission/podnodeselector/admission_test.go @@ -161,7 +161,7 @@ func TestPodAdmission(t *testing.T) { handler.clusterNodeSelectors[namespace.Name] = test.whitelist pod.Spec = api.PodSpec{NodeSelector: test.podNodeSelector} - err := handler.Admit(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Admit(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if test.admit && err != nil { t.Errorf("Test: %s, expected no error but got: %s", test.testName, err) } else if !test.admit && err == nil { @@ -170,7 +170,7 @@ func TestPodAdmission(t *testing.T) { if test.admit && !labels.Equals(test.mergedNodeSelector, labels.Set(pod.Spec.NodeSelector)) { t.Errorf("Test: %s, expected: %s but got: %s", test.testName, test.mergedNodeSelector, pod.Spec.NodeSelector) } - err = handler.Validate(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if test.admit && err != nil { t.Errorf("Test: %s, expected no error but got: %s", test.testName, err) } else if !test.admit && err == nil { diff --git 
a/plugin/pkg/admission/podpreset/admission_test.go b/plugin/pkg/admission/podpreset/admission_test.go index 3dd8692fd99..babf068ce70 100644 --- a/plugin/pkg/admission/podpreset/admission_test.go +++ b/plugin/pkg/admission/podpreset/admission_test.go @@ -819,6 +819,7 @@ func admitPod(pod *api.Pod, pip *settingsv1alpha1.PodPreset) error { api.Resource("pods").WithVersion("version"), "", kadmission.Create, + &metav1.CreateOptions{}, false, &user.DefaultInfo{}, ) diff --git a/plugin/pkg/admission/podtolerationrestriction/admission_test.go b/plugin/pkg/admission/podtolerationrestriction/admission_test.go index 6cc63c98a32..9bfde7184a9 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission_test.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission_test.go @@ -266,7 +266,7 @@ func TestPodAdmission(t *testing.T) { pod := test.pod pod.Spec.Tolerations = test.podTolerations - err = handler.Admit(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if test.admit && err != nil { t.Errorf("Test: %s, expected no error but got: %s", test.testName, err) } else if !test.admit && err == nil { @@ -343,7 +343,7 @@ func TestIgnoreUpdatingInitializedPod(t *testing.T) { } // if the update of initialized pod is not ignored, an error will be returned because the pod's Tolerations conflicts with namespace's Tolerations. - err = handler.Admit(admission.NewAttributesRecord(pod, pod, api.Kind("Pod").WithVersion("version"), "testNamespace", pod.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(pod, pod, api.Kind("Pod").WithVersion("version"), "testNamespace", pod.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("expected no error, got: %v", err) } diff --git a/plugin/pkg/admission/priority/admission_test.go b/plugin/pkg/admission/priority/admission_test.go index e81ec4ea029..81bf5774524 100644 --- a/plugin/pkg/admission/priority/admission_test.go +++ b/plugin/pkg/admission/priority/admission_test.go @@ -30,7 +30,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" - "k8s.io/kubernetes/pkg/apis/scheduling/v1" + v1 "k8s.io/kubernetes/pkg/apis/scheduling/v1" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/features" ) @@ -155,6 +155,7 @@ func TestPriorityClassAdmission(t *testing.T) { scheduling.Resource("priorityclasses").WithVersion("version"), "", admission.Create, + &metav1.CreateOptions{}, false, test.userInfo, ) @@ -200,7 +201,7 @@ func TestDefaultPriority(t *testing.T) { name: "add a default class", classesBefore: []*scheduling.PriorityClass{nondefaultClass1}, classesAfter: []*scheduling.PriorityClass{nondefaultClass1, defaultClass1}, - attributes: admission.NewAttributesRecord(defaultClass1, nil, pcKind, "", defaultClass1.Name, pcResource, "", admission.Create, false, nil), + attributes: admission.NewAttributesRecord(defaultClass1, nil, pcKind, "", defaultClass1.Name, 
pcResource, "", admission.Create, &metav1.CreateOptions{}, false, nil), expectedDefaultBefore: scheduling.DefaultPriorityWhenNoDefaultClassExists, expectedDefaultNameBefore: "", expectedDefaultAfter: defaultClass1.Value, @@ -210,7 +211,7 @@ func TestDefaultPriority(t *testing.T) { name: "multiple default classes resolves to the minimum value among them", classesBefore: []*scheduling.PriorityClass{defaultClass1, defaultClass2}, classesAfter: []*scheduling.PriorityClass{defaultClass2}, - attributes: admission.NewAttributesRecord(nil, nil, pcKind, "", defaultClass1.Name, pcResource, "", admission.Delete, false, nil), + attributes: admission.NewAttributesRecord(nil, nil, pcKind, "", defaultClass1.Name, pcResource, "", admission.Delete, &metav1.DeleteOptions{}, false, nil), expectedDefaultBefore: defaultClass1.Value, expectedDefaultNameBefore: defaultClass1.Name, expectedDefaultAfter: defaultClass2.Value, @@ -220,7 +221,7 @@ func TestDefaultPriority(t *testing.T) { name: "delete default priority class", classesBefore: []*scheduling.PriorityClass{defaultClass1}, classesAfter: []*scheduling.PriorityClass{}, - attributes: admission.NewAttributesRecord(nil, nil, pcKind, "", defaultClass1.Name, pcResource, "", admission.Delete, false, nil), + attributes: admission.NewAttributesRecord(nil, nil, pcKind, "", defaultClass1.Name, pcResource, "", admission.Delete, &metav1.DeleteOptions{}, false, nil), expectedDefaultBefore: defaultClass1.Value, expectedDefaultNameBefore: defaultClass1.Name, expectedDefaultAfter: scheduling.DefaultPriorityWhenNoDefaultClassExists, @@ -230,7 +231,7 @@ func TestDefaultPriority(t *testing.T) { name: "update default class and remove its global default", classesBefore: []*scheduling.PriorityClass{defaultClass1}, classesAfter: []*scheduling.PriorityClass{&updatedDefaultClass1}, - attributes: admission.NewAttributesRecord(&updatedDefaultClass1, defaultClass1, pcKind, "", defaultClass1.Name, pcResource, "", admission.Update, false, nil), + attributes: admission.NewAttributesRecord(&updatedDefaultClass1, defaultClass1, pcKind, "", defaultClass1.Name, pcResource, "", admission.Update, &metav1.UpdateOptions{}, false, nil), expectedDefaultBefore: defaultClass1.Value, expectedDefaultNameBefore: defaultClass1.Name, expectedDefaultAfter: scheduling.DefaultPriorityWhenNoDefaultClassExists, @@ -600,6 +601,7 @@ func TestPodAdmission(t *testing.T) { api.Resource("pods").WithVersion("version"), "", admission.Create, + &metav1.CreateOptions{}, false, nil, ) diff --git a/plugin/pkg/admission/resourcequota/admission_test.go b/plugin/pkg/admission/resourcequota/admission_test.go index 8b6e45203ab..57e6aa948fe 100644 --- a/plugin/pkg/admission/resourcequota/admission_test.go +++ b/plugin/pkg/admission/resourcequota/admission_test.go @@ -153,7 +153,7 @@ func TestAdmissionIgnoresDelete(t *testing.T) { evaluator: evaluator, } namespace := "default" - err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", corev1.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", corev1.Resource("pods").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil) if err != nil { t.Errorf("ResourceQuota should admit all deletes: %v", err) } @@ -190,11 +190,11 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { } 
informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error because the pod exceeded allowed quota") } - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "subresource", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "subresource", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Did not expect an error because the action went to a subresource: %v", err) } @@ -235,7 +235,7 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { } informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -318,13 +318,13 @@ func TestAdmitDryRun(t *testing.T) { informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, true, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, true, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } newPod = validPod("too-large-pod", 1, getResourceRequirements(getResourceList("100m", "60Gi"), getResourceList("", ""))) - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, true, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, 
&metav1.CreateOptions{}, true, nil), nil) if err == nil { t.Errorf("Expected error but got none") } @@ -384,7 +384,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { Ports: []api.ServicePort{{Port: 1234}}, }, } - err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -485,7 +485,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { }, } - err := handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -544,7 +544,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { }, } - err := handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -641,7 +641,7 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { Ports: []api.ServicePort{{Port: 1234}}, }, } - err := handler.Validate(admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -724,7 +724,7 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { } informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, 
corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error exceeding quota") } @@ -770,7 +770,7 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // verify all values are specified as required on the quota newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error because the pod does not specify a memory limit") } @@ -821,7 +821,7 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", ""))) // Add to the lru cache so we do not do a live client lookup liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(30 * time.Second)), items: []*corev1.ResourceQuota{}}) - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Did not expect an error because the pod is in a different namespace than the quota") } @@ -890,7 +890,7 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) activeDeadlineSeconds := int64(30) newPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -994,7 +994,7 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { // create a pod that is best effort because it does not make a request for anything newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), 
newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1084,7 +1084,7 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { } informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1193,7 +1193,7 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { // unset the namespace newPod.ObjectMeta.Namespace = "" - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Got unexpected error: %v", err) } @@ -1236,14 +1236,14 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // verify quota rejects negative pvc storage requests newPvc := validPersistentVolumeClaim("not-allowed-pvc", getResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("-1Gi")}, api.ResourceList{})) - err := handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error because the pvc has negative storage usage") } // verify quota accepts non-negative pvc storage requests newPvc = validPersistentVolumeClaim("not-allowed-pvc", getResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("1Gi")}, api.ResourceList{})) - err = handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } 
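The mechanical change repeated throughout these hunks is a new operation-options argument to admission.NewAttributesRecord, inserted between the operation and the dry-run flag. A minimal sketch of the updated call shape, for illustration only, assuming the validPod, getResourceRequirements, getResourceList, and handler helpers already defined in this resourcequota test file:

	// Sketch of the call sites this patch updates. The options object generally
	// matches the operation: CreateOptions for Create, UpdateOptions for Update,
	// DeleteOptions for Delete.
	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	attrs := admission.NewAttributesRecord(
		newPod, nil, // object and old object (old object is nil on create)
		api.Kind("Pod").WithVersion("version"),
		newPod.Namespace, newPod.Name,
		corev1.Resource("pods").WithVersion("version"),
		"",                      // subresource
		admission.Create,        // operation
		&metav1.CreateOptions{}, // operation options: the argument added by this change
		false,                   // dryRun
		nil,                     // user info
	)
	err := handler.Validate(attrs, nil)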
@@ -1284,7 +1284,7 @@ func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { // create a pod that should pass existing quota newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1318,7 +1318,7 @@ func TestAdmitLimitedResourceNoQuota(t *testing.T) { evaluator: evaluator, } newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error for consuming a limited resource without quota.") } @@ -1352,7 +1352,7 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { evaluator: evaluator, } newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -1400,7 +1400,7 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { } indexer.Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1460,7 +1460,7 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { indexer.Add(resourceQuota1) indexer.Add(resourceQuota2) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, 
corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1508,7 +1508,7 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { } indexer.Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Fatalf("Expected an error since the quota did not cover cpu") } @@ -2169,7 +2169,7 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { if testCase.anotherQuota != nil { indexer.Add(testCase.anotherQuota) } - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if testCase.expErr == "" { if err != nil { t.Fatalf("Testcase, %v, failed with unexpected error: %v. 
ExpErr: %v", testCase.description, err, testCase.expErr) @@ -2221,7 +2221,7 @@ func TestAdmitZeroDeltaUsageWithoutCoveringQuota(t *testing.T) { Spec: api.ServiceSpec{Type: api.ServiceTypeLoadBalancer}, } - err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -2267,7 +2267,7 @@ func TestAdmitRejectIncreaseUsageWithoutCoveringQuota(t *testing.T) { Spec: api.ServiceSpec{Type: api.ServiceTypeLoadBalancer}, } - err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error for consuming a limited resource without quota.") } @@ -2313,7 +2313,7 @@ func TestAdmitAllowDecreaseUsageWithoutCoveringQuota(t *testing.T) { }, } - err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Expected no error for decreasing a limited resource without quota, got %v", err) } diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go index 6712ab1ee1c..9936bda0612 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -473,7 +473,7 @@ func TestAdmitPreferNonmutating(t *testing.T) { func TestFailClosedOnInvalidPod(t *testing.T) { plugin := NewTestAdmission(nil, nil) pod := &v1.Pod{} - attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, false, &user.DefaultInfo{}) + attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) err := 
plugin.Admit(attrs, nil) if err == nil { @@ -1776,7 +1776,7 @@ func testPSPAdmitAdvanced(testCaseName string, op kadmission.Operation, psps []* originalPod := pod.DeepCopy() plugin := NewTestAdmission(psps, authz) - attrs := kadmission.NewAttributesRecord(pod, oldPod, kapi.Kind("Pod").WithVersion("version"), pod.Namespace, "", kapi.Resource("pods").WithVersion("version"), "", op, false, userInfo) + attrs := kadmission.NewAttributesRecord(pod, oldPod, kapi.Kind("Pod").WithVersion("version"), pod.Namespace, "", kapi.Resource("pods").WithVersion("version"), "", op, nil, false, userInfo) annotations := make(map[string]string) attrs = &fakeAttributes{attrs, annotations} err := plugin.Admit(attrs, nil) @@ -2240,7 +2240,7 @@ func TestPolicyAuthorizationErrors(t *testing.T) { pod.Spec.SecurityContext.HostPID = true plugin := NewTestAdmission(tc.inPolicies, authz) - attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), ns, "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, false, &user.DefaultInfo{Name: userName}) + attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), ns, "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{Name: userName}) allowedPod, _, validationErrs, err := plugin.computeSecurityContext(attrs, pod, true, "") assert.Nil(t, allowedPod) @@ -2333,7 +2333,7 @@ func TestPreferValidatedPSP(t *testing.T) { pod.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = &allowPrivilegeEscalation plugin := NewTestAdmission(tc.inPolicies, authz) - attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "ns", "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Update, false, &user.DefaultInfo{Name: "test"}) + attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "ns", "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Update, &metav1.UpdateOptions{}, false, &user.DefaultInfo{Name: "test"}) _, pspName, validationErrs, err := plugin.computeSecurityContext(attrs, pod, false, tc.validatedPSPHint) assert.NoError(t, err) diff --git a/plugin/pkg/admission/securitycontext/scdeny/admission_test.go b/plugin/pkg/admission/securitycontext/scdeny/admission_test.go index 65d7ad1d169..0111341d00b 100644 --- a/plugin/pkg/admission/securitycontext/scdeny/admission_test.go +++ b/plugin/pkg/admission/securitycontext/scdeny/admission_test.go @@ -82,7 +82,7 @@ func TestAdmission(t *testing.T) { p.Spec.SecurityContext = tc.podSc p.Spec.Containers[0].SecurityContext = tc.sc - err := handler.Validate(admission.NewAttributesRecord(p, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(p, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", nil, false, nil), nil) if err != nil && !tc.expectError { t.Errorf("%v: unexpected error: %v", tc.name, err) } else if err == nil && tc.expectError { @@ -96,7 +96,7 @@ func TestAdmission(t *testing.T) { p.Spec.InitContainers = p.Spec.Containers p.Spec.Containers = nil - err = handler.Validate(admission.NewAttributesRecord(p, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", false, nil), nil) + err = 
handler.Validate(admission.NewAttributesRecord(p, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", nil, false, nil), nil) if err != nil && !tc.expectError { t.Errorf("%v: unexpected error: %v", tc.name, err) } else if err == nil && tc.expectError { @@ -140,7 +140,7 @@ func TestPodSecurityContextAdmission(t *testing.T) { } for _, test := range tests { pod.Spec.SecurityContext = &test.securityContext - err := handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", false, nil), nil) + err := handler.Validate(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), "foo", "name", api.Resource("pods").WithVersion("version"), "", "ignored", nil, false, nil), nil) if test.errorExpected && err == nil { t.Errorf("Expected error for security context %+v but did not get an error", test.securityContext) diff --git a/plugin/pkg/admission/serviceaccount/admission_test.go b/plugin/pkg/admission/serviceaccount/admission_test.go index 32c0c020728..712ceb60b6b 100644 --- a/plugin/pkg/admission/serviceaccount/admission_test.go +++ b/plugin/pkg/admission/serviceaccount/admission_test.go @@ -64,7 +64,7 @@ func TestIgnoresNonCreate(t *testing.T) { func TestIgnoresNonPodResource(t *testing.T) { pod := &api.Pod{} - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("CustomResource").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("CustomResource").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err != nil { t.Errorf("Expected non-pod resource allowed, got err: %v", err) @@ -72,7 +72,7 @@ func TestIgnoresNonPodResource(t *testing.T) { } func TestIgnoresNilObject(t *testing.T) { - attrs := admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err != nil { t.Errorf("Expected nil object allowed allowed, got err: %v", err) @@ -81,7 +81,7 @@ func TestIgnoresNilObject(t *testing.T) { func TestIgnoresNonPodObject(t *testing.T) { obj := &api.Namespace{} - attrs := admission.NewAttributesRecord(obj, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(obj, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err != nil { t.Errorf("Expected non pod object allowed, got err: %v", err) @@ -101,7 +101,7 @@ func TestIgnoresMirrorPod(t *testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := 
admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err != nil { t.Errorf("Expected mirror pod without service account or secrets allowed, got err: %v", err) @@ -119,7 +119,7 @@ func TestRejectsMirrorPodWithServiceAccount(t *testing.T) { ServiceAccountName: "default", }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err == nil { t.Errorf("Expected a mirror pod to be prevented from referencing a service account") @@ -139,7 +139,7 @@ func TestRejectsMirrorPodWithSecretVolumes(t *testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err == nil { t.Errorf("Expected a mirror pod to be prevented from referencing a secret volume") @@ -164,7 +164,7 @@ func TestRejectsMirrorPodWithServiceAccountTokenVolumeProjections(t *testing.T) }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := NewServiceAccount().Admit(attrs, nil) if err == nil { t.Errorf("Expected a mirror pod to be prevented from referencing a ServiceAccountToken volume projection") @@ -189,7 +189,7 @@ func TestAssignsDefaultServiceAccountAndToleratesMissingAPIToken(t *testing.T) { }) pod := &api.Pod{} - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -217,7 +217,7 @@ func TestAssignsDefaultServiceAccountAndRejectsMissingAPIToken(t *testing.T) { }) pod := &api.Pod{} - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err == nil || !errors.IsServerTimeout(err) { t.Errorf("Expected server timeout 
error for missing API token: %v", err) @@ -242,7 +242,7 @@ func TestFetchesUncachedServiceAccount(t *testing.T) { admit.RequireAPIToken = false pod := &api.Pod{} - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -264,7 +264,7 @@ func TestDeniesInvalidServiceAccount(t *testing.T) { admit.SetExternalKubeInformerFactory(informerFactory) pod := &api.Pod{} - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err == nil { t.Errorf("Expected error for missing service account, got none") @@ -330,7 +330,7 @@ func TestAutomountsAPIToken(t *testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -359,7 +359,7 @@ func TestAutomountsAPIToken(t *testing.T) { }, }, } - attrs = admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs = admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err != nil { t.Errorf("Unexpected error: %v", err) } @@ -441,7 +441,7 @@ func TestRespectsExistingMount(t *testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -471,7 +471,7 @@ func TestRespectsExistingMount(t *testing.T) { }, }, } - attrs = admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs = admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err != nil { t.Errorf("Unexpected error: %v", err) } @@ -517,7 +517,7 @@ func TestAllowsReferencedSecret(t 
*testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod1, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod1, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err != nil { t.Errorf("Unexpected error: %v", err) } @@ -541,7 +541,7 @@ func TestAllowsReferencedSecret(t *testing.T) { }, }, } - attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err != nil { t.Errorf("Unexpected error: %v", err) } @@ -565,7 +565,7 @@ func TestAllowsReferencedSecret(t *testing.T) { }, }, } - attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err != nil { t.Errorf("Unexpected error: %v", err) } @@ -595,7 +595,7 @@ func TestRejectsUnreferencedSecretVolumes(t *testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod1, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod1, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err == nil { t.Errorf("Expected rejection for using a secret the service account does not reference") } @@ -619,7 +619,7 @@ func TestRejectsUnreferencedSecretVolumes(t *testing.T) { }, }, } - attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") { t.Errorf("Unexpected error: %v", err) } @@ -643,7 +643,7 @@ func TestRejectsUnreferencedSecretVolumes(t *testing.T) { }, }, } - attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") { t.Errorf("Unexpected error: %v", err) } @@ -674,7 +674,7 @@ func TestAllowUnreferencedSecretVolumesForPermissiveSAs(t 
*testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err == nil { t.Errorf("Expected rejection for using a secret the service account does not reference") @@ -706,7 +706,7 @@ func TestAllowsReferencedImagePullSecrets(t *testing.T) { ImagePullSecrets: []api.LocalObjectReference{{Name: "foo"}}, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -735,7 +735,7 @@ func TestRejectsUnreferencedImagePullSecrets(t *testing.T) { ImagePullSecrets: []api.LocalObjectReference{{Name: "foo"}}, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err == nil { t.Errorf("Expected rejection for using a secret the service account does not reference") @@ -768,7 +768,7 @@ func TestDoNotAddImagePullSecrets(t *testing.T) { ImagePullSecrets: []api.LocalObjectReference{{Name: "foo"}}, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -802,7 +802,7 @@ func TestAddImagePullSecrets(t *testing.T) { informerFactory.Core().V1().ServiceAccounts().Informer().GetStore().Add(sa) pod := &api.Pod{} - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) @@ -883,7 +883,7 @@ func TestMultipleReferencedSecrets(t *testing.T) { }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, 
&metav1.CreateOptions{}, false, nil) if err := admit.Admit(attrs, nil); err != nil { t.Fatal(err) } @@ -1037,7 +1037,7 @@ func TestAutomountIsBackwardsCompatible(t *testing.T) { }, }, } - attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) + attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) err := admit.Admit(attrs, nil) if err != nil { t.Errorf("Unexpected error: %v", err) diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go index c066d5d7bf5..c551dfef186 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go +++ b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go @@ -23,7 +23,7 @@ import ( "sort" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -756,7 +756,7 @@ func Test_PVLAdmission(t *testing.T) { setPVLabeler(testcase.handler, testcase.pvlabeler) handler := admission.NewChainHandler(testcase.handler) - err := handler.Admit(admission.NewAttributesRecord(testcase.preAdmissionPV, nil, api.Kind("PersistentVolume").WithVersion("version"), testcase.preAdmissionPV.Namespace, testcase.preAdmissionPV.Name, api.Resource("persistentvolumes").WithVersion("version"), "", admission.Create, false, nil), nil) + err := handler.Admit(admission.NewAttributesRecord(testcase.preAdmissionPV, nil, api.Kind("PersistentVolume").WithVersion("version"), testcase.preAdmissionPV.Namespace, testcase.preAdmissionPV.Name, api.Resource("persistentvolumes").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if !reflect.DeepEqual(err, testcase.err) { t.Logf("expected error: %q", testcase.err) t.Logf("actual error: %q", err) diff --git a/plugin/pkg/admission/storage/persistentvolume/resize/admission_test.go b/plugin/pkg/admission/storage/persistentvolume/resize/admission_test.go index e0cb263bddb..699a24abff3 100644 --- a/plugin/pkg/admission/storage/persistentvolume/resize/admission_test.go +++ b/plugin/pkg/admission/storage/persistentvolume/resize/admission_test.go @@ -254,7 +254,8 @@ func TestPVCResizeAdmission(t *testing.T) { for _, tc := range tests { operation := admission.Update - attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, false, nil) + operationOptions := &metav1.CreateOptions{} + attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, operationOptions, false, nil) err := ctrl.Validate(attributes, nil) if !tc.checkError(err) { diff --git a/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go b/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go index 45152522f2b..3d7eaf18ed5 100644 --- a/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go +++ b/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go @@ -208,6 +208,7 @@ func TestAdmission(t *testing.T) { 
api.Resource("persistentvolumeclaims").WithVersion("version"), "", // subresource admission.Create, + &metav1.CreateOptions{}, false, // dryRun nil, // userInfo ) diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go index e5e9f8108b1..15f77f8af70 100644 --- a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go +++ b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go @@ -131,6 +131,7 @@ func TestAdmit(t *testing.T) { test.resource, "", // subresource admission.Create, + &metav1.CreateOptions{}, false, // dryRun nil, // userInfo ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/audit_test.go b/staging/src/k8s.io/apiserver/pkg/admission/audit_test.go index 0b04cf2646a..4ead632f6ed 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/audit_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/audit_test.go @@ -64,7 +64,7 @@ func (h fakeHandler) Handles(o Operation) bool { } func attributes() Attributes { - return NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{}, "", "", false, nil) + return NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{}, "", "", nil, false, nil) } func TestWithAudit(t *testing.T) { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/chain_test.go b/staging/src/k8s.io/apiserver/pkg/admission/chain_test.go index 20ab2faad2a..490a0af171e 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/chain_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/chain_test.go @@ -21,6 +21,7 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -63,6 +64,7 @@ func TestAdmitAndValidate(t *testing.T) { name string ns string operation Operation + options runtime.Object chain chainAdmissionHandler accept bool calls map[string]bool @@ -71,6 +73,7 @@ func TestAdmitAndValidate(t *testing.T) { name: "all accept", ns: sysns, operation: Create, + options: &metav1.CreateOptions{}, chain: []Interface{ makeHandler("a", true, Update, Delete, Create), makeHandler("b", true, Delete, Create), @@ -83,6 +86,7 @@ func TestAdmitAndValidate(t *testing.T) { name: "ignore handler", ns: otherns, operation: Create, + options: &metav1.CreateOptions{}, chain: []Interface{ makeHandler("a", true, Update, Delete, Create), makeHandler("b", false, Delete), @@ -95,6 +99,7 @@ func TestAdmitAndValidate(t *testing.T) { name: "ignore all", ns: sysns, operation: Connect, + options: nil, chain: []Interface{ makeHandler("a", true, Update, Delete, Create), makeHandler("b", false, Delete), @@ -107,6 +112,7 @@ func TestAdmitAndValidate(t *testing.T) { name: "reject one", ns: otherns, operation: Delete, + options: &metav1.DeleteOptions{}, chain: []Interface{ makeHandler("a", true, Update, Delete, Create), makeHandler("b", false, Delete), @@ -119,7 +125,7 @@ func TestAdmitAndValidate(t *testing.T) { for _, test := range tests { t.Logf("testcase = %s", test.name) // call admit and check that validate was not called at all - err := test.chain.Admit(NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, false, nil), nil) + err := test.chain.Admit(NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, test.options, false, nil), nil) accepted := 
(err == nil) if accepted != test.accept { t.Errorf("unexpected result of admit call: %v", accepted) @@ -140,7 +146,7 @@ func TestAdmitAndValidate(t *testing.T) { } // call validate and check that admit was not called at all - err = test.chain.Validate(NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, false, nil), nil) + err = test.chain.Validate(NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, test.options, false, nil), nil) accepted = (err == nil) if accepted != test.accept { t.Errorf("unexpected result of validate call: %v\n", accepted) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/errors_test.go b/staging/src/k8s.io/apiserver/pkg/admission/errors_test.go index 871d7a57156..5c60c91f605 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/errors_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/errors_test.go @@ -36,6 +36,7 @@ func TestNewForbidden(t *testing.T) { schema.GroupVersionResource{Group: "foo", Version: "bar", Resource: "baz"}, "", Create, + nil, false, nil) err := errors.New("some error") diff --git a/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD index 7df816a7a21..e0f92b550d1 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD @@ -20,6 +20,8 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go index c0ce0bc1ef3..2934b624b9b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" ) @@ -28,7 +30,7 @@ import ( var ( kind = schema.GroupVersionKind{Group: "kgroup", Version: "kversion", Kind: "kind"} resource = schema.GroupVersionResource{Group: "rgroup", Version: "rversion", Resource: "resource"} - attr = admission.NewAttributesRecord(nil, nil, kind, "ns", "name", resource, "subresource", admission.Create, false, nil) + attr = admission.NewAttributesRecord(nil, nil, kind, "ns", "name", resource, "subresource", admission.Create, &metav1.CreateOptions{}, false, nil) ) func TestObserveAdmissionStep(t *testing.T) { @@ -85,6 +87,7 @@ func TestWithMetrics(t *testing.T) { name string ns string operation admission.Operation + options runtime.Object handler admission.Interface admit, validate bool } @@ -93,6 +96,7 @@ func TestWithMetrics(t *testing.T) { "both-interfaces-admit-and-validate", "some-ns", admission.Create, + &metav1.CreateOptions{}, &mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true, true}, true, true, }, @@ -100,6 +104,7 @@ func TestWithMetrics(t *testing.T) { "both-interfaces-dont-admit", "some-ns", admission.Create, + 
&metav1.CreateOptions{}, &mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false, true}, false, true, }, @@ -107,6 +112,7 @@ func TestWithMetrics(t *testing.T) { "both-interfaces-admit-dont-validate", "some-ns", admission.Create, + &metav1.CreateOptions{}, &mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true, false}, true, false, }, @@ -114,6 +120,7 @@ func TestWithMetrics(t *testing.T) { "validate-interfaces-validate", "some-ns", admission.Create, + &metav1.CreateOptions{}, &validatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true}, true, true, }, @@ -121,6 +128,7 @@ func TestWithMetrics(t *testing.T) { "validate-interfaces-dont-validate", "some-ns", admission.Create, + &metav1.CreateOptions{}, &validatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false}, true, false, }, @@ -128,6 +136,7 @@ func TestWithMetrics(t *testing.T) { "mutating-interfaces-admit", "some-ns", admission.Create, + &metav1.CreateOptions{}, &mutatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true}, true, true, }, @@ -135,6 +144,7 @@ func TestWithMetrics(t *testing.T) { "mutating-interfaces-dont-admit", "some-ns", admission.Create, + &metav1.CreateOptions{}, &mutatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false}, false, true, }, @@ -144,7 +154,7 @@ func TestWithMetrics(t *testing.T) { h := WithMetrics(test.handler, Metrics.ObserveAdmissionController, test.name) // test mutation - err := h.(admission.MutationInterface).Admit(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, false, nil), nil) + err := h.(admission.MutationInterface).Admit(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, test.options, false, nil), nil) if test.admit && err != nil { t.Errorf("expected admit to succeed, but failed: %v", err) continue @@ -169,7 +179,7 @@ func TestWithMetrics(t *testing.T) { } // test validation - err = h.(admission.ValidationInterface).Validate(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, false, nil), nil) + err = h.(admission.ValidationInterface).Validate(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, test.options, false, nil), nil) if test.validate && err != nil { t.Errorf("expected admit to succeed, but failed: %v", err) continue diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go index 593c107dbf0..ccc17392ee0 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -104,7 +104,7 @@ func TestAccessReviewCheckOnMissingNamespace(t *testing.T) { } informerFactory.Start(wait.NeverStop) - err = handler.Admit(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", 
Kind: "LocalSubjectAccesReview"}, namespace, "", schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "localsubjectaccessreviews"}, "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "LocalSubjectAccesReview"}, namespace, "", schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "localsubjectaccessreviews"}, "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Error(err) } @@ -124,7 +124,7 @@ func TestAdmissionNamespaceDoesNotExist(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { actions := "" for _, action := range mockClient.Actions() { @@ -134,19 +134,19 @@ func TestAdmissionNamespaceDoesNotExist(t *testing.T) { } // verify create operations in the namespace cause an error - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected error rejecting creates in a namespace when it is missing") } // verify update operations in the namespace cause an error - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected error rejecting updates in a namespace when it is missing") } // verify delete operations in the namespace can proceed - err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error returned from admission handler: %v", err) } @@ -166,7 +166,7 @@ func TestAdmissionNamespaceActive(t *testing.T) { informerFactory.Start(wait.NeverStop) pod := newPod(namespace) - err = 
handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("unexpected error returned from admission handler") } @@ -187,31 +187,31 @@ func TestAdmissionNamespaceTerminating(t *testing.T) { pod := newPod(namespace) // verify create operations in the namespace cause an error - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected error rejecting creates in a namespace when it is terminating") } // verify update operations in the namespace can proceed - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error returned from admission handler: %v", err) } // verify delete operations in the namespace can proceed - err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error returned from admission handler: %v", err) } // verify delete of namespace default can never proceed - err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Namespace").GroupKind().WithVersion("version"), "", metav1.NamespaceDefault, v1.Resource("namespaces").WithVersion("version"), "", admission.Delete, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Namespace").GroupKind().WithVersion("version"), "", metav1.NamespaceDefault, v1.Resource("namespaces").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected an error that this namespace can never be deleted") } // verify delete of namespace other than default can proceed - err = handler.Admit(admission.NewAttributesRecord(nil, nil, 
v1.SchemeGroupVersion.WithKind("Namespace").GroupKind().WithVersion("version"), "", "other", v1.Resource("namespaces").WithVersion("version"), "", admission.Delete, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Namespace").GroupKind().WithVersion("version"), "", "other", v1.Resource("namespaces").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil) if err != nil { t.Errorf("Did not expect an error %v", err) } @@ -238,7 +238,7 @@ func TestAdmissionNamespaceForceLiveLookup(t *testing.T) { pod := newPod(namespace) // verify create operations in the namespace is allowed - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err != nil { t.Errorf("Unexpected error rejecting creates in an active namespace") } @@ -248,7 +248,7 @@ func TestAdmissionNamespaceForceLiveLookup(t *testing.T) { getCalls = 0 // verify delete of namespace can proceed - err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Namespace").GroupKind().WithVersion("version"), namespace, namespace, v1.Resource("namespaces").WithVersion("version"), "", admission.Delete, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Namespace").GroupKind().WithVersion("version"), namespace, namespace, v1.Resource("namespaces").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil) if err != nil { t.Errorf("Expected namespace deletion to be allowed") } @@ -261,7 +261,7 @@ func TestAdmissionNamespaceForceLiveLookup(t *testing.T) { phases[namespace] = v1.NamespaceTerminating // verify create operations in the namespace cause an error - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected error rejecting creates in a namespace right after deleting it") } @@ -274,7 +274,7 @@ func TestAdmissionNamespaceForceLiveLookup(t *testing.T) { fakeClock.Step(forceLiveLookupTTL) // verify create operations in the namespace cause an error - err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if err == nil { t.Errorf("Expected error 
rejecting creates in a namespace right after deleting it") } @@ -287,7 +287,7 @@ func TestAdmissionNamespaceForceLiveLookup(t *testing.T) { fakeClock.Step(time.Millisecond) // verify create operations in the namespace don't force a live lookup after the timeout - handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil), nil) + handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil) if getCalls != 0 { t.Errorf("Expected no live lookup of the namespace at t=forceLiveLookupTTL+1ms, got %d", getCalls) } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher_test.go index 53bcf0124dc..a38f6387d67 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher_test.go @@ -122,7 +122,7 @@ func TestDispatch(t *testing.T) { plugin: &Plugin{}, } attr := generic.VersionedAttributes{ - Attributes: admission.NewAttributesRecord(test.out, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{}, "", admission.Operation(""), false, nil), + Attributes: admission.NewAttributesRecord(test.out, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{}, "", admission.Operation(""), nil, false, nil), VersionedOldObject: nil, VersionedObject: test.in, } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher_test.go index bf7dee828c9..616ff7bbe31 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher_test.go @@ -75,27 +75,27 @@ func TestGetNamespaceLabels(t *testing.T) { }{ { name: "request is for creating namespace, the labels should be from the object itself", - attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, "", namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Create, false, nil), + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, "", namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Create, &metav1.CreateOptions{}, false, nil), expectedLabels: namespace2Labels, }, { name: "request is for updating namespace, the labels should be from the new object", - attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace2.Name, namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Update, false, nil), + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace2.Name, namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Update, &metav1.UpdateOptions{}, false, nil), expectedLabels: namespace2Labels, }, { name: "request is for deleting namespace, the labels should be from the cache", - attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace1.Name, 
namespace1.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Delete, false, nil), + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace1.Name, namespace1.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Delete, &metav1.DeleteOptions{}, false, nil), expectedLabels: namespace1Labels, }, { name: "request is for namespace/finalizer", - attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "namespaces"}, "finalizers", admission.Create, false, nil), + attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "namespaces"}, "finalizers", admission.Create, &metav1.CreateOptions{}, false, nil), expectedLabels: namespace1Labels, }, { name: "request is for pod", - attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "pods"}, "", admission.Create, false, nil), + attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "pods"}, "", admission.Create, &metav1.CreateOptions{}, false, nil), expectedLabels: namespace1Labels, }, } @@ -117,7 +117,7 @@ func TestNotExemptClusterScopedResource(t *testing.T) { hook := ®istrationv1beta1.Webhook{ NamespaceSelector: &metav1.LabelSelector{}, } - attr := admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, "", "mock-name", schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "", admission.Create, false, nil) + attr := admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, "", "mock-name", schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "", admission.Create, &metav1.CreateOptions{}, false, nil) matcher := Matcher{} matches, err := matcher.MatchNamespaceSelector(hook, attr) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD index e48ba5583f1..9b22e407f13 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD @@ -20,6 +20,8 @@ go_test( embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules_test.go index 85fba433eb7..ac09ee9f6f8 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules_test.go @@ -21,6 +21,8 @@ import ( "testing" adreg "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" @@ -33,37 +35,40 @@ type ruleTest 
struct { } type tests map[string]ruleTest -func a(group, version, resource, subresource, name string, operation admission.Operation) admission.Attributes { +func a(group, version, resource, subresource, name string, operation admission.Operation, operationOptions runtime.Object) admission.Attributes { return admission.NewAttributesRecord( nil, nil, schema.GroupVersionKind{Group: group, Version: version, Kind: "k" + resource}, "ns", name, schema.GroupVersionResource{Group: group, Version: version, Resource: resource}, subresource, operation, + operationOptions, false, nil, ) } -func namespacedAttributes(group, version, resource, subresource, name string, operation admission.Operation) admission.Attributes { +func namespacedAttributes(group, version, resource, subresource, name string, operation admission.Operation, operationOptions runtime.Object) admission.Attributes { return admission.NewAttributesRecord( nil, nil, schema.GroupVersionKind{Group: group, Version: version, Kind: "k" + resource}, "ns", name, schema.GroupVersionResource{Group: group, Version: version, Resource: resource}, subresource, operation, + operationOptions, false, nil, ) } -func clusterScopedAttributes(group, version, resource, subresource, name string, operation admission.Operation) admission.Attributes { +func clusterScopedAttributes(group, version, resource, subresource, name string, operation admission.Operation, operationOptions runtime.Object) admission.Attributes { return admission.NewAttributesRecord( nil, nil, schema.GroupVersionKind{Group: group, Version: version, Kind: "k" + resource}, "", name, schema.GroupVersionResource{Group: group, Version: version, Resource: resource}, subresource, operation, + operationOptions, false, nil, ) @@ -82,7 +87,7 @@ func TestGroup(t *testing.T) { }, }, match: attrList( - a("g", "v", "r", "", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), ), }, "exact": { @@ -92,12 +97,12 @@ func TestGroup(t *testing.T) { }, }, match: attrList( - a("g1", "v", "r", "", "name", admission.Create), - a("g2", "v2", "r3", "", "name", admission.Create), + a("g1", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g2", "v2", "r3", "", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("g3", "v", "r", "", "name", admission.Create), - a("g4", "v", "r", "", "name", admission.Create), + a("g3", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g4", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), ), }, } @@ -127,7 +132,7 @@ func TestVersion(t *testing.T) { }, }, match: attrList( - a("g", "v", "r", "", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), ), }, "exact": { @@ -137,12 +142,12 @@ func TestVersion(t *testing.T) { }, }, match: attrList( - a("g1", "v1", "r", "", "name", admission.Create), - a("g2", "v2", "r", "", "name", admission.Create), + a("g1", "v1", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g2", "v2", "r", "", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("g1", "v3", "r", "", "name", admission.Create), - a("g2", "v4", "r", "", "name", admission.Create), + a("g1", "v3", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g2", "v4", "r", "", "name", admission.Create, &metav1.CreateOptions{}), ), }, } @@ -167,65 +172,65 @@ func TestOperation(t *testing.T) { "wildcard": { rule: adreg.RuleWithOperations{Operations: 
[]adreg.OperationType{adreg.OperationAll}}, match: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "", "name", admission.Update), - a("g", "v", "r", "", "name", admission.Delete), - a("g", "v", "r", "", "name", admission.Connect), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "", "name", admission.Update, &metav1.UpdateOptions{}), + a("g", "v", "r", "", "name", admission.Delete, &metav1.DeleteOptions{}), + a("g", "v", "r", "", "name", admission.Connect, nil), ), }, "create": { rule: adreg.RuleWithOperations{Operations: []adreg.OperationType{adreg.Create}}, match: attrList( - a("g", "v", "r", "", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("g", "v", "r", "", "name", admission.Update), - a("g", "v", "r", "", "name", admission.Delete), - a("g", "v", "r", "", "name", admission.Connect), + a("g", "v", "r", "", "name", admission.Update, &metav1.UpdateOptions{}), + a("g", "v", "r", "", "name", admission.Delete, &metav1.DeleteOptions{}), + a("g", "v", "r", "", "name", admission.Connect, nil), ), }, "update": { rule: adreg.RuleWithOperations{Operations: []adreg.OperationType{adreg.Update}}, match: attrList( - a("g", "v", "r", "", "name", admission.Update), + a("g", "v", "r", "", "name", admission.Update, &metav1.UpdateOptions{}), ), noMatch: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "", "name", admission.Delete), - a("g", "v", "r", "", "name", admission.Connect), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "", "name", admission.Delete, &metav1.DeleteOptions{}), + a("g", "v", "r", "", "name", admission.Connect, nil), ), }, "delete": { rule: adreg.RuleWithOperations{Operations: []adreg.OperationType{adreg.Delete}}, match: attrList( - a("g", "v", "r", "", "name", admission.Delete), + a("g", "v", "r", "", "name", admission.Delete, &metav1.DeleteOptions{}), ), noMatch: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "", "name", admission.Update), - a("g", "v", "r", "", "name", admission.Connect), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "", "name", admission.Update, &metav1.UpdateOptions{}), + a("g", "v", "r", "", "name", admission.Connect, nil), ), }, "connect": { rule: adreg.RuleWithOperations{Operations: []adreg.OperationType{adreg.Connect}}, match: attrList( - a("g", "v", "r", "", "name", admission.Connect), + a("g", "v", "r", "", "name", admission.Connect, nil), ), noMatch: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "", "name", admission.Update), - a("g", "v", "r", "", "name", admission.Delete), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "", "name", admission.Update, &metav1.UpdateOptions{}), + a("g", "v", "r", "", "name", admission.Delete, &metav1.DeleteOptions{}), ), }, "multiple": { rule: adreg.RuleWithOperations{Operations: []adreg.OperationType{adreg.Update, adreg.Delete}}, match: attrList( - a("g", "v", "r", "", "name", admission.Update), - a("g", "v", "r", "", "name", admission.Delete), + a("g", "v", "r", "", "name", admission.Update, &metav1.UpdateOptions{}), + a("g", "v", "r", "", "name", admission.Delete, &metav1.DeleteOptions{}), ), noMatch: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "", "name", admission.Connect), + a("g", "v", "r", "", 
"name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "", "name", admission.Connect, nil), ), }, } @@ -254,12 +259,12 @@ func TestResource(t *testing.T) { }, }, match: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("2", "v", "r2", "", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r2", "", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("g", "v", "r", "exec", "name", admission.Create), - a("2", "v", "r2", "proxy", "name", admission.Create), + a("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r2", "proxy", "name", admission.Create, &metav1.CreateOptions{}), ), }, "r & subresources": { @@ -269,12 +274,12 @@ func TestResource(t *testing.T) { }, }, match: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "exec", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("2", "v", "r2", "", "name", admission.Create), - a("2", "v", "r2", "proxy", "name", admission.Create), + a("2", "v", "r2", "", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r2", "proxy", "name", admission.Create, &metav1.CreateOptions{}), ), }, "r & subresources or r2": { @@ -284,12 +289,12 @@ func TestResource(t *testing.T) { }, }, match: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("g", "v", "r", "exec", "name", admission.Create), - a("2", "v", "r2", "", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r2", "", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("2", "v", "r2", "proxy", "name", admission.Create), + a("2", "v", "r2", "proxy", "name", admission.Create, &metav1.CreateOptions{}), ), }, "proxy or exec": { @@ -299,14 +304,14 @@ func TestResource(t *testing.T) { }, }, match: attrList( - a("g", "v", "r", "exec", "name", admission.Create), - a("2", "v", "r2", "proxy", "name", admission.Create), - a("2", "v", "r3", "proxy", "name", admission.Create), + a("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r2", "proxy", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r3", "proxy", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - a("g", "v", "r", "", "name", admission.Create), - a("2", "v", "r2", "", "name", admission.Create), - a("2", "v", "r4", "scale", "name", admission.Create), + a("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r2", "", "name", admission.Create, &metav1.CreateOptions{}), + a("2", "v", "r4", "scale", "name", admission.Create, &metav1.CreateOptions{}), ), }, } @@ -339,16 +344,16 @@ func TestScope(t *testing.T) { }, }, match: attrList( - clusterScopedAttributes("g", "v", "r", "", "name", admission.Create), - clusterScopedAttributes("g", "v", "r", "exec", "name", admission.Create), - clusterScopedAttributes("", "v1", "namespaces", "", "ns", admission.Create), - clusterScopedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create), - namespacedAttributes("", "v1", "namespaces", "", "ns", admission.Create), - namespacedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create), + 
clusterScopedAttributes("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("", "v1", "namespaces", "", "ns", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("", "v1", "namespaces", "", "ns", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - namespacedAttributes("g", "v", "r", "", "name", admission.Create), - namespacedAttributes("g", "v", "r", "exec", "name", admission.Create), + namespacedAttributes("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), ), }, "namespace scope": { @@ -359,16 +364,16 @@ func TestScope(t *testing.T) { }, }, match: attrList( - namespacedAttributes("g", "v", "r", "", "name", admission.Create), - namespacedAttributes("g", "v", "r", "exec", "name", admission.Create), + namespacedAttributes("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList( - clusterScopedAttributes("", "v1", "namespaces", "", "ns", admission.Create), - clusterScopedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create), - namespacedAttributes("", "v1", "namespaces", "", "ns", admission.Create), - namespacedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create), - clusterScopedAttributes("g", "v", "r", "", "name", admission.Create), - clusterScopedAttributes("g", "v", "r", "exec", "name", admission.Create), + clusterScopedAttributes("", "v1", "namespaces", "", "ns", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("", "v1", "namespaces", "", "ns", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), ), }, "all scopes": { @@ -379,14 +384,14 @@ func TestScope(t *testing.T) { }, }, match: attrList( - namespacedAttributes("g", "v", "r", "", "name", admission.Create), - namespacedAttributes("g", "v", "r", "exec", "name", admission.Create), - clusterScopedAttributes("g", "v", "r", "", "name", admission.Create), - clusterScopedAttributes("g", "v", "r", "exec", "name", admission.Create), - clusterScopedAttributes("", "v1", "namespaces", "", "ns", admission.Create), - clusterScopedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create), - namespacedAttributes("", "v1", "namespaces", "", "ns", admission.Create), - namespacedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create), + namespacedAttributes("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("g", "v", "r", "", "name", admission.Create, &metav1.CreateOptions{}), + 
clusterScopedAttributes("g", "v", "r", "exec", "name", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("", "v1", "namespaces", "", "ns", admission.Create, &metav1.CreateOptions{}), + clusterScopedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("", "v1", "namespaces", "", "ns", admission.Create, &metav1.CreateOptions{}), + namespacedAttributes("", "v1", "namespaces", "finalize", "ns", admission.Create, &metav1.CreateOptions{}), ), noMatch: attrList(), }, diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go index b99d8ab45b1..250ce00ab04 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go @@ -101,9 +101,10 @@ func newAttributesRecord(object metav1.Object, oldObject metav1.Object, kind sch Name: "webhook-test", UID: "webhook-test", } + options := &metav1.UpdateOptions{} return &FakeAttributes{ - Attributes: admission.NewAttributesRecord(object.(runtime.Object), oldObject.(runtime.Object), kind, namespace, name, gvr, subResource, admission.Update, dryRun, &userInfo), + Attributes: admission.NewAttributesRecord(object.(runtime.Object), oldObject.(runtime.Object), kind, namespace, name, gvr, subResource, admission.Update, options, dryRun, &userInfo), } } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go index a6479aa1a98..c4c2d8628e1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go @@ -136,6 +136,7 @@ func TestBanflunderAdmissionPlugin(t *testing.T) { scenario.admissionInputResource, "", admission.Create, + &metav1.CreateOptions{}, false, nil), nil, diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index bd506e4dcf1..eeae7e0c82a 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -151,9 +151,11 @@ type holder struct { recordNamespace string recordName string - expectGVK schema.GroupVersionKind - expectObject bool - expectOldObject bool + expectGVK schema.GroupVersionKind + expectObject bool + expectOldObject bool + expectOptionsGVK schema.GroupVersionKind + expectOptions bool recorded map[string]*v1beta1.AdmissionRequest } @@ -169,12 +171,14 @@ func (h *holder) reset(t *testing.T) { h.recordNamespace = "" h.expectObject = false h.expectOldObject = false + h.expectOptionsGVK = schema.GroupVersionKind{} + h.expectOptions = false h.recorded = map[string]*v1beta1.AdmissionRequest{ mutation: nil, validation: nil, } } -func (h *holder) expect(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, operation v1beta1.Operation, name, namespace string, object, oldObject bool) { +func (h *holder) expect(gvr schema.GroupVersionResource, gvk, optionsGVK schema.GroupVersionKind, operation v1beta1.Operation, name, namespace string, object, oldObject, options bool) { // Special-case namespaces, since the object name shows up in request attributes for update/delete requests if len(namespace) == 0 && gvk.Group == "" && 
gvk.Version == "v1" && gvk.Kind == "Namespace" && operation != v1beta1.Create { namespace = name @@ -189,6 +193,8 @@ func (h *holder) expect(gvr schema.GroupVersionResource, gvk schema.GroupVersion h.recordNamespace = namespace h.expectObject = object h.expectOldObject = oldObject + h.expectOptionsGVK = optionsGVK + h.expectOptions = options h.recorded = map[string]*v1beta1.AdmissionRequest{ mutation: nil, validation: nil, @@ -283,6 +289,14 @@ func (h *holder) verifyRequest(request *v1beta1.AdmissionRequest) error { return fmt.Errorf("unexpected old object: %#v", request.OldObject.Object) } + if h.expectOptions { + if err := h.verifyOptions(request.Options.Object); err != nil { + return fmt.Errorf("options error: %v", err) + } + } else if request.Options.Object != nil { + return fmt.Errorf("unexpected options: %#v", request.Options.Object) + } + return nil } @@ -296,6 +310,16 @@ func (h *holder) verifyObject(obj runtime.Object) error { return nil } +func (h *holder) verifyOptions(options runtime.Object) error { + if options == nil { + return fmt.Errorf("no options sent") + } + if options.GetObjectKind().GroupVersionKind() != h.expectOptionsGVK { + return fmt.Errorf("expected %#v, got %#v", h.expectOptionsGVK, options.GetObjectKind().GroupVersionKind()) + } + return nil +} + // TestWebhookV1beta1 tests communication between API server and webhook process. func TestWebhookV1beta1(t *testing.T) { // holder communicates expectations to webhooks, and results from webhooks @@ -456,7 +480,7 @@ func testResourceCreate(c *testContext) { if c.resource.Namespaced { ns = testNamespace } - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Create, stubObj.GetName(), ns, true, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkCreateOptions, v1beta1.Create, stubObj.GetName(), ns, true, false, true) _, err = c.client.Resource(c.gvr).Namespace(ns).Create(stubObj, metav1.CreateOptions{}) if err != nil { c.t.Error(err) @@ -471,7 +495,7 @@ func testResourceUpdate(c *testContext) { return err } obj.SetAnnotations(map[string]string{"update": "true"}) - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkUpdateOptions, v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true, true) _, err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Update(obj, metav1.UpdateOptions{}) return err }); err != nil { @@ -486,7 +510,7 @@ func testResourcePatch(c *testContext) { c.t.Error(err) return } - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkUpdateOptions, v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true, true) _, err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Patch( obj.GetName(), types.MergePatchType, @@ -506,7 +530,7 @@ func testResourceDelete(c *testContext) { } background := metav1.DeletePropagationBackground zero := int64(0) - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Delete, obj.GetName(), obj.GetNamespace(), false, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), 
gvkDeleteOptions, v1beta1.Delete, obj.GetName(), obj.GetNamespace(), false, false, true) err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Delete(obj.GetName(), &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) if err != nil { c.t.Error(err) @@ -552,7 +576,7 @@ func testResourceDeletecollection(c *testContext) { } // set expectations - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Delete, "", obj.GetNamespace(), false, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkDeleteOptions, v1beta1.Delete, "", obj.GetNamespace(), false, false, true) // delete err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).DeleteCollection(&metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}, metav1.ListOptions{LabelSelector: "webhooktest=true"}) @@ -617,7 +641,7 @@ func testSubresourceUpdate(c *testContext) { submitObj.SetAnnotations(map[string]string{"subresourceupdate": "true"}) // set expectations - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkUpdateOptions, v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true, true) _, err = c.client.Resource(gvrWithoutSubresources).Namespace(obj.GetNamespace()).Update( submitObj, @@ -644,7 +668,7 @@ func testSubresourcePatch(c *testContext) { subresources := strings.Split(c.gvr.Resource, "/")[1:] // set expectations - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkUpdateOptions, v1beta1.Update, obj.GetName(), obj.GetNamespace(), true, true, true) _, err = c.client.Resource(gvrWithoutSubresources).Namespace(obj.GetNamespace()).Patch( obj.GetName(), @@ -680,7 +704,7 @@ func testNamespaceDelete(c *testContext) { background := metav1.DeletePropagationBackground zero := int64(0) - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Delete, obj.GetName(), obj.GetNamespace(), false, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkDeleteOptions, v1beta1.Delete, obj.GetName(), obj.GetNamespace(), false, false, true) err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Delete(obj.GetName(), &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) if err != nil { c.t.Error(err) @@ -706,7 +730,7 @@ func testNamespaceDelete(c *testContext) { } // then run the final delete and make sure admission is called again - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Delete, obj.GetName(), obj.GetNamespace(), false, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkDeleteOptions, v1beta1.Delete, obj.GetName(), obj.GetNamespace(), false, false, true) err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Delete(obj.GetName(), &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) if err != nil { c.t.Error(err) @@ -736,7 +760,7 @@ func testDeploymentRollback(c *testContext) { gvrWithoutSubresources.Resource = 
strings.Split(gvrWithoutSubresources.Resource, "/")[0] subresources := strings.Split(c.gvr.Resource, "/")[1:] - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Create, obj.GetName(), obj.GetNamespace(), true, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkCreateOptions, v1beta1.Create, obj.GetName(), obj.GetNamespace(), true, false, true) var rollbackObj runtime.Object switch c.gvr { @@ -785,7 +809,7 @@ func testPodConnectSubresource(c *testContext) { for _, httpMethod := range []string{"GET", "POST"} { c.t.Logf("verifying %v", httpMethod) - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Connect, pod.GetName(), pod.GetNamespace(), true, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), schema.GroupVersionKind{}, v1beta1.Connect, pod.GetName(), pod.GetNamespace(), true, false, false) var err error switch c.gvr { case gvr("", "v1", "pods/exec"): @@ -827,7 +851,7 @@ func testPodBindingEviction(c *testContext) { } }() - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Create, pod.GetName(), pod.GetNamespace(), true, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), gvkCreateOptions, v1beta1.Create, pod.GetName(), pod.GetNamespace(), true, false, true) switch c.gvr { case gvr("", "v1", "bindings"): @@ -895,7 +919,7 @@ func testSubresourceProxy(c *testContext) { } // set expectations - c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), v1beta1.Connect, obj.GetName(), obj.GetNamespace(), true, false) + c.admissionHolder.expect(c.gvr, gvk(c.resource.Group, c.resource.Version, c.resource.Kind), schema.GroupVersionKind{}, v1beta1.Connect, obj.GetName(), obj.GetNamespace(), true, false, false) // run the request. 
we don't actually care if the request is successful, just that admission gets called as expected err = request.Resource(gvrWithoutSubresources.Resource).Name(obj.GetName()).SubResource(subresources...).Do().Error() if err != nil { @@ -918,6 +942,7 @@ func newWebhookHandler(t *testing.T, holder *holder, phase string) http.Handler t.Error(err) return } + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { t.Errorf("contentType=%s, expect application/json", contentType) return @@ -955,6 +980,16 @@ func newWebhookHandler(t *testing.T, holder *holder, phase string) http.Handler review.Request.OldObject.Object = u } + if len(review.Request.Options.Raw) > 0 { + u := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := json.Unmarshal(review.Request.Options.Raw, u); err != nil { + t.Errorf("Fail to deserialize options object: %s for admission request %#+v with error: %v", string(review.Request.Options.Raw), review.Request, err) + http.Error(w, err.Error(), 400) + return + } + review.Request.Options.Object = u + } + if review.Request.UserInfo.Username == testClientUsername { // only record requests originating from this integration test's client holder.record(phase, review.Request) @@ -1043,6 +1078,12 @@ func gvk(group, version, kind string) schema.GroupVersionKind { return schema.GroupVersionKind{Group: group, Version: version, Kind: kind} } +var ( + gvkCreateOptions = metav1.SchemeGroupVersion.WithKind("CreateOptions") + gvkUpdateOptions = metav1.SchemeGroupVersion.WithKind("UpdateOptions") + gvkDeleteOptions = metav1.SchemeGroupVersion.WithKind("DeleteOptions") +) + func shouldTestResource(gvr schema.GroupVersionResource, resource metav1.APIResource) bool { if !sets.NewString(resource.Verbs...).HasAny("create", "update", "patch", "connect", "delete", "deletecollection") { return false From 3af7a72719373d97c3846b0f5897f6e2ef785849 Mon Sep 17 00:00:00 2001 From: Mike Crute Date: Mon, 1 Apr 2019 15:49:57 -0700 Subject: [PATCH 167/194] Remove hardcoded region list from AWS provider This extracts the region list from the AWS SDK and accounts for special opt-in regions. This will ensure that the regions are always up-to-date as we update the AWS SDK instead of requiring duplicated accounting. 
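For reference, a minimal standalone sketch of the SDK-backed lookup that the new isRegionValid relies on. This is not part of the patch; it assumes only the aws-sdk-go endpoints package that the cloud provider already vendors, and the sdkKnowsRegion helper name is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

// sdkKnowsRegion reports whether any SDK partition lists the given region ID.
// This mirrors the first check performed by the new isRegionValid, before it
// falls back to the special-cased ap-northeast-3 and the instance metadata
// region.
func sdkKnowsRegion(region string) bool {
	for _, p := range endpoints.DefaultPartitions() {
		for id := range p.Regions() {
			if id == region {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(sdkKnowsRegion("us-east-1"))  // true: a region the SDK ships metadata for
	fmt.Println(sdkKnowsRegion("us-east-1a")) // false: an availability zone, not a region
}

Because the region set now comes from the SDK's endpoint metadata, supporting a newly launched region is a matter of bumping the vendored SDK rather than editing a hardcoded list, and the instance-metadata fallback in the patch covers hosts running in regions newer than the vendored SDK.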
--- .../k8s.io/legacy-cloud-providers/aws/BUILD | 2 - .../k8s.io/legacy-cloud-providers/aws/aws.go | 50 +++++++--- .../legacy-cloud-providers/aws/aws_test.go | 41 ++++++++ .../legacy-cloud-providers/aws/regions.go | 96 ------------------- .../aws/regions_test.go | 85 ---------------- 5 files changed, 76 insertions(+), 198 deletions(-) delete mode 100644 staging/src/k8s.io/legacy-cloud-providers/aws/regions.go delete mode 100644 staging/src/k8s.io/legacy-cloud-providers/aws/regions_test.go diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/BUILD b/staging/src/k8s.io/legacy-cloud-providers/aws/BUILD index 7805a85b498..127b74bc54a 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/BUILD +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/BUILD @@ -19,7 +19,6 @@ go_library( "device_allocator.go", "instances.go", "log_handler.go", - "regions.go", "retry_handler.go", "sets_ippermissions.go", "tags.go", @@ -74,7 +73,6 @@ go_test( "aws_test.go", "device_allocator_test.go", "instances_test.go", - "regions_test.go", "retry_handler_test.go", "tags_test.go", ], diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go index 8417e8e5bb8..f7c05545895 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go @@ -270,9 +270,6 @@ const MaxReadThenCreateRetries = 30 // need hardcoded defaults. const DefaultVolumeType = "gp2" -// Used to call recognizeWellKnownRegions just once -var once sync.Once - // Services is an abstraction over AWS, to allow mocking/other implementations type Services interface { Compute(region string) (EC2, error) @@ -1214,14 +1211,8 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { return nil, err } - // Trust that if we get a region from configuration or AWS metadata that it is valid, - // and register ECR providers - recognizeRegion(regionName) - if !cfg.Global.DisableStrictZoneCheck { - valid := isRegionValid(regionName) - if !valid { - // This _should_ now be unreachable, given we call RecognizeRegion + if !isRegionValid(regionName, metadata) { return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone) } } else { @@ -1303,14 +1294,43 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { } } - // Register regions, in particular for ECR credentials - once.Do(func() { - recognizeWellKnownRegions() - }) - return awsCloud, nil } +// isRegionValid accepts an AWS region name and returns if the region is a +// valid region known to the AWS SDK. Considers the region returned from the +// EC2 metadata service to be a valid region as it's only available on a host +// running in a valid AWS region. +func isRegionValid(region string, metadata EC2Metadata) bool { + // Does the AWS SDK know about the region? + for _, p := range endpoints.DefaultPartitions() { + for r := range p.Regions() { + if r == region { + return true + } + } + } + + // ap-northeast-3 is purposely excluded from the SDK because it + // requires an access request (for more details see): + // https://github.com/aws/aws-sdk-go/issues/1863 + if region == "ap-northeast-3" { + return true + } + + // Fallback to checking if the region matches the instance metadata region + // (ignoring any user overrides). This just accounts for running an old + // build of Kubernetes in a new region that wasn't compiled into the SDK + // when Kubernetes was built. 
+ if az, err := getAvailabilityZone(metadata); err == nil { + if r, err := azToRegion(az); err == nil && region == r { + return true + } + } + + return false +} + // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { c.clientBuilder = clientBuilder diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go index 7ad3180a318..8f9216241ee 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go @@ -1835,6 +1835,47 @@ func TestCreateDisk(t *testing.T) { awsServices.ec2.(*MockedFakeEC2).AssertExpectations(t) } +func TestRegionIsValid(t *testing.T) { + fake := newMockedFakeAWSServices("fakeCluster") + fake.selfInstance.Placement = &ec2.Placement{ + AvailabilityZone: aws.String("pl-fake-999a"), + } + + // This is the legacy list that was removed, using this to ensure we avoid + // region regressions if something goes wrong in the SDK + regions := []string{ + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "cn-north-1", + "cn-northwest-1", + "us-gov-west-1", + "ap-northeast-3", + + // Ensures that we always trust what the metadata service returns + "pl-fake-999", + } + + for _, region := range regions { + assert.True(t, isRegionValid(region, fake.metadata), "expected region '%s' to be valid but it was not", region) + } + + assert.False(t, isRegionValid("pl-fake-991a", fake.metadata), "expected region 'pl-fake-991' to be invalid but it was not") +} + func TestGetCandidateZonesForDynamicVolume(t *testing.T) { tests := []struct { name string diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/regions.go b/staging/src/k8s.io/legacy-cloud-providers/aws/regions.go deleted file mode 100644 index f834ee1aa79..00000000000 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/regions.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package aws - -import ( - "sync" - - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/util/sets" -) - -// wellKnownRegions is the complete list of regions known to the AWS cloudprovider -// and credentialprovider. 
-var wellKnownRegions = [...]string{ - // from `aws ec2 describe-regions --region us-east-1 --query Regions[].RegionName | sort` - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ca-central-1", - "eu-central-1", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-west-1", - "us-west-2", - - // these are not registered in many / most accounts - "cn-north-1", - "cn-northwest-1", - "us-gov-west-1", -} - -// awsRegionsMutex protects awsRegions -var awsRegionsMutex sync.Mutex - -// awsRegions is a set of recognized regions -var awsRegions sets.String - -// recognizeRegion is called for each AWS region we know about. -// It currently registers a credential provider for that region. -// There are two paths to discovering a region: -// * we hard-code some well-known regions -// * if a region is discovered from instance metadata, we add that -func recognizeRegion(region string) { - awsRegionsMutex.Lock() - defer awsRegionsMutex.Unlock() - - if awsRegions == nil { - awsRegions = sets.NewString() - } - - if awsRegions.Has(region) { - klog.V(6).Infof("found AWS region %q again - ignoring", region) - return - } - - klog.V(4).Infof("found AWS region %q", region) - - awsRegions.Insert(region) -} - -// recognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion -func recognizeWellKnownRegions() { - for _, region := range wellKnownRegions { - recognizeRegion(region) - } -} - -// isRegionValid checks if the region is in the set of known regions -func isRegionValid(region string) bool { - awsRegionsMutex.Lock() - defer awsRegionsMutex.Unlock() - - return awsRegions.Has(region) -} diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/regions_test.go b/staging/src/k8s.io/legacy-cloud-providers/aws/regions_test.go deleted file mode 100644 index c48bc705862..00000000000 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/regions_test.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package aws - -import ( - "testing" -) - -// TestRegions does basic checking of region verification / addition -func TestRegions(t *testing.T) { - recognizeWellKnownRegions() - - tests := []struct { - Add string - Lookup string - ExpectIsRegion bool - }{ - { - Lookup: "us-east-1", - ExpectIsRegion: true, - }, - { - Lookup: "us-east-1a", - ExpectIsRegion: false, - }, - { - Add: "us-test-1", - Lookup: "us-east-1", - ExpectIsRegion: true, - }, - { - Lookup: "us-test-1", - ExpectIsRegion: true, - }, - { - Add: "us-test-1", - Lookup: "us-test-1", - ExpectIsRegion: true, - }, - } - - for _, test := range tests { - if test.Add != "" { - recognizeRegion(test.Add) - } - - if test.Lookup != "" { - if isRegionValid(test.Lookup) != test.ExpectIsRegion { - t.Fatalf("region valid mismatch: %q", test.Lookup) - } - } - } -} - -// TestRecognizesNewRegion verifies that we see a region from metadata, we recognize it as valid -func TestRecognizesNewRegion(t *testing.T) { - region := "us-testrecognizesnewregion-1" - if isRegionValid(region) { - t.Fatalf("region already valid: %q", region) - } - - awsServices := NewFakeAWSServices(TestClusterID).WithAz(region + "a") - _, err := newAWSCloud(CloudConfig{}, awsServices) - if err != nil { - t.Errorf("error building AWS cloud: %v", err) - } - - if !isRegionValid(region) { - t.Fatalf("newly discovered region not valid: %q", region) - } -} From 1e9ce46f0a677c16493a8987ab0aceb3709a821e Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Apr 2019 09:46:54 -0700 Subject: [PATCH 168/194] migrate k8s.io/apimachinery/pkg/util/diff to cmp --- .../apimachinery/pkg/api/resource/quantity.go | 6 + .../k8s.io/apimachinery/pkg/util/diff/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 242 ++---------------- .../apimachinery/pkg/util/diff/diff_test.go | 133 ---------- 4 files changed, 24 insertions(+), 359 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go index 54fda58064d..93a6c0c5004 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -584,6 +584,12 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } +// Equal checks equality of two Quantities. This is useful for testing with +// cmp.Equal. +func (q Quantity) Equal(v Quantity) bool { + return q.Cmp(v) == 0 +} + // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. 
const int64QuantityExpectedBytes = 18 diff --git a/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD index 0089ba84821..006929eacbc 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD @@ -18,8 +18,8 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/diff", importpath = "k8s.io/apimachinery/pkg/util/diff", deps = [ - "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/diff/diff.go b/staging/src/k8s.io/apimachinery/pkg/util/diff/diff.go index 5ce92482999..a006b925a9e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -18,16 +18,12 @@ package diff import ( "bytes" - "encoding/json" "fmt" - "reflect" - "sort" "strings" "text/tabwriter" "github.com/davecgh/go-spew/spew" - - "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/google/go-cmp/cmp" ) // StringDiff diffs a and b and returns a human readable diff. @@ -50,233 +46,29 @@ func StringDiff(a, b string) string { return string(out) } -// ObjectDiff writes the two objects out as JSON and prints out the identical part of -// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. -// For debugging tests. +func legacyDiff(a, b interface{}) string { + return cmp.Diff(a, b) +} + +// ObjectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectDiff(a, b interface{}) string { - ab, err := json.Marshal(a) - if err != nil { - panic(fmt.Sprintf("a: %v", err)) - } - bb, err := json.Marshal(b) - if err != nil { - panic(fmt.Sprintf("b: %v", err)) - } - return StringDiff(string(ab), string(bb)) + return legacyDiff(a, b) } -// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, -// which shows absolutely everything by recursing into every single pointer -// (go's %#v formatters OTOH stop at a certain point). This is needed when you -// can't figure out why reflect.DeepEqual is returning false and nothing is -// showing you differences. This will. +// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectGoPrintDiff(a, b interface{}) string { - s := spew.ConfigState{DisableMethods: true} - return StringDiff( - s.Sprintf("%#v", a), - s.Sprintf("%#v", b), - ) + return legacyDiff(a, b) } -// ObjectReflectDiff returns a multi-line formatted diff between two objects -// of equal type. If an object with private fields is passed you will -// only see string comparison for those fields. Otherwise this presents the -// most human friendly diff of two structs of equal type in this package. +// ObjectReflectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. 
+// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectReflectDiff(a, b interface{}) string { - if a == nil && b == nil { - return "" - } - if a == nil { - return fmt.Sprintf("a is nil and b is not-nil") - } - if b == nil { - return fmt.Sprintf("a is not-nil and b is nil") - } - vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) - if vA.Type() != vB.Type() { - return fmt.Sprintf("type A %T and type B %T do not match", a, b) - } - diffs := objectReflectDiff(field.NewPath("object"), vA, vB) - if len(diffs) == 0 { - return "" - } - out := []string{""} - for _, d := range diffs { - elidedA, elidedB := limit(d.a, d.b, 80) - out = append(out, - fmt.Sprintf("%s:", d.path), - fmt.Sprintf(" a: %s", elidedA), - fmt.Sprintf(" b: %s", elidedB), - ) - } - return strings.Join(out, "\n") -} - -// limit: -// 1. stringifies aObj and bObj -// 2. elides identical prefixes if either is too long -// 3. elides remaining content from the end if either is too long -func limit(aObj, bObj interface{}, max int) (string, string) { - elidedPrefix := "" - elidedASuffix := "" - elidedBSuffix := "" - a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj) - - if aObj != nil && bObj != nil { - if aType, bType := fmt.Sprintf("%T", aObj), fmt.Sprintf("%T", bObj); aType != bType { - a = fmt.Sprintf("%s (%s)", a, aType) - b = fmt.Sprintf("%s (%s)", b, bType) - } - } - - for { - switch { - case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]: - // a is too long, b has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]: - // b is too long, a has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(a) > max: - a = a[:max] - elidedASuffix = "..." - - case len(b) > max: - b = b[:max] - elidedBSuffix = "..." - - default: - // both are short enough - return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix - } - } -} - -func public(s string) bool { - if len(s) == 0 { - return false - } - return s[:1] == strings.ToUpper(s[:1]) -} - -type diff struct { - path *field.Path - a, b interface{} -} - -type orderedDiffs []diff - -func (d orderedDiffs) Len() int { return len(d) } -func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d orderedDiffs) Less(i, j int) bool { - a, b := d[i].path.String(), d[j].path.String() - if a < b { - return true - } - return false -} - -func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { - switch a.Type().Kind() { - case reflect.Struct: - var changes []diff - for i := 0; i < a.Type().NumField(); i++ { - if !public(a.Type().Field(i).Name) { - if reflect.DeepEqual(a.Interface(), b.Interface()) { - continue - } - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { - changes = append(changes, sub...) 
- } - } - return changes - case reflect.Ptr, reflect.Interface: - if a.IsNil() || b.IsNil() { - switch { - case a.IsNil() && b.IsNil(): - return nil - case a.IsNil(): - return []diff{{path: path, a: nil, b: b.Interface()}} - default: - return []diff{{path: path, a: a.Interface(), b: nil}} - } - } - return objectReflectDiff(path, a.Elem(), b.Elem()) - case reflect.Chan: - if !reflect.DeepEqual(a.Interface(), b.Interface()) { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - case reflect.Slice: - lA, lB := a.Len(), b.Len() - l := lA - if lB < lA { - l = lB - } - if lA == lB && lA == 0 { - if a.IsNil() != b.IsNil() { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - } - var diffs []diff - for i := 0; i < l; i++ { - if !reflect.DeepEqual(a.Index(i), b.Index(i)) { - diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) - } - } - for i := l; i < lA; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) - } - for i := l; i < lB; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) - } - return diffs - case reflect.Map: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - aKeys := make(map[interface{}]interface{}) - for _, key := range a.MapKeys() { - aKeys[key.Interface()] = a.MapIndex(key).Interface() - } - var missing []diff - for _, key := range b.MapKeys() { - if _, ok := aKeys[key.Interface()]; ok { - delete(aKeys, key.Interface()) - if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { - continue - } - missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) - continue - } - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) - } - for key, value := range aKeys { - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) - } - if len(missing) == 0 { - missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) - } - sort.Sort(orderedDiffs(missing)) - return missing - default: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - if !a.CanInterface() { - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } + return legacyDiff(a, b) } // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, diff --git a/staging/src/k8s.io/apimachinery/pkg/util/diff/diff_test.go b/staging/src/k8s.io/apimachinery/pkg/util/diff/diff_test.go index 6af43f415b7..eb61a11d779 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/diff/diff_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/diff/diff_test.go @@ -20,92 +20,6 @@ import ( "testing" ) -func TestObjectReflectDiff(t *testing.T) { - type struct1 struct{ A []int } - - testCases := map[string]struct { - a, b interface{} - out string - }{ - "both nil": { - a: interface{}(nil), - b: interface{}(nil), - }, - "a nil": { - a: interface{}(nil), - b: "test", - out: "a is nil and b is not-nil", - }, - "b nil": { - a: "test", - b: interface{}(nil), - out: "a is not-nil and b is nil", - }, - "map": { - a: map[string]int{}, - b: map[string]int{}, - }, - "detect nil map": { - a: map[string]int(nil), - b: map[string]int{}, - out: ` -object: - a: map[string]int(nil) - b: map[string]int{}`, - }, - "detect map changes": { - a: map[string]int{"test": 
1, "other": 2}, - b: map[string]int{"test": 2, "third": 3}, - out: ` -object[other]: - a: 2 - b: -object[test]: - a: 1 - b: 2 -object[third]: - a: - b: 3`, - }, - "nil slice": {a: struct1{A: nil}, b: struct1{A: nil}}, - "empty slice": {a: struct1{A: []int{}}, b: struct1{A: []int{}}}, - "detect slice changes 1": {a: struct1{A: []int{1}}, b: struct1{A: []int{2}}, out: ` -object.A[0]: - a: 1 - b: 2`, - }, - "detect slice changes 2": {a: struct1{A: []int{}}, b: struct1{A: []int{2}}, out: ` -object.A[0]: - a: - b: 2`, - }, - "detect slice changes 3": {a: struct1{A: []int{1}}, b: struct1{A: []int{}}, out: ` -object.A[0]: - a: 1 - b: `, - }, - "detect nil vs empty slices": {a: struct1{A: nil}, b: struct1{A: []int{}}, out: ` -object.A: - a: []int(nil) - b: []int{}`, - }, - "display type differences": {a: []interface{}{int64(1)}, b: []interface{}{uint64(1)}, out: ` -object[0]: - a: 1 (int64) - b: 0x1 (uint64)`, - }, - } - for name, test := range testCases { - expect := test.out - if len(expect) == 0 { - expect = "" - } - if actual := ObjectReflectDiff(test.a, test.b); actual != expect { - t.Errorf("%s: unexpected output: %s", name, actual) - } - } -} - func TestStringDiff(t *testing.T) { diff := StringDiff("aaabb", "aaacc") expect := "aaa\n\nA: bb\n\nB: cc\n\n" @@ -113,50 +27,3 @@ func TestStringDiff(t *testing.T) { t.Errorf("diff returned %v", diff) } } - -func TestLimit(t *testing.T) { - testcases := []struct { - a interface{} - b interface{} - expectA string - expectB string - }{ - { - a: `short a`, - b: `short b`, - expectA: `"short a"`, - expectB: `"short b"`, - }, - { - a: `short a`, - b: `long b needs truncating`, - expectA: `"short a"`, - expectB: `"long b ne...`, - }, - { - a: `long a needs truncating`, - b: `long b needs truncating`, - expectA: `...g a needs ...`, - expectB: `...g b needs ...`, - }, - { - a: `long common prefix with different stuff at the end of a`, - b: `long common prefix with different stuff at the end of b`, - expectA: `...end of a"`, - expectB: `...end of b"`, - }, - { - a: `long common prefix with different stuff at the end of a`, - b: `long common prefix with different stuff at the end of b which continues`, - expectA: `...of a"`, - expectB: `...of b which...`, - }, - } - - for _, tc := range testcases { - a, b := limit(tc.a, tc.b, 10) - if a != tc.expectA || b != tc.expectB { - t.Errorf("limit(%q, %q)\n\texpected: %s, %s\n\tgot: %s, %s", tc.a, tc.b, tc.expectA, tc.expectB, a, b) - } - } -} From 76f683a8f3dc2977846e16b2ea14208a51c2cb6b Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Mon, 22 Apr 2019 21:41:46 -0700 Subject: [PATCH 169/194] vendor github.com/google/go-cmp --- Godeps/LICENSES | 35 + go.mod | 2 +- go.sum | 4 +- staging/src/k8s.io/api/go.sum | 2 + .../src/k8s.io/apiextensions-apiserver/go.sum | 3 +- staging/src/k8s.io/apimachinery/go.mod | 1 + staging/src/k8s.io/apimachinery/go.sum | 2 + staging/src/k8s.io/apiserver/go.mod | 1 - staging/src/k8s.io/apiserver/go.sum | 4 +- staging/src/k8s.io/cli-runtime/go.sum | 2 + staging/src/k8s.io/client-go/go.sum | 2 + staging/src/k8s.io/cloud-provider/go.sum | 2 + staging/src/k8s.io/cluster-bootstrap/go.sum | 2 + staging/src/k8s.io/component-base/go.sum | 2 + staging/src/k8s.io/csi-translation-lib/go.sum | 2 + staging/src/k8s.io/kube-aggregator/go.sum | 3 +- .../src/k8s.io/kube-controller-manager/go.sum | 2 + staging/src/k8s.io/kube-proxy/go.sum | 2 + staging/src/k8s.io/kube-scheduler/go.sum | 2 + staging/src/k8s.io/kubelet/go.sum | 2 + .../src/k8s.io/legacy-cloud-providers/go.sum | 2 + 
staging/src/k8s.io/metrics/go.sum | 2 + staging/src/k8s.io/node-api/go.sum | 2 + staging/src/k8s.io/sample-apiserver/go.sum | 3 +- staging/src/k8s.io/sample-cli-plugin/go.sum | 2 + staging/src/k8s.io/sample-controller/go.sum | 2 + vendor/BUILD | 1 + vendor/github.com/google/go-cmp/LICENSE | 27 + vendor/github.com/google/go-cmp/cmp/BUILD | 46 ++ .../github.com/google/go-cmp/cmp/compare.go | 616 ++++++++++++++++++ .../google/go-cmp/cmp/export_panic.go | 15 + .../google/go-cmp/cmp/export_unsafe.go | 23 + .../google/go-cmp/cmp/internal/diff/BUILD | 26 + .../go-cmp/cmp/internal/diff/debug_disable.go | 17 + .../go-cmp/cmp/internal/diff/debug_enable.go | 122 ++++ .../google/go-cmp/cmp/internal/diff/diff.go | 372 +++++++++++ .../google/go-cmp/cmp/internal/flags/BUILD | 27 + .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../google/go-cmp/cmp/internal/function/BUILD | 23 + .../go-cmp/cmp/internal/function/func.go | 99 +++ .../google/go-cmp/cmp/internal/value/BUILD | 27 + .../cmp/internal/value/pointer_purego.go | 23 + .../cmp/internal/value/pointer_unsafe.go | 26 + .../google/go-cmp/cmp/internal/value/sort.go | 104 +++ .../google/go-cmp/cmp/internal/value/zero.go | 45 ++ .../github.com/google/go-cmp/cmp/options.go | 524 +++++++++++++++ vendor/github.com/google/go-cmp/cmp/path.go | 308 +++++++++ vendor/github.com/google/go-cmp/cmp/report.go | 51 ++ .../google/go-cmp/cmp/report_compare.go | 296 +++++++++ .../google/go-cmp/cmp/report_reflect.go | 279 ++++++++ .../google/go-cmp/cmp/report_slices.go | 333 ++++++++++ .../google/go-cmp/cmp/report_text.go | 382 +++++++++++ .../google/go-cmp/cmp/report_value.go | 121 ++++ vendor/modules.txt | 6 + 56 files changed, 4049 insertions(+), 9 deletions(-) create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/BUILD create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/BUILD create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/BUILD create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/BUILD create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/BUILD create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 
vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 99551d040ca..7a2f270670e 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -9400,6 +9400,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/google/go-cmp licensed under: = + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/github.com/google/go-cmp/LICENSE 4ac66f7dea41d8d116cb7fb28aeff2ab +================================================================================ + + ================================================================================ = vendor/github.com/google/gofuzz licensed under: = diff --git a/go.mod b/go.mod index acd4a7d9249..747e8871346 100644 --- a/go.mod +++ b/go.mod @@ -286,7 +286,7 @@ replace ( github.com/google/btree => github.com/google/btree v0.0.0-20160524151835-7d79101e329e github.com/google/cadvisor => github.com/google/cadvisor v0.33.2-0.20190411163913-9db8c7dee20a github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.21 - github.com/google/go-cmp => github.com/google/go-cmp v0.2.0 + github.com/google/go-cmp => github.com/google/go-cmp v0.3.0 github.com/google/gofuzz => github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/google/uuid => github.com/google/uuid v1.0.0 github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d diff --git a/go.sum b/go.sum index 35416ab20c2..9f14ece4d94 100644 --- a/go.sum +++ b/go.sum @@ -177,8 +177,8 @@ github.com/google/cadvisor v0.33.2-0.20190411163913-9db8c7dee20a h1:4N3IykedbdXG github.com/google/cadvisor v0.33.2-0.20190411163913-9db8c7dee20a/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index 97da973bff2..77466c65fe6 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -9,6 +9,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 73a51fdeec6..de2364aeb5d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ 
-85,7 +85,8 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index 9452d2bad6b..b47358be679 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -12,6 +12,7 @@ require ( github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 github.com/golang/protobuf v1.2.0 + github.com/google/go-cmp v0.3.0 github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/google/uuid v1.0.0 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index fd89f50abad..7a547e60e59 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -14,6 +14,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieF github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index 58dad118855..5ca2fb7d7f4 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -24,7 +24,6 @@ require ( github.com/go-openapi/swag v0.17.2 // indirect github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/google/go-cmp v0.2.0 // indirect github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c // indirect diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 3db1319429a..e2913a4526c 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -62,8 +62,8 @@ github.com/golang/protobuf 
v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index 593d5ff209e..82c5e91fad2 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -35,6 +35,8 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum index 1de81bd91bd..f81aceabd2d 100644 --- a/staging/src/k8s.io/client-go/go.sum +++ b/staging/src/k8s.io/client-go/go.sum @@ -22,6 +22,8 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum index fa7acb50662..72bcc2914e3 100644 --- a/staging/src/k8s.io/cloud-provider/go.sum +++ b/staging/src/k8s.io/cloud-provider/go.sum @@ -15,6 +15,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er 
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index 9d012777040..44ad19d7d2f 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -8,6 +8,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCP github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index 70cb47ea1ff..4ef3cbf6e3e 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -13,6 +13,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index 9999e119937..71180aad28e 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -12,6 +12,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index c7f2831571b..41a22c8c14a 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -62,7 +62,8 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 52c9218ed97..f0923677eb0 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -10,6 +10,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCP github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 52c9218ed97..f0923677eb0 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -10,6 +10,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCP github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 52c9218ed97..f0923677eb0 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -10,6 +10,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCP github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 9d012777040..44ad19d7d2f 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -8,6 +8,8 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCP github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum index 1f0689dd625..66ef7b4cff1 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.sum +++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum @@ -29,6 +29,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index 6883f1e3729..12f4f6d1ec1 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -16,6 +16,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/node-api/go.sum b/staging/src/k8s.io/node-api/go.sum index 0ad3b265c51..f08159a0702 100644 --- a/staging/src/k8s.io/node-api/go.sum +++ b/staging/src/k8s.io/node-api/go.sum @@ -17,6 +17,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index 6f93a29faa0..3830d03eeac 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -60,7 +60,8 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod 
h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index 593d5ff209e..82c5e91fad2 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -35,6 +35,8 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index 75b80670838..1a93cf22765 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -17,6 +17,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/BUILD b/vendor/BUILD index f322d363c58..2050114696d 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -249,6 +249,7 @@ filegroup( "//vendor/github.com/google/cadvisor/watcher:all-srcs", "//vendor/github.com/google/cadvisor/zfs:all-srcs", "//vendor/github.com/google/certificate-transparency-go:all-srcs", + "//vendor/github.com/google/go-cmp/cmp:all-srcs", "//vendor/github.com/google/gofuzz:all-srcs", "//vendor/github.com/google/uuid:all-srcs", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:all-srcs", diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 00000000000..32017f8fa1d --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/BUILD b/vendor/github.com/google/go-cmp/cmp/BUILD new file mode 100644 index 00000000000..8e3422c8e8e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/BUILD @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "compare.go", + "export_unsafe.go", + "options.go", + "path.go", + "report.go", + "report_compare.go", + "report_reflect.go", + "report_slices.go", + "report_text.go", + "report_value.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/google/go-cmp/cmp", + importpath = "github.com/google/go-cmp/cmp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/google/go-cmp/cmp/internal/diff:go_default_library", + "//vendor/github.com/google/go-cmp/cmp/internal/flags:go_default_library", + "//vendor/github.com/google/go-cmp/cmp/internal/function:go_default_library", + "//vendor/github.com/google/go-cmp/cmp/internal/value:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/google/go-cmp/cmp/internal/diff:all-srcs", + "//vendor/github.com/google/go-cmp/cmp/internal/flags:all-srcs", + "//vendor/github.com/google/go-cmp/cmp/internal/function:all-srcs", + "//vendor/github.com/google/go-cmp/cmp/internal/value:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000000..2133562b01c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. 
+// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. 
+// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. 
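For orientation, the Equal and Diff entry points vendored above are typically exercised as in the following minimal sketch; the endpoint type and values are made up for illustration and are not part of the vendored file or of this patch.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// endpoint is a hypothetical struct with only exported fields, so the default
// recursive comparison applies without any Ignore or AllowUnexported options.
type endpoint struct {
	Host string
	Port int
}

func main() {
	x := endpoint{Host: "localhost", Port: 8080}
	y := endpoint{Host: "localhost", Port: 9090}

	// Equal applies the rules documented above: options first, then an Equal
	// method if one exists, then recursive comparison of the basic kinds.
	fmt.Println(cmp.Equal(x, y)) // false

	// Diff is empty if and only if Equal reports true for the same inputs.
	if d := cmp.Diff(x, y); d != "" {
		fmt.Printf("mismatch (-x +y):\n%s", d)
	}
}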
+ s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. 
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. 
+ vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. 
If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. 
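As an aside, the Equal-method rule handled by tryMethod above can be seen with a small, made-up type whose Equal method tolerates rounding error; this sketch is illustrative only and not part of the vendored file or of this patch.

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

// Celsius is a hypothetical type. Because it defines Equal in the
// "(T) Equal(T) bool" form, cmp.Equal defers to that method (rule 2 above)
// instead of comparing the float64 field directly.
type Celsius struct {
	Degrees float64
}

// Equal reports two readings as equal when they differ by less than 0.05.
// It is deterministic and symmetric, as cmp's self-checks expect.
func (c Celsius) Equal(o Celsius) bool {
	return math.Abs(c.Degrees-o.Degrees) < 0.05
}

func main() {
	fmt.Println(cmp.Equal(Celsius{Degrees: 20.00}, Celsius{Degrees: 20.01})) // true, via the Equal method
	fmt.Println(cmp.Equal(Celsius{Degrees: 20.00}, Celsius{Degrees: 21.00})) // false
}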
+func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 00000000000..abc3a1c3e76 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000000..59d4ee91b47 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/BUILD b/vendor/github.com/google/go-cmp/cmp/internal/diff/BUILD new file mode 100644 index 00000000000..57276db7160 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "debug_disable.go", + "diff.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/google/go-cmp/cmp/internal/diff", + importpath = "github.com/google/go-cmp/cmp/internal/diff", + visibility = ["//vendor/github.com/google/go-cmp/cmp:__subpackages__"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000000..fe98dcc6774 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000000..597b6ae56b1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000000..3d2e42662ca --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
+func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. 
+ // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. 
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] 
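
To make the shape of this API concrete: the diff package is internal to go-cmp, so outside code never imports it directly, but a rough sketch of how Difference, EqualFunc, and BoolResult fit together (reusing the X and Y lists from the edit-graph example above) looks like this:

    // Sketch only: cmp/internal/diff is not importable outside go-cmp itself.
    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp/internal/diff"
    )

    func main() {
        x := []string{"A", "B", "C", "A", "B", "B", "A"}
        y := []string{"C", "B", "A", "B", "A", "C"}

        // Per-symbol equality is delegated to an EqualFunc.
        f := func(ix, iy int) diff.Result { return diff.BoolResult(x[ix] == y[iy]) }

        es := diff.Difference(len(x), len(y), f)

        // Invariants documented above: the edit-script spans both lists,
        // and Dist()==0 exactly when the lists compare equal.
        fmt.Println(es.LenX() == len(x), es.LenY() == len(y), es.Dist() == 0)
    }
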
+func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/BUILD b/vendor/github.com/google/go-cmp/cmp/internal/flags/BUILD new file mode 100644 index 00000000000..f447972d225 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "flags.go", + "toolchain_legacy.go", + "toolchain_recent.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/google/go-cmp/cmp/internal/flags", + importpath = "github.com/google/go-cmp/cmp/internal/flags", + visibility = ["//vendor/github.com/google/go-cmp/cmp:__subpackages__"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000000..a9e7fc0b5b3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000000..01aed0a1532 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000000..c0b667f58b0 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. 
+const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/BUILD b/vendor/github.com/google/go-cmp/cmp/internal/function/BUILD new file mode 100644 index 00000000000..c82060c6025 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["func.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/google/go-cmp/cmp/internal/function", + importpath = "github.com/google/go-cmp/cmp/internal/function", + visibility = ["//vendor/github.com/google/go-cmp/cmp:__subpackages__"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000000..ace1dbe86e5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. 
+ fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/BUILD b/vendor/github.com/google/go-cmp/cmp/internal/value/BUILD new file mode 100644 index 00000000000..5ff7cbf2cad --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "pointer_unsafe.go", + "sort.go", + "zero.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/google/go-cmp/cmp/internal/value", + importpath = "github.com/google/go-cmp/cmp/internal/value", + visibility = ["//vendor/github.com/google/go-cmp/cmp:__subpackages__"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000000..0a01c4796f1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000000..da134ae2a80 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. 
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000000..938f646f000 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,104 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. 
+ panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000000..d13a12ccfcd --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,45 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import "reflect" + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. +func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000000..793448160ee --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. 
+ apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. +type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
+// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. 
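
As a usage sketch for FilterPath and Ignore above: a filtered Ignore is the usual way to exclude a single field from a comparison. The event type and its Seq field below are made up for illustration; Equal is the package-level entry point referenced in the Option documentation.

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    type event struct {
        Name string
        Seq  int // assume this field should not affect equality
    }

    func main() {
        x := event{Name: "start", Seq: 1}
        y := event{Name: "start", Seq: 2}

        // Ignore any value whose path ends in the ".Seq" struct field.
        ignoreSeq := cmp.FilterPath(func(p cmp.Path) bool {
            return p.Last().String() == ".Seq"
        }, cmp.Ignore())

        fmt.Println(cmp.Equal(x, y))            // false: Seq differs
        fmt.Println(cmp.Equal(x, y, ignoreSeq)) // true: the difference is ignored
    }
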
+// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
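
A hedged sketch of the Transformer option defined above, using the common pattern of sorting a copy of a slice so that element order is ignored. The "Sort" name and the int slices are illustrative only; the cmp/cmpopts helper package mentioned earlier provides ready-made options along these lines.

    package main

    import (
        "fmt"
        "sort"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Compare slices as multisets: sort a copy before comparing.
        trans := cmp.Transformer("Sort", func(in []int) []int {
            out := append([]int(nil), in...) // copy; a transformer must not mutate its input
            sort.Ints(out)
            return out
        })

        x := []int{3, 1, 2}
        y := []int{2, 3, 1}
        fmt.Println(cmp.Equal(x, y))        // false: order differs
        fmt.Println(cmp.Equal(x, y, trans)) // true: compared after sorting
    }
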
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
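
The *regexp.Regexp comparer quoted in the AllowUnexported documentation above can be used end-to-end like this (sketch; the patterns are arbitrary):

    package main

    import (
        "fmt"
        "regexp"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Treat two *regexp.Regexp values as equal when their source patterns match.
        byPattern := cmp.Comparer(func(x, y *regexp.Regexp) bool {
            return x.String() == y.String()
        })

        x := regexp.MustCompile(`ab+c`)
        y := regexp.MustCompile(`ab+c`)
        fmt.Println(cmp.Equal(x, y, byPattern)) // true
    }
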
+func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 00000000000..96fffd291f7 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,308 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
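
PathStep here and the Reporter option from options.go combine into the usual pattern for custom reporting: PushStep/PopStep mirror the tree traversal and Report fires once per leaf. A minimal sketch, where the pathReporter and pair types are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    // pathReporter records the path of every leaf that compared unequal.
    type pathReporter struct {
        path  cmp.Path
        diffs []string
    }

    func (r *pathReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
    func (r *pathReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
    func (r *pathReporter) Report(rs cmp.Result) {
        if !rs.Equal() {
            vx, vy := r.path.Last().Values()
            r.diffs = append(r.diffs, fmt.Sprintf("%v: %+v != %+v", r.path, vx, vy))
        }
    }

    type pair struct{ A, B int }

    func main() {
        var r pathReporter
        cmp.Equal(pair{1, 2}, pair{1, 3}, cmp.Reporter(&r))
        fmt.Println(r.diffs) // expect one entry, for the differing .B field
    }
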
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000000..6ddf29993e5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000000..05efb992c53 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
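
The defaultReporter above is what backs the package-level Diff function: it returns an empty string when nothing differs, and otherwise a report in which '-' records come from the first argument and '+' records from the second (see diffRemoved and diffInserted below). That is the basis of the usual test idiom; the config type here is made up for illustration.

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    type config struct {
        Name    string
        Retries int
    }

    func main() {
        want := config{Name: "api", Retries: 3}
        got := config{Name: "api", Retries: 5}

        // '-' lines come from want (first argument), '+' lines from got.
        if diff := cmp.Diff(want, got); diff != "" {
            fmt.Printf("config mismatch (-want +got):\n%s", diff)
        }
    }
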
+ TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. 
+ if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueX) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. 
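
Given numContextRecords above, a mostly-equal container is reported with a couple of equal records of context around each difference, while longer equal runs are collapsed into an ellipsis entry. A rough sketch that exercises this path (item is a made-up struct type, chosen so the slice is not handled by the specialized primitive-slice formatting):

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    type item struct{ ID, Qty int }

    func main() {
        x := make([]item, 20)
        for i := range x {
            x[i] = item{ID: i, Qty: 1}
        }
        y := append([]item(nil), x...)
        y[10] = item{ID: 10, Qty: 2} // a single difference in the middle

        // The long equal runs on either side of element 10 are mostly elided,
        // keeping only a little context around the change.
        fmt.Println(cmp.Diff(x, y))
    }
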
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000000..5521c604c54 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,279 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. 
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.AvoidStringer = true + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000000..8cb3265e767 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. 
+		return false // Some custom option was used to determined equality
+	case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+		return false // Both values must be valid
+	}
+
+	switch t := v.Type; t.Kind() {
+	case reflect.String:
+	case reflect.Array, reflect.Slice:
+		// Only slices of primitive types have specialized handling.
+		switch t.Elem().Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+			reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		default:
+			return false
+		}
+
+		// If a sufficient number of elements already differ,
+		// use specialized formatting even if length requirement is not met.
+		if v.NumDiff > v.NumSame {
+			return true
+		}
+	default:
+		return false
+	}
+
+	// Use specialized string diffing for longer slices or strings.
+	const minLength = 64
+	return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+	assert(opts.DiffMode == diffUnknown)
+	t, vx, vy := v.Type, v.ValueX, v.ValueY
+
+	// Auto-detect the type of the data.
+	var isLinedText, isText, isBinary bool
+	var sx, sy string
+	switch {
+	case t.Kind() == reflect.String:
+		sx, sy = vx.String(), vy.String()
+		isText = true // Initial estimate, verify later
+	case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+		sx, sy = string(vx.Bytes()), string(vy.Bytes())
+		isBinary = true // Initial estimate, verify later
+	case t.Kind() == reflect.Array:
+		// Arrays need to be addressable for slice operations to work.
+		vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+		vx2.Set(vx)
+		vy2.Set(vy)
+		vx, vy = vx2, vy2
+	}
+	if isText || isBinary {
+		var numLines, lastLineIdx, maxLineLen int
+		isBinary = false
+		for i, r := range sx + sy {
+			if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
+				isBinary = true
+				break
+			}
+			if r == '\n' {
+				if maxLineLen < i-lastLineIdx {
+					maxLineLen = i - lastLineIdx
+				}
+				lastLineIdx = i + 1
+				numLines++
+			}
+		}
+		isText = !isBinary
+		isLinedText = isText && numLines >= 4 && maxLineLen <= 256
+	}
+
+	// Format the string into printable records.
+	var list textList
+	var delim string
+	switch {
+	// If the text appears to be multi-lined text,
+	// then perform differencing across individual lines.
+	case isLinedText:
+		ssx := strings.Split(sx, "\n")
+		ssy := strings.Split(sy, "\n")
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.Index(0).String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+		delim = "\n"
+	// If the text appears to be single-lined text,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is printed as quoted strings.
+	case isText:
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+		delim = ""
+	// If the text appears to be binary data,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is inspired by hexdump.
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = (*prev).Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000000..80605d0e440 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,382 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
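The specialized slice and string diffing above only engages for inputs that pass the checks in CanFormatDiffSlice (for strings, both sides at least 64 bytes; the line-oriented mode additionally needs four or more lines). A minimal sketch with invented input strings that reaches that path through the public API:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Two long multi-line strings: FormatDiffSlice splits them on "\n",
	// diffs them line by line, and elides long equal runs as "..." records.
	before := strings.Repeat("unchanged line\n", 10) + "old final line\n"
	after := strings.Repeat("unchanged line\n", 10) + "new final line\n"
	fmt.Println(cmp.Diff(before, after))
}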
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) 
+ if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name = name + "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000000..83031a7f507 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. + TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += 
child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9ed0853bec4..d98b33d2a52 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -465,6 +465,12 @@ github.com/google/certificate-transparency-go/jsonclient github.com/google/certificate-transparency-go/tls github.com/google/certificate-transparency-go/x509 github.com/google/certificate-transparency-go/x509/pkix +# github.com/google/go-cmp v0.3.0 => github.com/google/go-cmp v0.3.0 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf => github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/google/gofuzz # github.com/google/uuid v1.0.0 => github.com/google/uuid v1.0.0 From 6aea7fcd8349872ef102c4317a392b5662dd6492 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Mon, 13 May 2019 18:39:55 -0700 Subject: [PATCH 170/194] Added topology translation and backward compatible access modes --- .../k8s.io/csi-translation-lib/plugins/BUILD | 5 +- .../csi-translation-lib/plugins/aws_ebs.go | 2 +- .../csi-translation-lib/plugins/gce_pd.go | 90 +++++++++- .../plugins/gce_pd_test.go | 165 ++++++++++++++++-- .../plugins/in_tree_volume.go | 4 +- .../plugins/openstack_cinder.go | 2 +- .../k8s.io/csi-translation-lib/translate.go | 11 +- 7 files changed, 252 insertions(+), 27 deletions(-) diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/BUILD b/staging/src/k8s.io/csi-translation-lib/plugins/BUILD index 5aa90077f73..88c1a91f080 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/BUILD +++ b/staging/src/k8s.io/csi-translation-lib/plugins/BUILD @@ -40,5 +40,8 @@ go_test( "gce_pd_test.go", ], embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/api/storage/v1:go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + ], ) diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go b/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go index 5d4cd74f365..6a9a8f6a6c8 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go @@ -45,7 +45,7 @@ func NewAWSElasticBlockStoreCSITranslator() InTreePlugin { } // TranslateInTreeStorageClassParametersToCSI translates InTree EBS storage class parameters to CSI storage class -func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) { +func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) { return sc, nil } diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go index 5f7b82d7d5e..95ada8d227d 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd.go @@ -33,6 +33,9 @@ const ( // GCEPDInTreePluginName is the name of the intree plugin for GCE PD GCEPDInTreePluginName = "kubernetes.io/gce-pd" + // GCEPDTopologyKey is the zonal topology key for GCE PD CSI Driver + GCEPDTopologyKey = "topology.gke.io/zone" + // Volume ID Expected Format // 
"projects/{projectName}/zones/{zoneName}/disks/{diskName}" volIDZonalFmt = "projects/%s/zones/%s/disks/%s" @@ -56,24 +59,104 @@ func NewGCEPersistentDiskCSITranslator() InTreePlugin { return &gcePersistentDiskCSITranslator{} } +func translateAllowedTopologies(terms []v1.TopologySelectorTerm) ([]v1.TopologySelectorTerm, error) { + if terms == nil { + return nil, nil + } + + newTopologies := []v1.TopologySelectorTerm{} + for _, term := range terms { + newTerm := v1.TopologySelectorTerm{} + for _, exp := range term.MatchLabelExpressions { + var newExp v1.TopologySelectorLabelRequirement + if exp.Key == v1.LabelZoneFailureDomain { + newExp = v1.TopologySelectorLabelRequirement{ + Key: GCEPDTopologyKey, + Values: exp.Values, + } + } else if exp.Key == GCEPDTopologyKey { + newExp = exp + } else { + return nil, fmt.Errorf("unknown topology key: %v", exp.Key) + } + newTerm.MatchLabelExpressions = append(newTerm.MatchLabelExpressions, newExp) + } + newTopologies = append(newTopologies, newTerm) + } + return newTopologies, nil +} + +func generateToplogySelectors(key string, values []string) []v1.TopologySelectorTerm { + return []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: key, + Values: values, + }, + }, + }, + } +} + // TranslateInTreeStorageClassParametersToCSI translates InTree GCE storage class parameters to CSI storage class -func (g *gcePersistentDiskCSITranslator) TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) { +func (g *gcePersistentDiskCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) { + var generatedTopologies []v1.TopologySelectorTerm + np := map[string]string{} for k, v := range sc.Parameters { switch strings.ToLower(k) { case "fstype": + // prefixed fstype parameter is stripped out by external provisioner np["csi.storage.k8s.io/fstype"] = v + // Strip out zone and zones parameters and translate them into topologies instead + case "zone": + generatedTopologies = generateToplogySelectors(GCEPDTopologyKey, []string{v}) + case "zones": + generatedTopologies = generateToplogySelectors(GCEPDTopologyKey, strings.Split(v, ",")) default: np[k] = v } } + + if len(generatedTopologies) > 0 && len(sc.AllowedTopologies) > 0 { + return nil, fmt.Errorf("cannot simultaneously set allowed topologies and zone/zones parameters") + } else if len(generatedTopologies) > 0 { + sc.AllowedTopologies = generatedTopologies + } else if len(sc.AllowedTopologies) > 0 { + newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies) + if err != nil { + return nil, fmt.Errorf("failed translating allowed topologies: %v", err) + } + sc.AllowedTopologies = newTopologies + } + sc.Parameters = np - // TODO(#77235): Translate AccessModes and zone/zones to AccessibleTopologies - return sc, nil } +// backwardCompatibleAccessModes translates all instances of ReadWriteMany +// access mode from the in-tree plugin to ReadWriteOnce. This is because in-tree +// plugin never supported ReadWriteMany but also did not validate or enforce +// this access mode for pre-provisioned volumes. The GCE PD CSI Driver validates +// and enforces (fails) ReadWriteMany. Therefore we treat all in-tree +// ReadWriteMany as ReadWriteOnce volumes to not break legacy volumes. 
+func backwardCompatibleAccessModes(ams []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + if ams == nil { + return nil + } + newAM := []v1.PersistentVolumeAccessMode{} + for _, am := range ams { + if am == v1.ReadWriteMany { + newAM = append(newAM, v1.ReadWriteOnce) + } else { + newAM = append(newAM, am) + } + } + return newAM +} + // TranslateInTreePVToCSI takes a PV with GCEPersistentDisk set from in-tree // and converts the GCEPersistentDisk source to a CSIPersistentVolumeSource func (g *gcePersistentDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { @@ -119,6 +202,7 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.Persisten pv.Spec.PersistentVolumeSource.GCEPersistentDisk = nil pv.Spec.PersistentVolumeSource.CSI = csiSource + pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes) return pv, nil } diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go index e5d970fb220..cd64ec92275 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/gce_pd_test.go @@ -24,49 +24,186 @@ import ( storage "k8s.io/api/storage/v1" ) -func NewStorageClass(params map[string]string) storage.StorageClass { - return storage.StorageClass{ - Parameters: params, +func NewStorageClass(params map[string]string, allowedTopologies []v1.TopologySelectorTerm) *storage.StorageClass { + return &storage.StorageClass{ + Parameters: params, + AllowedTopologies: allowedTopologies, } } -func TestTranslatePDInTreeVolumeOptionsToCSI(t *testing.T) { +func TestTranslatePDInTreeStorageClassToCSI(t *testing.T) { g := NewGCEPersistentDiskCSITranslator() tcs := []struct { name string - options storage.StorageClass - expOptions storage.StorageClass + options *storage.StorageClass + expOptions *storage.StorageClass + expErr bool }{ { name: "nothing special", - options: NewStorageClass(map[string]string{"foo": "bar"}), - expOptions: NewStorageClass(map[string]string{"foo": "bar"}), + options: NewStorageClass(map[string]string{"foo": "bar"}, nil), + expOptions: NewStorageClass(map[string]string{"foo": "bar"}, nil), }, { name: "fstype", - options: NewStorageClass(map[string]string{"fstype": "myfs"}), - expOptions: NewStorageClass(map[string]string{"csi.storage.k8s.io/fstype": "myfs"}), + options: NewStorageClass(map[string]string{"fstype": "myfs"}, nil), + expOptions: NewStorageClass(map[string]string{"csi.storage.k8s.io/fstype": "myfs"}, nil), }, { name: "empty params", - options: NewStorageClass(map[string]string{}), - expOptions: NewStorageClass(map[string]string{}), + options: NewStorageClass(map[string]string{}, nil), + expOptions: NewStorageClass(map[string]string{}, nil), + }, + { + name: "zone", + options: NewStorageClass(map[string]string{"zone": "foo"}, nil), + expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(GCEPDTopologyKey, []string{"foo"})), + }, + { + name: "zones", + options: NewStorageClass(map[string]string{"zones": "foo,bar,baz"}, nil), + expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(GCEPDTopologyKey, []string{"foo", "bar", "baz"})), + }, + { + name: "some normal topology", + options: NewStorageClass(map[string]string{}, generateToplogySelectors(GCEPDTopologyKey, []string{"foo"})), + expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(GCEPDTopologyKey, []string{"foo"})), + }, + { + 
name: "some translated topology", + options: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelZoneFailureDomain, []string{"foo"})), + expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(GCEPDTopologyKey, []string{"foo"})), + }, + { + name: "zone and topology", + options: NewStorageClass(map[string]string{"zone": "foo"}, generateToplogySelectors(GCEPDTopologyKey, []string{"foo"})), + expErr: true, }, } for _, tc := range tcs { t.Logf("Testing %v", tc.name) - gotOptions, err := g.TranslateInTreeVolumeOptionsToCSI(tc.options) - if err != nil { + gotOptions, err := g.TranslateInTreeStorageClassToCSI(tc.options) + if err != nil && !tc.expErr { t.Errorf("Did not expect error but got: %v", err) } + if err == nil && tc.expErr { + t.Errorf("Expected error, but did not get one.") + } if !reflect.DeepEqual(gotOptions, tc.expOptions) { t.Errorf("Got parameters: %v, expected :%v", gotOptions, tc.expOptions) } } } +func TestTranslateAllowedTopologies(t *testing.T) { + testCases := []struct { + name string + topology []v1.TopologySelectorTerm + expectedToplogy []v1.TopologySelectorTerm + expErr bool + }{ + { + name: "no translation", + topology: generateToplogySelectors(GCEPDTopologyKey, []string{"foo", "bar"}), + expectedToplogy: []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: GCEPDTopologyKey, + Values: []string{"foo", "bar"}, + }, + }, + }, + }, + }, + { + name: "translate", + topology: []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: "failure-domain.beta.kubernetes.io/zone", + Values: []string{"foo", "bar"}, + }, + }, + }, + }, + expectedToplogy: []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: GCEPDTopologyKey, + Values: []string{"foo", "bar"}, + }, + }, + }, + }, + }, + { + name: "combo", + topology: []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: "failure-domain.beta.kubernetes.io/zone", + Values: []string{"foo", "bar"}, + }, + { + Key: GCEPDTopologyKey, + Values: []string{"boo", "baz"}, + }, + }, + }, + }, + expectedToplogy: []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: GCEPDTopologyKey, + Values: []string{"foo", "bar"}, + }, + { + Key: GCEPDTopologyKey, + Values: []string{"boo", "baz"}, + }, + }, + }, + }, + }, + { + name: "some other key", + topology: []v1.TopologySelectorTerm{ + { + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: "test", + Values: []string{"foo", "bar"}, + }, + }, + }, + }, + expErr: true, + }, + } + + for _, tc := range testCases { + t.Logf("Running test: %v", tc.name) + gotTop, err := translateAllowedTopologies(tc.topology) + if err != nil && !tc.expErr { + t.Errorf("Did not expect an error, got: %v", err) + } + if err == nil && tc.expErr { + t.Errorf("Expected an error but did not get one") + } + + if !reflect.DeepEqual(gotTop, tc.expectedToplogy) { + t.Errorf("Expected topology: %v, but got: %v", tc.expectedToplogy, gotTop) + } + } +} + func TestBackwardCompatibleAccessModes(t *testing.T) { testCases := []struct { name string diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go b/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go index 29a0ef5f8cd..d50316743c9 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go +++ 
b/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go @@ -24,9 +24,9 @@ import ( // InTreePlugin handles translations between CSI and in-tree sources in a PV type InTreePlugin interface { - // TranslateInTreeVolumeOptionsToCSI takes in-tree volume options + // TranslateInTreeStorageClassToCSI takes in-tree volume options // and translates them to a volume options consumable by CSI plugin - TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) + TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) // TranslateInTreePVToCSI takes a persistent volume and will translate // the in-tree source to a CSI Source. The input persistent volume can be modified diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go b/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go index 5b22b5e6951..de283453a4b 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/openstack_cinder.go @@ -41,7 +41,7 @@ func NewOpenStackCinderCSITranslator() InTreePlugin { } // TranslateInTreeStorageClassParametersToCSI translates InTree Cinder storage class parameters to CSI storage class -func (t *osCinderCSITranslator) TranslateInTreeVolumeOptionsToCSI(sc storage.StorageClass) (storage.StorageClass, error) { +func (t *osCinderCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) { return sc, nil } diff --git a/staging/src/k8s.io/csi-translation-lib/translate.go b/staging/src/k8s.io/csi-translation-lib/translate.go index d22c0c2c619..a13ac422df2 100644 --- a/staging/src/k8s.io/csi-translation-lib/translate.go +++ b/staging/src/k8s.io/csi-translation-lib/translate.go @@ -33,15 +33,16 @@ var ( } ) -// TranslateInTreeVolumeOptionsToCSI takes in-tree volume options -// and translates them to a set of parameters consumable by CSI plugin -func TranslateInTreeVolumeOptionsToCSI(inTreePluginName string, sc storage.StorageClass) (storage.StorageClass, error) { +// TranslateInTreeStorageClassToCSI takes in-tree Storage Class +// and translates it to a set of parameters consumable by CSI plugin +func TranslateInTreeStorageClassToCSI(inTreePluginName string, sc *storage.StorageClass) (*storage.StorageClass, error) { + newSC := sc.DeepCopy() for _, curPlugin := range inTreePlugins { if inTreePluginName == curPlugin.GetInTreePluginName() { - return curPlugin.TranslateInTreeVolumeOptionsToCSI(sc) + return curPlugin.TranslateInTreeStorageClassToCSI(newSC) } } - return storage.StorageClass{}, fmt.Errorf("could not find in-tree storage class parameter translation logic for %#v", inTreePluginName) + return nil, fmt.Errorf("could not find in-tree storage class parameter translation logic for %#v", inTreePluginName) } // TranslateInTreePVToCSI takes a persistent volume and will translate From bf09f4ba4f955cd1d0b164b6eb4749f3de9d7f68 Mon Sep 17 00:00:00 2001 From: Erick Fejta Date: Mon, 13 May 2019 23:31:16 -0700 Subject: [PATCH 171/194] Update bazel-toolschains for bazel <= 0.25.1 rbe support --- build/root/WORKSPACE | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 45f1ff382b6..632f765420d 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -1,13 +1,15 @@ +workspace(name = "io_k8s_kubernetes") + load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("//build:workspace_mirror.bzl", 
"mirror") http_archive( name = "bazel_toolchains", - sha256 = "f5acacb61693e00c993dbe3357cb4eb71eb49c6ed1e8b11215cef8738c7674cb", - strip_prefix = "bazel-toolchains-997c10a", + sha256 = "3a6ffe6dd91ee975f5d5bc5c50b34f58e3881dfac59a7b7aba3323bd8f8571a8", + strip_prefix = "bazel-toolchains-92dd8a7", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/997c10a.tar.gz", - "https://github.com/bazelbuild/bazel-toolchains/archive/997c10a.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/92dd8a7.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/archive/92dd8a7.tar.gz", ], ) From 7d62316f8f0448e0e05389d883c816715d696484 Mon Sep 17 00:00:00 2001 From: Mike Crute Date: Mon, 13 May 2019 14:55:54 -0700 Subject: [PATCH 172/194] Migrate fake cloud provider to staging --- hack/.golint_failures | 1 - pkg/cloudprovider/providers/BUILD | 1 - pkg/controller/cloud/BUILD | 2 +- pkg/controller/cloud/node_controller_test.go | 22 +-- .../cloud/node_lifecycle_controller_test.go | 26 ++-- pkg/controller/route/BUILD | 2 +- pkg/controller/route/route_controller_test.go | 6 +- pkg/controller/service/BUILD | 2 +- .../service/service_controller_test.go | 20 +-- pkg/kubelet/cloudresource/BUILD | 2 +- .../cloud_request_manager_test.go | 6 +- pkg/kubelet/nodestatus/BUILD | 2 +- pkg/kubelet/nodestatus/setters_test.go | 4 +- pkg/volume/azure_file/BUILD | 2 +- pkg/volume/azure_file/azure_file_test.go | 4 +- pkg/volume/vsphere_volume/BUILD | 2 +- .../vsphere_volume/vsphere_volume_test.go | 4 +- staging/src/k8s.io/cloud-provider/BUILD | 1 + .../src/k8s.io/cloud-provider}/fake/BUILD | 3 +- .../src/k8s.io/cloud-provider}/fake/doc.go | 2 +- .../src/k8s.io/cloud-provider}/fake/fake.go | 138 ++++++++++-------- test/integration/serving/BUILD | 2 +- test/integration/serving/serving_test.go | 4 +- test/integration/volume/BUILD | 2 +- test/integration/volume/attach_detach_test.go | 4 +- .../volume/persistent_volumes_test.go | 4 +- vendor/modules.txt | 1 + 27 files changed, 146 insertions(+), 123 deletions(-) rename {pkg/cloudprovider/providers => staging/src/k8s.io/cloud-provider}/fake/BUILD (86%) rename {pkg/cloudprovider/providers => staging/src/k8s.io/cloud-provider}/fake/doc.go (90%) rename {pkg/cloudprovider/providers => staging/src/k8s.io/cloud-provider}/fake/fake.go (55%) diff --git a/hack/.golint_failures b/hack/.golint_failures index 786e60789cc..0b5ec8e5fa9 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -50,7 +50,6 @@ pkg/apis/rbac/validation pkg/apis/storage pkg/apis/storage/v1 pkg/apis/storage/v1beta1 -pkg/cloudprovider/providers/fake pkg/cloudprovider/providers/photon pkg/controller pkg/controller/apis/config/v1alpha1 diff --git a/pkg/cloudprovider/providers/BUILD b/pkg/cloudprovider/providers/BUILD index fd83ea439e8..66dc0a30f05 100644 --- a/pkg/cloudprovider/providers/BUILD +++ b/pkg/cloudprovider/providers/BUILD @@ -36,7 +36,6 @@ filegroup( srcs = [ ":package-srcs", "//pkg/cloudprovider/providers/cloudstack:all-srcs", - "//pkg/cloudprovider/providers/fake:all-srcs", "//pkg/cloudprovider/providers/openstack:all-srcs", "//pkg/cloudprovider/providers/ovirt:all-srcs", "//pkg/cloudprovider/providers/photon:all-srcs", diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 4ff7b17aad8..73d3a0948d2 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -46,7 +46,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider/providers/fake:go_default_library", 
"//pkg/controller:go_default_library", "//pkg/controller/testutil:go_default_library", "//pkg/kubelet/apis:go_default_library", @@ -60,6 +59,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go index f78b2f73c87..95c22911829 100644 --- a/pkg/controller/cloud/node_controller_test.go +++ b/pkg/controller/cloud/node_controller_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/tools/record" cloudprovider "k8s.io/cloud-provider" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/testutil" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -135,7 +135,7 @@ func TestEnsureNodeExistsByProviderID(t *testing.T) { for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { - fc := &fakecloud.FakeCloud{ + fc := &fakecloud.Cloud{ ExistsByProviderID: tc.existsByProviderID, Err: tc.nodeNameErr, ErrByProviderID: tc.providerIDErr, @@ -199,7 +199,7 @@ func TestNodeInitialized(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{ types.NodeName("node0"): "t1.micro", }, @@ -264,7 +264,7 @@ func TestNodeIgnored(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{ types.NodeName("node0"): "t1.micro", }, @@ -336,7 +336,7 @@ func TestGCECondition(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{ types.NodeName("node0"): "t1.micro", }, @@ -421,7 +421,7 @@ func TestZoneInitialized(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{ types.NodeName("node0"): "t1.micro", }, @@ -511,7 +511,7 @@ func TestNodeAddresses(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{}, Addresses: []v1.NodeAddress{ { @@ -624,7 +624,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{ types.NodeName("node0"): "t1.micro", types.NodeName("node0.aws.12345"): "t2.macro", @@ -839,7 +839,7 @@ func TestNodeAddressesNotUpdate(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{}, Addresses: []v1.NodeAddress{ { @@ 
-914,7 +914,7 @@ func TestNodeProviderID(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{}, Addresses: []v1.NodeAddress{ { @@ -997,7 +997,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) { factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) - fakeCloud := &fakecloud.FakeCloud{ + fakeCloud := &fakecloud.Cloud{ InstanceTypes: map[types.NodeName]string{}, Addresses: []v1.NodeAddress{ { diff --git a/pkg/controller/cloud/node_lifecycle_controller_test.go b/pkg/controller/cloud/node_lifecycle_controller_test.go index c6e395274c5..67685f36be9 100644 --- a/pkg/controller/cloud/node_lifecycle_controller_test.go +++ b/pkg/controller/cloud/node_lifecycle_controller_test.go @@ -30,8 +30,8 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/klog" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/pkg/controller/testutil" ) @@ -39,7 +39,7 @@ func Test_NodesDeleted(t *testing.T) { testcases := []struct { name string fnh *testutil.FakeNodeHandler - fakeCloud *fakecloud.FakeCloud + fakeCloud *fakecloud.Cloud deleteNodes []*v1.Node }{ { @@ -66,7 +66,7 @@ func Test_NodesDeleted(t *testing.T) { DeletedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ ExistsByProviderID: false, }, deleteNodes: []*v1.Node{ @@ -100,7 +100,7 @@ func Test_NodesDeleted(t *testing.T) { DeletedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ ExistsByProviderID: false, ErrByProviderID: errors.New("err!"), }, @@ -133,7 +133,7 @@ func Test_NodesDeleted(t *testing.T) { DeletedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ ExistsByProviderID: true, }, deleteNodes: []*v1.Node{}, @@ -162,7 +162,7 @@ func Test_NodesDeleted(t *testing.T) { DeletedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ ExistsByProviderID: false, }, deleteNodes: []*v1.Node{ @@ -193,7 +193,7 @@ func Test_NodesDeleted(t *testing.T) { DeletedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ NodeShutdown: false, ExistsByProviderID: true, ExtID: map[types.NodeName]string{ @@ -229,7 +229,7 @@ func Test_NodesDeleted(t *testing.T) { DeletedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ ExistsByProviderID: false, }, deleteNodes: []*v1.Node{}, @@ -270,7 +270,7 @@ func Test_NodesShutdown(t *testing.T) { testcases := []struct { name string fnh *testutil.FakeNodeHandler - fakeCloud *fakecloud.FakeCloud + fakeCloud *fakecloud.Cloud updatedNodes []*v1.Node }{ { @@ -297,7 +297,7 @@ func Test_NodesShutdown(t *testing.T) { UpdatedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ NodeShutdown: true, ErrShutdownByProviderID: nil, }, @@ -349,7 +349,7 @@ func Test_NodesShutdown(t *testing.T) { UpdatedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: 
&fakecloud.Cloud{ NodeShutdown: false, ErrShutdownByProviderID: errors.New("err!"), }, @@ -379,7 +379,7 @@ func Test_NodesShutdown(t *testing.T) { UpdatedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ NodeShutdown: false, ErrShutdownByProviderID: nil, }, @@ -409,7 +409,7 @@ func Test_NodesShutdown(t *testing.T) { UpdatedNodes: []*v1.Node{}, Clientset: fake.NewSimpleClientset(), }, - fakeCloud: &fakecloud.FakeCloud{ + fakeCloud: &fakecloud.Cloud{ NodeShutdown: true, ErrShutdownByProviderID: nil, }, diff --git a/pkg/controller/route/BUILD b/pkg/controller/route/BUILD index f74931ae354..accb9a40b07 100644 --- a/pkg/controller/route/BUILD +++ b/pkg/controller/route/BUILD @@ -42,7 +42,6 @@ go_test( srcs = ["route_controller_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -52,6 +51,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", ], ) diff --git a/pkg/controller/route/route_controller_test.go b/pkg/controller/route/route_controller_test.go index 4283779546b..89148dfc01d 100644 --- a/pkg/controller/route/route_controller_test.go +++ b/pkg/controller/route/route_controller_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" cloudprovider "k8s.io/cloud-provider" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/controller" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" ) @@ -226,9 +226,9 @@ func TestReconcile(t *testing.T) { }, } for i, testCase := range testCases { - cloud := &fakecloud.FakeCloud{RouteMap: make(map[string]*fakecloud.FakeRoute)} + cloud := &fakecloud.Cloud{RouteMap: make(map[string]*fakecloud.Route)} for _, route := range testCase.initialRoutes { - fakeRoute := &fakecloud.FakeRoute{} + fakeRoute := &fakecloud.Route{} fakeRoute.ClusterName = cluster fakeRoute.Route = *route cloud.RouteMap[route.Name] = fakeRoute diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD index 744d36d85c0..e679bad8ec2 100644 --- a/pkg/controller/service/BUILD +++ b/pkg/controller/service/BUILD @@ -43,7 +43,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/api/testapi:go_default_library", - "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -54,6 +53,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", ], ) diff --git a/pkg/controller/service/service_controller_test.go b/pkg/controller/service/service_controller_test.go index 78d9e783bbd..088d3389259 100644 --- a/pkg/controller/service/service_controller_test.go +++ b/pkg/controller/service/service_controller_test.go @@ -32,8 +32,8 @@ import ( "k8s.io/client-go/kubernetes/fake" core 
"k8s.io/client-go/testing" "k8s.io/client-go/tools/record" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/api/testapi" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/pkg/controller" ) @@ -52,8 +52,8 @@ func defaultExternalService() *v1.Service { func alwaysReady() bool { return true } -func newController() (*ServiceController, *fakecloud.FakeCloud, *fake.Clientset) { - cloud := &fakecloud.FakeCloud{} +func newController() (*ServiceController, *fakecloud.Cloud, *fake.Clientset) { + cloud := &fakecloud.Cloud{} cloud.Region = region client := fake.NewSimpleClientset() @@ -166,7 +166,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) { t.Errorf("unexpected client actions: %v", actions) } } else { - var balancer *fakecloud.FakeBalancer + var balancer *fakecloud.Balancer for k := range cloud.Balancers { if balancer == nil { b := cloud.Balancers[k] @@ -205,7 +205,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { } table := []struct { services []*v1.Service - expectedUpdateCalls []fakecloud.FakeUpdateBalancerCall + expectedUpdateCalls []fakecloud.UpdateBalancerCall }{ { // No services present: no calls should be made. @@ -225,7 +225,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { services: []*v1.Service{ newService("s0", "333", v1.ServiceTypeLoadBalancer), }, - expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ {Service: newService("s0", "333", v1.ServiceTypeLoadBalancer), Hosts: nodes}, }, }, @@ -236,7 +236,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { newService("s1", "555", v1.ServiceTypeLoadBalancer), newService("s2", "666", v1.ServiceTypeLoadBalancer), }, - expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ {Service: newService("s0", "444", v1.ServiceTypeLoadBalancer), Hosts: nodes}, {Service: newService("s1", "555", v1.ServiceTypeLoadBalancer), Hosts: nodes}, {Service: newService("s2", "666", v1.ServiceTypeLoadBalancer), Hosts: nodes}, @@ -250,7 +250,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { newService("s3", "999", v1.ServiceTypeLoadBalancer), newService("s4", "123", v1.ServiceTypeClusterIP), }, - expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ {Service: newService("s1", "888", v1.ServiceTypeLoadBalancer), Hosts: nodes}, {Service: newService("s3", "999", v1.ServiceTypeLoadBalancer), Hosts: nodes}, }, @@ -261,7 +261,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { newService("s0", "234", v1.ServiceTypeLoadBalancer), nil, }, - expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ {Service: newService("s0", "234", v1.ServiceTypeLoadBalancer), Hosts: nodes}, }, }, @@ -527,7 +527,7 @@ func TestSyncService(t *testing.T) { func TestProcessServiceDeletion(t *testing.T) { var controller *ServiceController - var cloud *fakecloud.FakeCloud + var cloud *fakecloud.Cloud // Add a global svcKey name svcKey := "external-balancer" diff --git a/pkg/kubelet/cloudresource/BUILD b/pkg/kubelet/cloudresource/BUILD index 9c539ff6741..8de49d6aca8 100644 --- a/pkg/kubelet/cloudresource/BUILD +++ b/pkg/kubelet/cloudresource/BUILD @@ -19,9 +19,9 @@ go_test( srcs = ["cloud_request_manager_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider/providers/fake:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", ], ) diff --git a/pkg/kubelet/cloudresource/cloud_request_manager_test.go b/pkg/kubelet/cloudresource/cloud_request_manager_test.go index e6d829e8447..9c74d737b79 100644 --- a/pkg/kubelet/cloudresource/cloud_request_manager_test.go +++ b/pkg/kubelet/cloudresource/cloud_request_manager_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + "k8s.io/cloud-provider/fake" ) func createNodeInternalIPAddress(address string) []v1.NodeAddress { @@ -38,7 +38,7 @@ func createNodeInternalIPAddress(address string) []v1.NodeAddress { func TestNodeAddressesDelay(t *testing.T) { syncPeriod := 100 * time.Millisecond - cloud := &fake.FakeCloud{ + cloud := &fake.Cloud{ Addresses: createNodeInternalIPAddress("10.0.1.12"), // Set the request delay so the manager timeouts and collects the node addresses later RequestDelay: 200 * time.Millisecond, @@ -82,7 +82,7 @@ func TestNodeAddressesDelay(t *testing.T) { } func TestNodeAddressesUsesLastSuccess(t *testing.T) { - cloud := &fake.FakeCloud{} + cloud := &fake.Cloud{} manager := NewSyncManager(cloud, "defaultNode", 0).(*cloudResourceSyncManager) // These tests are stateful and order dependant. diff --git a/pkg/kubelet/nodestatus/BUILD b/pkg/kubelet/nodestatus/BUILD index a82737108f8..393c858dcec 100644 --- a/pkg/kubelet/nodestatus/BUILD +++ b/pkg/kubelet/nodestatus/BUILD @@ -46,7 +46,6 @@ go_test( srcs = ["setters_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", @@ -62,6 +61,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go index 6a3e73cd9d0..f00d68e3ee8 100644 --- a/pkg/kubelet/nodestatus/setters_test.go +++ b/pkg/kubelet/nodestatus/setters_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/uuid" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -249,7 +249,7 @@ func TestNodeAddress(t *testing.T) { } hostname := testKubeletHostname externalCloudProvider := false - cloud := &fakecloud.FakeCloud{ + cloud := &fakecloud.Cloud{ Addresses: testCase.nodeAddresses, Err: nil, } diff --git a/pkg/volume/azure_file/BUILD b/pkg/volume/azure_file/BUILD index 999850dc493..30bfee3d12e 100644 --- a/pkg/volume/azure_file/BUILD +++ b/pkg/volume/azure_file/BUILD @@ -38,7 +38,6 @@ go_test( srcs = ["azure_file_test.go"], embed = [":go_default_library"], deps = [ - 
"//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", @@ -46,6 +45,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", ], diff --git a/pkg/volume/azure_file/azure_file_test.go b/pkg/volume/azure_file/azure_file_test.go index 80cd1d62e64..1c6a35cdbad 100644 --- a/pkg/volume/azure_file/azure_file_test.go +++ b/pkg/volume/azure_file/azure_file_test.go @@ -30,7 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -119,7 +119,7 @@ func TestPluginWithoutCloudProvider(t *testing.T) { func TestPluginWithOtherCloudProvider(t *testing.T) { tmpDir := getTestTempDir(t) defer os.RemoveAll(tmpDir) - cloud := &fakecloud.FakeCloud{} + cloud := &fakecloud.Cloud{} testPlugin(t, tmpDir, volumetest.NewFakeVolumeHostWithCloudProvider(tmpDir, nil, nil, cloud)) } diff --git a/pkg/volume/vsphere_volume/BUILD b/pkg/volume/vsphere_volume/BUILD index 1d07f3a363d..1263202752d 100644 --- a/pkg/volume/vsphere_volume/BUILD +++ b/pkg/volume/vsphere_volume/BUILD @@ -45,7 +45,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", @@ -54,6 +53,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/vsphere:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/pkg/volume/vsphere_volume/vsphere_volume_test.go b/pkg/volume/vsphere_volume/vsphere_volume_test.go index 80706652d87..0bae75180fc 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_test.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" cloudprovider "k8s.io/cloud-provider" - "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -203,7 +203,7 @@ func TestUnsupportedCloudProvider(t *testing.T) { }{ {name: "nil cloudprovider", cloudProvider: nil}, {name: "vSphere", cloudProvider: &vsphere.VSphere{}, success: true}, - {name: "fake cloudprovider", cloudProvider: &fake.FakeCloud{}}, + {name: "fake cloudprovider", cloudProvider: &fake.Cloud{}}, } for _, tc := range testcases { diff --git a/staging/src/k8s.io/cloud-provider/BUILD b/staging/src/k8s.io/cloud-provider/BUILD index 18b3c718dae..9bb94000886 
100644 --- a/staging/src/k8s.io/cloud-provider/BUILD +++ b/staging/src/k8s.io/cloud-provider/BUILD @@ -35,6 +35,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/cloud-provider/fake:all-srcs", "//staging/src/k8s.io/cloud-provider/node:all-srcs", "//staging/src/k8s.io/cloud-provider/service/helpers:all-srcs", "//staging/src/k8s.io/cloud-provider/volume:all-srcs", diff --git a/pkg/cloudprovider/providers/fake/BUILD b/staging/src/k8s.io/cloud-provider/fake/BUILD similarity index 86% rename from pkg/cloudprovider/providers/fake/BUILD rename to staging/src/k8s.io/cloud-provider/fake/BUILD index baa744df6ed..8651834c641 100644 --- a/pkg/cloudprovider/providers/fake/BUILD +++ b/staging/src/k8s.io/cloud-provider/fake/BUILD @@ -11,7 +11,8 @@ go_library( "doc.go", "fake.go", ], - importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/fake", + importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider/fake", + importpath = "k8s.io/cloud-provider/fake", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/cloudprovider/providers/fake/doc.go b/staging/src/k8s.io/cloud-provider/fake/doc.go similarity index 90% rename from pkg/cloudprovider/providers/fake/doc.go rename to staging/src/k8s.io/cloud-provider/fake/doc.go index d550c26c2d5..c5761fe0d1f 100644 --- a/pkg/cloudprovider/providers/fake/doc.go +++ b/staging/src/k8s.io/cloud-provider/fake/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package fake is a test-double implementation of cloudprovider // Interface, LoadBalancer and Instances. It is useful for testing. -package fake // import "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" +package fake // import "k8s.io/cloud-provider/fake" diff --git a/pkg/cloudprovider/providers/fake/fake.go b/staging/src/k8s.io/cloud-provider/fake/fake.go similarity index 55% rename from pkg/cloudprovider/providers/fake/fake.go rename to staging/src/k8s.io/cloud-provider/fake/fake.go index 01cec8a75a3..854a10380cd 100644 --- a/pkg/cloudprovider/providers/fake/fake.go +++ b/staging/src/k8s.io/cloud-provider/fake/fake.go @@ -31,8 +31,8 @@ import ( const defaultProviderName = "fake" -// FakeBalancer is a fake storage of balancer information -type FakeBalancer struct { +// Balancer is a fake storage of balancer information +type Balancer struct { Name string Region string LoadBalancerIP string @@ -40,21 +40,22 @@ type FakeBalancer struct { Hosts []*v1.Node } -type FakeUpdateBalancerCall struct { +// UpdateBalancerCall represents a fake call to update load balancers +type UpdateBalancerCall struct { Service *v1.Service Hosts []*v1.Node } -var _ cloudprovider.Interface = (*FakeCloud)(nil) -var _ cloudprovider.Instances = (*FakeCloud)(nil) -var _ cloudprovider.LoadBalancer = (*FakeCloud)(nil) -var _ cloudprovider.Routes = (*FakeCloud)(nil) -var _ cloudprovider.Zones = (*FakeCloud)(nil) -var _ cloudprovider.PVLabeler = (*FakeCloud)(nil) -var _ cloudprovider.Clusters = (*FakeCloud)(nil) +var _ cloudprovider.Interface = (*Cloud)(nil) +var _ cloudprovider.Instances = (*Cloud)(nil) +var _ cloudprovider.LoadBalancer = (*Cloud)(nil) +var _ cloudprovider.Routes = (*Cloud)(nil) +var _ cloudprovider.Zones = (*Cloud)(nil) +var _ cloudprovider.PVLabeler = (*Cloud)(nil) +var _ cloudprovider.Clusters = (*Cloud)(nil) -// FakeCloud is a test-double implementation of Interface, LoadBalancer, Instances, and Routes. It is useful for testing. 
-type FakeCloud struct { +// Cloud is a test-double implementation of Interface, LoadBalancer, Instances, and Routes. It is useful for testing. +type Cloud struct { Exists bool Err error @@ -73,9 +74,9 @@ type FakeCloud struct { ClusterList []string MasterName string ExternalIP net.IP - Balancers map[string]FakeBalancer - UpdateCalls []FakeUpdateBalancerCall - RouteMap map[string]*FakeRoute + Balancers map[string]Balancer + UpdateCalls []UpdateBalancerCall + RouteMap map[string]*Route Lock sync.Mutex Provider string addCallLock sync.Mutex @@ -85,12 +86,13 @@ type FakeCloud struct { RequestDelay time.Duration } -type FakeRoute struct { +// Route is a representation of an advanced routing rule. +type Route struct { ClusterName string Route cloudprovider.Route } -func (f *FakeCloud) addCall(desc string) { +func (f *Cloud) addCall(desc string) { f.addCallLock.Lock() defer f.addCallLock.Unlock() @@ -99,29 +101,32 @@ func (f *FakeCloud) addCall(desc string) { f.Calls = append(f.Calls, desc) } -// ClearCalls clears internal record of method calls to this FakeCloud. -func (f *FakeCloud) ClearCalls() { +// ClearCalls clears internal record of method calls to this Cloud. +func (f *Cloud) ClearCalls() { f.Calls = []string{} } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (f *FakeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +func (f *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { } -func (f *FakeCloud) ListClusters(ctx context.Context) ([]string, error) { +// ListClusters lists the names of the available clusters. +func (f *Cloud) ListClusters(ctx context.Context) ([]string, error) { return f.ClusterList, f.Err } -func (f *FakeCloud) Master(ctx context.Context, name string) (string, error) { +// Master gets back the address (either DNS name or IP address) of the master node for the cluster. +func (f *Cloud) Master(ctx context.Context, name string) (string, error) { return f.MasterName, f.Err } -func (f *FakeCloud) Clusters() (cloudprovider.Clusters, bool) { +// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise. +func (f *Cloud) Clusters() (cloudprovider.Clusters, bool) { return f, true } // ProviderName returns the cloud provider ID. -func (f *FakeCloud) ProviderName() string { +func (f *Cloud) ProviderName() string { if f.Provider == "" { return defaultProviderName } @@ -129,33 +134,35 @@ func (f *FakeCloud) ProviderName() string { } // HasClusterID returns true if the cluster has a clusterID -func (f *FakeCloud) HasClusterID() bool { +func (f *Cloud) HasClusterID() bool { return true } // LoadBalancer returns a fake implementation of LoadBalancer. // Actually it just returns f itself. -func (f *FakeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { +func (f *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return f, true } // Instances returns a fake implementation of Instances. // // Actually it just returns f itself. -func (f *FakeCloud) Instances() (cloudprovider.Instances, bool) { +func (f *Cloud) Instances() (cloudprovider.Instances, bool) { return f, true } -func (f *FakeCloud) Zones() (cloudprovider.Zones, bool) { +// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise. 
+func (f *Cloud) Zones() (cloudprovider.Zones, bool) { return f, true } -func (f *FakeCloud) Routes() (cloudprovider.Routes, bool) { +// Routes returns a routes interface along with whether the interface is supported. +func (f *Cloud) Routes() (cloudprovider.Routes, bool) { return f, true } // GetLoadBalancer is a stub implementation of LoadBalancer.GetLoadBalancer. -func (f *FakeCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { +func (f *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { status := &v1.LoadBalancerStatus{} status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}} @@ -163,17 +170,17 @@ func (f *FakeCloud) GetLoadBalancer(ctx context.Context, clusterName string, ser } // GetLoadBalancerName is a stub implementation of LoadBalancer.GetLoadBalancerName. -func (f *FakeCloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string { +func (f *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string { // TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names. return cloudprovider.DefaultLoadBalancerName(service) } // EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer. // It adds an entry "create" into the internal method call record. -func (f *FakeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (f *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { f.addCall("create") if f.Balancers == nil { - f.Balancers = make(map[string]FakeBalancer) + f.Balancers = make(map[string]Balancer) } name := f.GetLoadBalancerName(ctx, clusterName, service) @@ -185,7 +192,7 @@ func (f *FakeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, } region := zone.Region - f.Balancers[name] = FakeBalancer{name, region, spec.LoadBalancerIP, spec.Ports, nodes} + f.Balancers[name] = Balancer{name, region, spec.LoadBalancerIP, spec.Ports, nodes} status := &v1.LoadBalancerStatus{} status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}} @@ -195,38 +202,42 @@ func (f *FakeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, // UpdateLoadBalancer is a test-spy implementation of LoadBalancer.UpdateLoadBalancer. // It adds an entry "update" into the internal method call record. -func (f *FakeCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { +func (f *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { f.addCall("update") - f.UpdateCalls = append(f.UpdateCalls, FakeUpdateBalancerCall{service, nodes}) + f.UpdateCalls = append(f.UpdateCalls, UpdateBalancerCall{service, nodes}) return f.Err } // EnsureLoadBalancerDeleted is a test-spy implementation of LoadBalancer.EnsureLoadBalancerDeleted. // It adds an entry "delete" into the internal method call record. 
-func (f *FakeCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { +func (f *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { f.addCall("delete") return f.Err } -func (f *FakeCloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { +// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances +// expected format for the key is standard ssh-keygen format: +func (f *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { return cloudprovider.NotImplemented } -// Implementation of Instances.CurrentNodeName -func (f *FakeCloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { +// CurrentNodeName returns the name of the node we are currently running on +// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname +func (f *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil } // NodeAddresses is a test-spy implementation of Instances.NodeAddresses. // It adds an entry "node-addresses" into the internal method call record. -func (f *FakeCloud) NodeAddresses(ctx context.Context, instance types.NodeName) ([]v1.NodeAddress, error) { +func (f *Cloud) NodeAddresses(ctx context.Context, instance types.NodeName) ([]v1.NodeAddress, error) { f.addCall("node-addresses") f.addressesMux.Lock() defer f.addressesMux.Unlock() return f.Addresses, f.Err } -func (f *FakeCloud) SetNodeAddresses(nodeAddresses []v1.NodeAddress) { +// SetNodeAddresses sets the addresses for a node +func (f *Cloud) SetNodeAddresses(nodeAddresses []v1.NodeAddress) { f.addressesMux.Lock() defer f.addressesMux.Unlock() f.Addresses = nodeAddresses @@ -234,7 +245,7 @@ func (f *FakeCloud) SetNodeAddresses(nodeAddresses []v1.NodeAddress) { // NodeAddressesByProviderID is a test-spy implementation of Instances.NodeAddressesByProviderID. // It adds an entry "node-addresses-by-provider-id" into the internal method call record. -func (f *FakeCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { +func (f *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { f.addCall("node-addresses-by-provider-id") f.addressesMux.Lock() defer f.addressesMux.Unlock() @@ -242,39 +253,39 @@ func (f *FakeCloud) NodeAddressesByProviderID(ctx context.Context, providerID st } // InstanceID returns the cloud provider ID of the node with the specified Name. -func (f *FakeCloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { +func (f *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { f.addCall("instance-id") return f.ExtID[nodeName], nil } // InstanceType returns the type of the specified instance. -func (f *FakeCloud) InstanceType(ctx context.Context, instance types.NodeName) (string, error) { +func (f *Cloud) InstanceType(ctx context.Context, instance types.NodeName) (string, error) { f.addCall("instance-type") return f.InstanceTypes[instance], nil } // InstanceTypeByProviderID returns the type of the specified instance. 
-func (f *FakeCloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { +func (f *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { f.addCall("instance-type-by-provider-id") return f.InstanceTypes[types.NodeName(providerID)], nil } // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. -func (f *FakeCloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { +func (f *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { f.addCall("instance-exists-by-provider-id") return f.ExistsByProviderID, f.ErrByProviderID } // InstanceShutdownByProviderID returns true if the instances is in safe state to detach volumes -func (f *FakeCloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { +func (f *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { f.addCall("instance-shutdown-by-provider-id") return f.NodeShutdown, f.ErrShutdownByProviderID } // List is a test-spy implementation of Instances.List. // It adds an entry "list" into the internal method call record. -func (f *FakeCloud) List(filter string) ([]types.NodeName, error) { +func (f *Cloud) List(filter string) ([]types.NodeName, error) { f.addCall("list") result := []types.NodeName{} for _, machine := range f.Machines { @@ -285,7 +296,11 @@ func (f *FakeCloud) List(filter string) ([]types.NodeName, error) { return result, f.Err } -func (f *FakeCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { +// GetZone returns the Zone containing the current failure zone and locality region that the program is running in +// In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone. +// For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone +// can no longer be called from the kubelets. +func (f *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { f.addCall("get-zone") return f.Zone, f.Err } @@ -293,7 +308,7 @@ func (f *FakeCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { // GetZoneByProviderID implements Zones.GetZoneByProviderID // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. -func (f *FakeCloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { +func (f *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { f.addCall("get-zone-by-provider-id") return f.Zone, f.Err } @@ -301,12 +316,13 @@ func (f *FakeCloud) GetZoneByProviderID(ctx context.Context, providerID string) // GetZoneByNodeName implements Zones.GetZoneByNodeName // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. 
-func (f *FakeCloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { +func (f *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { f.addCall("get-zone-by-node-name") return f.Zone, f.Err } -func (f *FakeCloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { +// ListRoutes lists all managed routes that belong to the specified clusterName +func (f *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { f.Lock.Lock() defer f.Lock.Unlock() f.addCall("list-routes") @@ -320,7 +336,10 @@ func (f *FakeCloud) ListRoutes(ctx context.Context, clusterName string) ([]*clou return routes, f.Err } -func (f *FakeCloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { +// CreateRoute creates the described managed route +// route.Name will be ignored, although the cloud-provider may use nameHint +// to create a more user-meaningful name. +func (f *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { f.Lock.Lock() defer f.Lock.Unlock() f.addCall("create-route") @@ -329,7 +348,7 @@ func (f *FakeCloud) CreateRoute(ctx context.Context, clusterName string, nameHin f.Err = fmt.Errorf("route %q already exists", name) return f.Err } - fakeRoute := FakeRoute{} + fakeRoute := Route{} fakeRoute.Route = *route fakeRoute.Route.Name = name fakeRoute.ClusterName = clusterName @@ -337,7 +356,9 @@ func (f *FakeCloud) CreateRoute(ctx context.Context, clusterName string, nameHin return nil } -func (f *FakeCloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { +// DeleteRoute deletes the specified managed route +// Route should be as returned by ListRoutes +func (f *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { f.Lock.Lock() defer f.Lock.Unlock() f.addCall("delete-route") @@ -350,8 +371,9 @@ func (f *FakeCloud) DeleteRoute(ctx context.Context, clusterName string, route * return nil } -func (c *FakeCloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { - if val, ok := c.VolumeLabelMap[pv.Name]; ok { +// GetLabelsForVolume returns the labels for a PersistentVolume +func (f *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { + if val, ok := f.VolumeLabelMap[pv.Name]; ok { return val, nil } return nil, fmt.Errorf("label not found for volume") diff --git a/test/integration/serving/BUILD b/test/integration/serving/BUILD index 94797906206..005a708aaa6 100644 --- a/test/integration/serving/BUILD +++ b/test/integration/serving/BUILD @@ -21,10 +21,10 @@ go_test( "//cmd/kube-apiserver/app/testing:go_default_library", "//cmd/kube-controller-manager/app/testing:go_default_library", "//cmd/kube-scheduler/app/testing:go_default_library", - "//pkg/cloudprovider/providers/fake:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//test/integration/framework:go_default_library", ], ) diff --git a/test/integration/serving/serving_test.go b/test/integration/serving/serving_test.go index 1e5ce9fb045..b3b85a672bd 100644 --- 
a/test/integration/serving/serving_test.go +++ b/test/integration/serving/serving_test.go @@ -31,11 +31,11 @@ import ( "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/options" "k8s.io/cloud-provider" + "k8s.io/cloud-provider/fake" cloudctrlmgrtesting "k8s.io/kubernetes/cmd/cloud-controller-manager/app/testing" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" kubectrlmgrtesting "k8s.io/kubernetes/cmd/kube-controller-manager/app/testing" kubeschedulertesting "k8s.io/kubernetes/cmd/kube-scheduler/app/testing" - "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/test/integration/framework" ) @@ -321,5 +321,5 @@ func intPtr(x int) *int { } func fakeCloudProviderFactory(io.Reader) (cloudprovider.Interface, error) { - return &fake.FakeCloud{}, nil + return &fake.Cloud{}, nil } diff --git a/test/integration/volume/BUILD b/test/integration/volume/BUILD index 4325654c414..8ddcaafe9d9 100644 --- a/test/integration/volume/BUILD +++ b/test/integration/volume/BUILD @@ -16,7 +16,6 @@ go_test( tags = ["integration"], deps = [ "//pkg/api/legacyscheme:go_default_library", - "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller/volume/attachdetach:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/controller/volume/persistentvolume:go_default_library", @@ -38,6 +37,7 @@ go_test( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", + "//staging/src/k8s.io/cloud-provider/fake:go_default_library", "//test/integration/framework:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index 6f1bb9c47cd..1af6d6493ce 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -32,7 +32,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/controller/volume/attachdetach" volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" @@ -422,7 +422,7 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy Detachers: nil, } plugins := []volume.VolumePlugin{plugin} - cloud := &fakecloud.FakeCloud{} + cloud := &fakecloud.Cloud{} informers := clientgoinformers.NewSharedInformerFactory(testClient, resyncPeriod) ctrl, err := attachdetach.NewAttachDetachController( testClient, diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index ba1103634b5..7efec76ed7b 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -34,8 +34,8 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" ref "k8s.io/client-go/tools/reference" + fakecloud "k8s.io/cloud-provider/fake" "k8s.io/kubernetes/pkg/api/legacyscheme" - fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -1123,7 +1123,7 @@ func 
createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio Detachers: nil, } plugins := []volume.VolumePlugin{plugin} - cloud := &fakecloud.FakeCloud{} + cloud := &fakecloud.Cloud{} informers := informers.NewSharedInformerFactory(testClient, getSyncPeriod(syncPeriod)) ctrl, err := persistentvolumecontroller.NewController( persistentvolumecontroller.ControllerParameters{ diff --git a/vendor/modules.txt b/vendor/modules.txt index 9ed0853bec4..d319b04a75b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1530,6 +1530,7 @@ k8s.io/client-go/util/testing k8s.io/client-go/util/workqueue # k8s.io/cloud-provider v0.0.0 => ./staging/src/k8s.io/cloud-provider k8s.io/cloud-provider +k8s.io/cloud-provider/fake k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume From 41ccf4c77ed4c9dac9488cfa5efb0e232d5b3081 Mon Sep 17 00:00:00 2001 From: Tim Allclair Date: Tue, 14 May 2019 16:41:50 -0700 Subject: [PATCH 173/194] Don't create a RuntimeClassManager without a KubeClient --- pkg/kubelet/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 2983cdd0e32..12d812cb38b 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -656,7 +656,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } klet.runtimeService = runtimeService - if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) { + if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && kubeDeps.KubeClient != nil { klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient) } From d60bccc6a4820018b34b20a57491c8d7f54a8ecc Mon Sep 17 00:00:00 2001 From: draveness Date: Tue, 7 May 2019 09:03:00 +0800 Subject: [PATCH 174/194] feat: implement "queue-sort" extension point for scheduling framework --- pkg/scheduler/core/extender_test.go | 2 +- pkg/scheduler/core/generic_scheduler_test.go | 6 +- pkg/scheduler/factory/factory.go | 2 +- pkg/scheduler/factory/factory_test.go | 2 +- pkg/scheduler/framework/v1alpha1/framework.go | 15 ++ pkg/scheduler/framework/v1alpha1/interface.go | 22 ++ pkg/scheduler/internal/queue/BUILD | 3 + .../internal/queue/scheduling_queue.go | 128 +++++----- .../internal/queue/scheduling_queue_test.go | 238 +++++++++++------- pkg/scheduler/scheduler_test.go | 4 +- 10 files changed, 260 insertions(+), 162 deletions(-) diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 3ab1aadf16b..6c0e83aa704 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -536,7 +536,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { for _, name := range test.nodes { cache.AddNode(createNode(name)) } - queue := internalqueue.NewSchedulingQueue(nil) + queue := internalqueue.NewSchedulingQueue(nil, nil) scheduler := NewGenericScheduler( cache, queue, diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 1877f2b52df..c92b1600093 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -452,7 +452,7 @@ func TestGenericScheduler(t *testing.T) { scheduler := NewGenericScheduler( cache, - internalqueue.NewSchedulingQueue(nil), + internalqueue.NewSchedulingQueue(nil, nil), test.predicates, algorithmpredicates.EmptyPredicateMetadataProducer, test.prioritizers, @@ -488,7 +488,7 @@ func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes 
s := NewGenericScheduler( cache, - internalqueue.NewSchedulingQueue(nil), + internalqueue.NewSchedulingQueue(nil, nil), predicates, algorithmpredicates.EmptyPredicateMetadataProducer, prioritizers, @@ -1491,7 +1491,7 @@ func TestPreempt(t *testing.T) { } scheduler := NewGenericScheduler( cache, - internalqueue.NewSchedulingQueue(nil), + internalqueue.NewSchedulingQueue(nil, nil), map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithmpredicates.EmptyPredicateMetadataProducer, []priorities.PriorityConfig{{Function: numericPriority, Weight: 1}}, diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index dc8066a899c..c647032298e 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -262,7 +262,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) Configurator { c := &configFactory{ client: args.Client, podLister: schedulerCache, - podQueue: internalqueue.NewSchedulingQueue(stopEverything), + podQueue: internalqueue.NewSchedulingQueue(stopEverything, framework), nodeLister: args.NodeInformer.Lister(), pVLister: args.PvInformer.Lister(), pVCLister: args.PvcInformer.Lister(), diff --git a/pkg/scheduler/factory/factory_test.go b/pkg/scheduler/factory/factory_test.go index 631598090e8..44198aa2afc 100644 --- a/pkg/scheduler/factory/factory_test.go +++ b/pkg/scheduler/factory/factory_test.go @@ -256,7 +256,7 @@ func TestDefaultErrorFunc(t *testing.T) { defer close(stopCh) timestamp := time.Now() - queue := internalqueue.NewPriorityQueueWithClock(nil, clock.NewFakeClock(timestamp)) + queue := internalqueue.NewPriorityQueueWithClock(nil, clock.NewFakeClock(timestamp), nil) schedulerCache := internalcache.New(30*time.Second, stopCh) errFunc := MakeDefaultErrorFunc(client, queue, schedulerCache, stopCh) diff --git a/pkg/scheduler/framework/v1alpha1/framework.go b/pkg/scheduler/framework/v1alpha1/framework.go index 752c98cd631..b1593a46261 100644 --- a/pkg/scheduler/framework/v1alpha1/framework.go +++ b/pkg/scheduler/framework/v1alpha1/framework.go @@ -34,6 +34,7 @@ type framework struct { nodeInfoSnapshot *cache.NodeInfoSnapshot waitingPods *waitingPodsMap plugins map[string]Plugin // a map of initialized plugins. Plugin name:plugin instance. + queueSortPlugins []QueueSortPlugin reservePlugins []ReservePlugin prebindPlugins []PrebindPlugin unreservePlugins []UnreservePlugin @@ -69,6 +70,10 @@ func NewFramework(r Registry, _ *runtime.Unknown) (Framework, error) { // TODO: For now, we assume any plugins that implements an extension // point wants to be called at that extension point. We should change this // later and add these plugins based on the configuration. + if qsp, ok := p.(QueueSortPlugin); ok { + f.queueSortPlugins = append(f.queueSortPlugins, qsp) + } + if rp, ok := p.(ReservePlugin); ok { f.reservePlugins = append(f.reservePlugins, rp) } @@ -85,6 +90,16 @@ func NewFramework(r Registry, _ *runtime.Unknown) (Framework, error) { return f, nil } +// QueueSortFunc returns the function to sort pods in scheduling queue +func (f *framework) QueueSortFunc() LessFunc { + if len(f.queueSortPlugins) == 0 { + return nil + } + + // Only one QueueSort plugin can be enabled. + return f.queueSortPlugins[0].Less +} + // RunPrebindPlugins runs the set of configured prebind plugins. It returns a // failure (bool) if any of the plugins returns an error. It also returns an // error containing the rejection message or the error occurred in the plugin. 
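[Editor's note, not part of the patch: a rough sketch of what a plugin implementing the new "QueueSort" extension point could look like, reproducing the default priority-then-timestamp ordering of the active queue. The framework.PodInfo, framework.QueueSortPlugin, and util.GetPodPriority names come from this patch series; the package name "prioritysort" and plugin name "example-priority-sort" are invented for illustration, and real plugins would additionally be registered in the framework Registry passed to NewFramework.]

package prioritysort

import (
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

// Name is an illustrative plugin name chosen for this example.
const Name = "example-priority-sort"

// PrioritySort is a QueueSortPlugin that mirrors the default activeQ ordering:
// higher-priority pods first, with earlier enqueue time as the tie-breaker.
type PrioritySort struct{}

var _ framework.QueueSortPlugin = &PrioritySort{}

// Name returns the name of the plugin.
func (ps *PrioritySort) Name() string { return Name }

// Less ranks podInfo1 ahead of podInfo2 when it has a higher priority, or the
// same priority but an earlier Timestamp (the time it entered the queue).
func (ps *PrioritySort) Less(podInfo1, podInfo2 *framework.PodInfo) bool {
	p1 := util.GetPodPriority(podInfo1.Pod)
	p2 := util.GetPodPriority(podInfo2.Pod)
	return p1 > p2 || (p1 == p2 && podInfo1.Timestamp.Before(podInfo2.Timestamp))
}

[As QueueSortFunc above shows, only the first enabled QueueSort plugin's Less function is used to order the scheduling queue.]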
diff --git a/pkg/scheduler/framework/v1alpha1/interface.go b/pkg/scheduler/framework/v1alpha1/interface.go index e8ce959b512..09866b9d723 100644 --- a/pkg/scheduler/framework/v1alpha1/interface.go +++ b/pkg/scheduler/framework/v1alpha1/interface.go @@ -107,6 +107,25 @@ type Plugin interface { Name() string } +// PodInfo is minimum cell in the scheduling queue. +type PodInfo struct { + Pod *v1.Pod + // The time pod added to the scheduling queue. + Timestamp time.Time +} + +// LessFunc is the function to sort pod info +type LessFunc func(podInfo1, podInfo2 *PodInfo) bool + +// QueueSortPlugin is an interface that must be implemented by "QueueSort" plugins. +// These plugins are used to sort pods in the scheduling queue. Only one queue sort +// plugin may be enabled at a time. +type QueueSortPlugin interface { + Plugin + // Less are used to sort pods in the scheduling queue. + Less(*PodInfo, *PodInfo) bool +} + // ReservePlugin is an interface for Reserve plugins. These plugins are called // at the reservation point. These are meant to update the state of the plugin. // This concept used to be called 'assume' in the original scheduler. @@ -157,6 +176,9 @@ type PermitPlugin interface { // Configured plugins are called at specified points in a scheduling context. type Framework interface { FrameworkHandle + // QueueSortFunc returns the function to sort pods in scheduling queue + QueueSortFunc() LessFunc + // RunPrebindPlugins runs the set of configured prebind plugins. It returns // *Status and its code is set to non-success if any of the plugins returns // anything but Success. If the Status code is "Unschedulable", it is diff --git a/pkg/scheduler/internal/queue/BUILD b/pkg/scheduler/internal/queue/BUILD index a0729869868..c9c52749139 100644 --- a/pkg/scheduler/internal/queue/BUILD +++ b/pkg/scheduler/internal/queue/BUILD @@ -11,6 +11,7 @@ go_library( deps = [ "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -31,6 +32,8 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/api/v1/pod:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 07310e84143..e3382ba1c65 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -38,6 +38,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -87,8 +88,8 @@ type SchedulingQueue interface { } // NewSchedulingQueue initializes a priority queue as a new scheduling queue. 
-func NewSchedulingQueue(stop <-chan struct{}) SchedulingQueue { - return NewPriorityQueue(stop) +func NewSchedulingQueue(stop <-chan struct{}, fwk framework.Framework) SchedulingQueue { + return NewPriorityQueue(stop, fwk) } // NominatedNodeName returns nominated node name of a Pod. @@ -140,43 +141,48 @@ type PriorityQueue struct { // Making sure that PriorityQueue implements SchedulingQueue. var _ = SchedulingQueue(&PriorityQueue{}) -// podInfo is minimum cell in the scheduling queue. -type podInfo struct { - pod *v1.Pod - // The time pod added to the scheduling queue. - timestamp time.Time -} - -// newPodInfoNoTimestamp builds a podInfo object without timestamp. -func newPodInfoNoTimestamp(pod *v1.Pod) *podInfo { - return &podInfo{ - pod: pod, +// newPodInfoNoTimestamp builds a PodInfo object without timestamp. +func newPodInfoNoTimestamp(pod *v1.Pod) *framework.PodInfo { + return &framework.PodInfo{ + Pod: pod, } } // activeQComp is the function used by the activeQ heap algorithm to sort pods. // It sorts pods based on their priority. When priorities are equal, it uses -// podInfo.timestamp. +// PodInfo.timestamp. func activeQComp(podInfo1, podInfo2 interface{}) bool { - pInfo1 := podInfo1.(*podInfo) - pInfo2 := podInfo2.(*podInfo) - prio1 := util.GetPodPriority(pInfo1.pod) - prio2 := util.GetPodPriority(pInfo2.pod) - return (prio1 > prio2) || (prio1 == prio2 && pInfo1.timestamp.Before(pInfo2.timestamp)) + pInfo1 := podInfo1.(*framework.PodInfo) + pInfo2 := podInfo2.(*framework.PodInfo) + prio1 := util.GetPodPriority(pInfo1.Pod) + prio2 := util.GetPodPriority(pInfo2.Pod) + return (prio1 > prio2) || (prio1 == prio2 && pInfo1.Timestamp.Before(pInfo2.Timestamp)) } // NewPriorityQueue creates a PriorityQueue object. -func NewPriorityQueue(stop <-chan struct{}) *PriorityQueue { - return NewPriorityQueueWithClock(stop, util.RealClock{}) +func NewPriorityQueue(stop <-chan struct{}, fwk framework.Framework) *PriorityQueue { + return NewPriorityQueueWithClock(stop, util.RealClock{}, fwk) } // NewPriorityQueueWithClock creates a PriorityQueue which uses the passed clock for time. 
-func NewPriorityQueueWithClock(stop <-chan struct{}, clock util.Clock) *PriorityQueue { +func NewPriorityQueueWithClock(stop <-chan struct{}, clock util.Clock, fwk framework.Framework) *PriorityQueue { + comp := activeQComp + if fwk != nil { + if queueSortFunc := fwk.QueueSortFunc(); queueSortFunc != nil { + comp = func(podInfo1, podInfo2 interface{}) bool { + pInfo1 := podInfo1.(*framework.PodInfo) + pInfo2 := podInfo2.(*framework.PodInfo) + + return queueSortFunc(pInfo1, pInfo2) + } + } + } + pq := &PriorityQueue{ clock: clock, stop: stop, podBackoff: NewPodBackoffMap(1*time.Second, 10*time.Second), - activeQ: util.NewHeapWithRecorder(podInfoKeyFunc, activeQComp, metrics.NewActivePodsRecorder()), + activeQ: util.NewHeapWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()), unschedulableQ: newUnschedulablePodsMap(metrics.NewUnschedulablePodsRecorder()), nominatedPods: newNominatedPodMap(), moveRequestCycle: -1, @@ -334,7 +340,7 @@ func (p *PriorityQueue) flushBackoffQCompleted() { if rawPodInfo == nil { return } - pod := rawPodInfo.(*podInfo).pod + pod := rawPodInfo.(*framework.PodInfo).Pod boTime, found := p.podBackoff.GetBackoffTime(nsNameForPod(pod)) if !found { klog.Errorf("Unable to find backoff value for pod %v in backoffQ", nsNameForPod(pod)) @@ -363,10 +369,10 @@ func (p *PriorityQueue) flushUnschedulableQLeftover() { p.lock.Lock() defer p.lock.Unlock() - var podsToMove []*podInfo + var podsToMove []*framework.PodInfo currentTime := p.clock.Now() for _, pInfo := range p.unschedulableQ.podInfoMap { - lastScheduleTime := pInfo.timestamp + lastScheduleTime := pInfo.Timestamp if currentTime.Sub(lastScheduleTime) > unschedulableQTimeInterval { podsToMove = append(podsToMove, pInfo) } @@ -396,9 +402,9 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { if err != nil { return nil, err } - pInfo := obj.(*podInfo) + pInfo := obj.(*framework.PodInfo) p.schedulingCycle++ - return pInfo.pod, err + return pInfo.Pod, err } // isPodUpdated checks if the pod is updated in a way that it may have become @@ -428,7 +434,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { if oldPodInfo, exists, _ := p.activeQ.Get(oldPodInfo); exists { p.nominatedPods.update(oldPod, newPod) newPodInfo := newPodInfoNoTimestamp(newPod) - newPodInfo.timestamp = oldPodInfo.(*podInfo).timestamp + newPodInfo.Timestamp = oldPodInfo.(*framework.PodInfo).Timestamp err := p.activeQ.Update(newPodInfo) return err } @@ -438,7 +444,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { p.nominatedPods.update(oldPod, newPod) p.podBackoffQ.Delete(newPodInfoNoTimestamp(oldPod)) newPodInfo := newPodInfoNoTimestamp(newPod) - newPodInfo.timestamp = oldPodInfo.(*podInfo).timestamp + newPodInfo.Timestamp = oldPodInfo.(*framework.PodInfo).Timestamp err := p.activeQ.Add(newPodInfo) if err == nil { p.cond.Broadcast() @@ -451,11 +457,11 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { if usPodInfo := p.unschedulableQ.get(newPod); usPodInfo != nil { p.nominatedPods.update(oldPod, newPod) newPodInfo := newPodInfoNoTimestamp(newPod) - newPodInfo.timestamp = usPodInfo.timestamp + newPodInfo.Timestamp = usPodInfo.Timestamp if isPodUpdated(oldPod, newPod) { // If the pod is updated reset backoff p.clearPodBackoff(newPod) - p.unschedulableQ.delete(usPodInfo.pod) + p.unschedulableQ.delete(usPodInfo.Pod) err := p.activeQ.Add(newPodInfo) if err == nil { p.cond.Broadcast() @@ -514,7 +520,7 @@ func (p *PriorityQueue) MoveAllToActiveQueue() { p.lock.Lock() defer p.lock.Unlock() for _, pInfo := 
range p.unschedulableQ.podInfoMap { - pod := pInfo.pod + pod := pInfo.Pod if p.isPodBackingOff(pod) { if err := p.podBackoffQ.Add(pInfo); err != nil { klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) @@ -531,9 +537,9 @@ func (p *PriorityQueue) MoveAllToActiveQueue() { } // NOTE: this function assumes lock has been acquired in caller -func (p *PriorityQueue) movePodsToActiveQueue(podInfoList []*podInfo) { +func (p *PriorityQueue) movePodsToActiveQueue(podInfoList []*framework.PodInfo) { for _, pInfo := range podInfoList { - pod := pInfo.pod + pod := pInfo.Pod if p.isPodBackingOff(pod) { if err := p.podBackoffQ.Add(pInfo); err != nil { klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err) @@ -552,10 +558,10 @@ func (p *PriorityQueue) movePodsToActiveQueue(podInfoList []*podInfo) { // getUnschedulablePodsWithMatchingAffinityTerm returns unschedulable pods which have // any affinity term that matches "pod". // NOTE: this function assumes lock has been acquired in caller. -func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod) []*podInfo { - var podsToMove []*podInfo +func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod) []*framework.PodInfo { + var podsToMove []*framework.PodInfo for _, pInfo := range p.unschedulableQ.podInfoMap { - up := pInfo.pod + up := pInfo.Pod affinity := up.Spec.Affinity if affinity != nil && affinity.PodAffinity != nil { terms := predicates.GetPodAffinityTerms(affinity.PodAffinity) @@ -591,13 +597,13 @@ func (p *PriorityQueue) PendingPods() []*v1.Pod { defer p.lock.RUnlock() result := []*v1.Pod{} for _, pInfo := range p.activeQ.List() { - result = append(result, pInfo.(*podInfo).pod) + result = append(result, pInfo.(*framework.PodInfo).Pod) } for _, pInfo := range p.podBackoffQ.List() { - result = append(result, pInfo.(*podInfo).pod) + result = append(result, pInfo.(*framework.PodInfo).Pod) } for _, pInfo := range p.unschedulableQ.podInfoMap { - result = append(result, pInfo.pod) + result = append(result, pInfo.Pod) } return result } @@ -628,10 +634,10 @@ func (p *PriorityQueue) UpdateNominatedPodForNode(pod *v1.Pod, nodeName string) } func (p *PriorityQueue) podsCompareBackoffCompleted(podInfo1, podInfo2 interface{}) bool { - pInfo1 := podInfo1.(*podInfo) - pInfo2 := podInfo2.(*podInfo) - bo1, _ := p.podBackoff.GetBackoffTime(nsNameForPod(pInfo1.pod)) - bo2, _ := p.podBackoff.GetBackoffTime(nsNameForPod(pInfo2.pod)) + pInfo1 := podInfo1.(*framework.PodInfo) + pInfo2 := podInfo2.(*framework.PodInfo) + bo1, _ := p.podBackoff.GetBackoffTime(nsNameForPod(pInfo1.Pod)) + bo2, _ := p.podBackoff.GetBackoffTime(nsNameForPod(pInfo2.Pod)) return bo1.Before(bo2) } @@ -642,25 +648,25 @@ func (p *PriorityQueue) NumUnschedulablePods() int { return len(p.unschedulableQ.podInfoMap) } -// newPodInfo builds a podInfo object. -func (p *PriorityQueue) newPodInfo(pod *v1.Pod) *podInfo { +// newPodInfo builds a PodInfo object. +func (p *PriorityQueue) newPodInfo(pod *v1.Pod) *framework.PodInfo { if p.clock == nil { - return &podInfo{ - pod: pod, + return &framework.PodInfo{ + Pod: pod, } } - return &podInfo{ - pod: pod, - timestamp: p.clock.Now(), + return &framework.PodInfo{ + Pod: pod, + Timestamp: p.clock.Now(), } } // UnschedulablePodsMap holds pods that cannot be scheduled. This data structure // is used to implement unschedulableQ. type UnschedulablePodsMap struct { - // podInfoMap is a map key by a pod's full-name and the value is a pointer to the podInfo. 
- podInfoMap map[string]*podInfo + // podInfoMap is a map key by a pod's full-name and the value is a pointer to the PodInfo. + podInfoMap map[string]*framework.PodInfo keyFunc func(*v1.Pod) string // metricRecorder updates the counter when elements of an unschedulablePodsMap // get added or removed, and it does nothing if it's nil @@ -668,8 +674,8 @@ type UnschedulablePodsMap struct { } // Add adds a pod to the unschedulable podInfoMap. -func (u *UnschedulablePodsMap) addOrUpdate(pInfo *podInfo) { - podID := u.keyFunc(pInfo.pod) +func (u *UnschedulablePodsMap) addOrUpdate(pInfo *framework.PodInfo) { + podID := u.keyFunc(pInfo.Pod) if _, exists := u.podInfoMap[podID]; !exists && u.metricRecorder != nil { u.metricRecorder.Inc() } @@ -685,9 +691,9 @@ func (u *UnschedulablePodsMap) delete(pod *v1.Pod) { delete(u.podInfoMap, podID) } -// Get returns the podInfo if a pod with the same key as the key of the given "pod" +// Get returns the PodInfo if a pod with the same key as the key of the given "pod" // is found in the map. It returns nil otherwise. -func (u *UnschedulablePodsMap) get(pod *v1.Pod) *podInfo { +func (u *UnschedulablePodsMap) get(pod *v1.Pod) *framework.PodInfo { podKey := u.keyFunc(pod) if pInfo, exists := u.podInfoMap[podKey]; exists { return pInfo @@ -697,7 +703,7 @@ func (u *UnschedulablePodsMap) get(pod *v1.Pod) *podInfo { // Clear removes all the entries from the unschedulable podInfoMap. func (u *UnschedulablePodsMap) clear() { - u.podInfoMap = make(map[string]*podInfo) + u.podInfoMap = make(map[string]*framework.PodInfo) if u.metricRecorder != nil { u.metricRecorder.Clear() } @@ -706,7 +712,7 @@ func (u *UnschedulablePodsMap) clear() { // newUnschedulablePodsMap initializes a new object of UnschedulablePodsMap. func newUnschedulablePodsMap(metricRecorder metrics.MetricRecorder) *UnschedulablePodsMap { return &UnschedulablePodsMap{ - podInfoMap: make(map[string]*podInfo), + podInfoMap: make(map[string]*framework.PodInfo), keyFunc: util.GetPodFullName, metricRecorder: metricRecorder, } @@ -801,5 +807,5 @@ func MakeNextPodFunc(queue SchedulingQueue) func() *v1.Pod { } func podInfoKeyFunc(obj interface{}) (string, error) { - return cache.MetaNamespaceKeyFunc(obj.(*podInfo).pod) + return cache.MetaNamespaceKeyFunc(obj.(*framework.PodInfo).Pod) } diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go index 4407ca65e89..0252014e838 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue_test.go +++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go @@ -29,6 +29,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -109,13 +111,13 @@ func getUnschedulablePod(p *PriorityQueue, pod *v1.Pod) *v1.Pod { defer p.lock.Unlock() pInfo := p.unschedulableQ.get(pod) if pInfo != nil { - return pInfo.pod + return pInfo.Pod } return nil } func TestPriorityQueue_Add(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) if err := q.Add(&medPriorityPod); err != nil { t.Errorf("add failed: %v", err) } @@ -151,8 +153,58 @@ func TestPriorityQueue_Add(t *testing.T) { } } +type fakeFramework struct{} + +func (*fakeFramework) QueueSortFunc() framework.LessFunc { + return func(podInfo1, podInfo2 *framework.PodInfo) bool { 
+ prio1 := util.GetPodPriority(podInfo1.Pod) + prio2 := util.GetPodPriority(podInfo2.Pod) + return prio1 < prio2 + } +} + +func (*fakeFramework) NodeInfoSnapshot() *internalcache.NodeInfoSnapshot { + return nil +} + +func (*fakeFramework) RunPrebindPlugins(pc *framework.PluginContext, pod *v1.Pod, nodeName string) *framework.Status { + return nil +} + +func (*fakeFramework) RunReservePlugins(pc *framework.PluginContext, pod *v1.Pod, nodeName string) *framework.Status { + return nil +} + +func (*fakeFramework) RunUnreservePlugins(pc *framework.PluginContext, pod *v1.Pod, nodeName string) {} + +func (*fakeFramework) RunPermitPlugins(pc *framework.PluginContext, pod *v1.Pod, nodeName string) *framework.Status { + return nil +} + +func (*fakeFramework) IterateOverWaitingPods(callback func(framework.WaitingPod)) {} + +func (*fakeFramework) GetWaitingPod(uid types.UID) framework.WaitingPod { + return nil +} + +func TestPriorityQueue_AddWithReversePriorityLessFunc(t *testing.T) { + q := NewPriorityQueue(nil, &fakeFramework{}) + if err := q.Add(&medPriorityPod); err != nil { + t.Errorf("add failed: %v", err) + } + if err := q.Add(&highPriorityPod); err != nil { + t.Errorf("add failed: %v", err) + } + if p, err := q.Pop(); err != nil || p != &medPriorityPod { + t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name) + } + if p, err := q.Pop(); err != nil || p != &highPriorityPod { + t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name) + } +} + func TestPriorityQueue_AddIfNotPresent(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) addOrUpdateUnschedulablePod(q, &highPriNominatedPod) q.AddIfNotPresent(&highPriNominatedPod) // Must not add anything. q.AddIfNotPresent(&medPriorityPod) @@ -184,7 +236,7 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) { } func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Add(&highPriNominatedPod) q.AddUnschedulableIfNotPresent(&highPriNominatedPod, q.SchedulingCycle()) // Must not add anything. q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle()) @@ -216,7 +268,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { // current scheduling cycle will be put back to activeQueue if we were trying // to schedule them when we received move request. 
func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) totalNum := 10 expectedPods := make([]v1.Pod, 0, totalNum) for i := 0; i < totalNum; i++ { @@ -279,7 +331,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) { } func TestPriorityQueue_Pop(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) wg := sync.WaitGroup{} wg.Add(1) go func() { @@ -296,7 +348,7 @@ func TestPriorityQueue_Pop(t *testing.T) { } func TestPriorityQueue_Update(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Update(nil, &highPriorityPod) if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&highPriorityPod)); !exists { t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name) @@ -332,7 +384,7 @@ func TestPriorityQueue_Update(t *testing.T) { } func TestPriorityQueue_Delete(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Update(&highPriorityPod, &highPriNominatedPod) q.Add(&unschedulablePod) if err := q.Delete(&highPriNominatedPod); err != nil { @@ -356,7 +408,7 @@ func TestPriorityQueue_Delete(t *testing.T) { } func TestPriorityQueue_MoveAllToActiveQueue(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Add(&medPriorityPod) addOrUpdateUnschedulablePod(q, &unschedulablePod) addOrUpdateUnschedulablePod(q, &highPriorityPod) @@ -402,7 +454,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) { Spec: v1.PodSpec{NodeName: "machine1"}, } - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Add(&medPriorityPod) // Add a couple of pods to the unschedulableQ. addOrUpdateUnschedulablePod(q, &unschedulablePod) @@ -423,7 +475,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) { } func TestPriorityQueue_NominatedPodsForNode(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Add(&medPriorityPod) q.Add(&unschedulablePod) q.Add(&highPriorityPod) @@ -448,7 +500,7 @@ func TestPriorityQueue_PendingPods(t *testing.T) { return pendingSet } - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) q.Add(&medPriorityPod) addOrUpdateUnschedulablePod(q, &unschedulablePod) addOrUpdateUnschedulablePod(q, &highPriorityPod) @@ -464,7 +516,7 @@ func TestPriorityQueue_PendingPods(t *testing.T) { } func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) if err := q.Add(&medPriorityPod); err != nil { t.Errorf("add failed: %v", err) } @@ -585,64 +637,64 @@ func TestUnschedulablePodsMap(t *testing.T) { tests := []struct { name string podsToAdd []*v1.Pod - expectedMapAfterAdd map[string]*podInfo + expectedMapAfterAdd map[string]*framework.PodInfo podsToUpdate []*v1.Pod - expectedMapAfterUpdate map[string]*podInfo + expectedMapAfterUpdate map[string]*framework.PodInfo podsToDelete []*v1.Pod - expectedMapAfterDelete map[string]*podInfo + expectedMapAfterDelete map[string]*framework.PodInfo }{ { name: "create, update, delete subset of pods", podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]}, - expectedMapAfterAdd: map[string]*podInfo{ - util.GetPodFullName(pods[0]): {pod: pods[0]}, - util.GetPodFullName(pods[1]): {pod: pods[1]}, - util.GetPodFullName(pods[2]): {pod: pods[2]}, - util.GetPodFullName(pods[3]): {pod: pods[3]}, + expectedMapAfterAdd: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[0]): {Pod: pods[0]}, + util.GetPodFullName(pods[1]): {Pod: pods[1]}, + 
util.GetPodFullName(pods[2]): {Pod: pods[2]}, + util.GetPodFullName(pods[3]): {Pod: pods[3]}, }, podsToUpdate: []*v1.Pod{updatedPods[0]}, - expectedMapAfterUpdate: map[string]*podInfo{ - util.GetPodFullName(pods[0]): {pod: updatedPods[0]}, - util.GetPodFullName(pods[1]): {pod: pods[1]}, - util.GetPodFullName(pods[2]): {pod: pods[2]}, - util.GetPodFullName(pods[3]): {pod: pods[3]}, + expectedMapAfterUpdate: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[0]): {Pod: updatedPods[0]}, + util.GetPodFullName(pods[1]): {Pod: pods[1]}, + util.GetPodFullName(pods[2]): {Pod: pods[2]}, + util.GetPodFullName(pods[3]): {Pod: pods[3]}, }, podsToDelete: []*v1.Pod{pods[0], pods[1]}, - expectedMapAfterDelete: map[string]*podInfo{ - util.GetPodFullName(pods[2]): {pod: pods[2]}, - util.GetPodFullName(pods[3]): {pod: pods[3]}, + expectedMapAfterDelete: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[2]): {Pod: pods[2]}, + util.GetPodFullName(pods[3]): {Pod: pods[3]}, }, }, { name: "create, update, delete all", podsToAdd: []*v1.Pod{pods[0], pods[3]}, - expectedMapAfterAdd: map[string]*podInfo{ - util.GetPodFullName(pods[0]): {pod: pods[0]}, - util.GetPodFullName(pods[3]): {pod: pods[3]}, + expectedMapAfterAdd: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[0]): {Pod: pods[0]}, + util.GetPodFullName(pods[3]): {Pod: pods[3]}, }, podsToUpdate: []*v1.Pod{updatedPods[3]}, - expectedMapAfterUpdate: map[string]*podInfo{ - util.GetPodFullName(pods[0]): {pod: pods[0]}, - util.GetPodFullName(pods[3]): {pod: updatedPods[3]}, + expectedMapAfterUpdate: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[0]): {Pod: pods[0]}, + util.GetPodFullName(pods[3]): {Pod: updatedPods[3]}, }, podsToDelete: []*v1.Pod{pods[0], pods[3]}, - expectedMapAfterDelete: map[string]*podInfo{}, + expectedMapAfterDelete: map[string]*framework.PodInfo{}, }, { name: "delete non-existing and existing pods", podsToAdd: []*v1.Pod{pods[1], pods[2]}, - expectedMapAfterAdd: map[string]*podInfo{ - util.GetPodFullName(pods[1]): {pod: pods[1]}, - util.GetPodFullName(pods[2]): {pod: pods[2]}, + expectedMapAfterAdd: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[1]): {Pod: pods[1]}, + util.GetPodFullName(pods[2]): {Pod: pods[2]}, }, podsToUpdate: []*v1.Pod{updatedPods[1]}, - expectedMapAfterUpdate: map[string]*podInfo{ - util.GetPodFullName(pods[1]): {pod: updatedPods[1]}, - util.GetPodFullName(pods[2]): {pod: pods[2]}, + expectedMapAfterUpdate: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[1]): {Pod: updatedPods[1]}, + util.GetPodFullName(pods[2]): {Pod: pods[2]}, }, podsToDelete: []*v1.Pod{pods[2], pods[3]}, - expectedMapAfterDelete: map[string]*podInfo{ - util.GetPodFullName(pods[1]): {pod: updatedPods[1]}, + expectedMapAfterDelete: map[string]*framework.PodInfo{ + util.GetPodFullName(pods[1]): {Pod: updatedPods[1]}, }, }, } @@ -690,7 +742,7 @@ func TestSchedulingQueue_Close(t *testing.T) { }{ { name: "PriorityQueue close", - q: NewPriorityQueue(nil), + q: NewPriorityQueue(nil, nil), expectedErr: fmt.Errorf(queueClosed), }, } @@ -719,7 +771,7 @@ func TestSchedulingQueue_Close(t *testing.T) { // ensures that an unschedulable pod does not block head of the queue when there // are frequent events that move pods to the active queue. func TestRecentlyTriedPodsGoBack(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) // Add a few pods to priority queue. 
for i := 0; i < 5; i++ { p := v1.Pod{ @@ -773,7 +825,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) { // This behavior ensures that an unschedulable pod does not block head of the queue when there // are frequent events that move pods to the active queue. func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) // Add an unschedulable pod to a priority queue. // This makes a situation that the pod was tried to schedule @@ -864,7 +916,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) { // TestHighPriorityBackoff tests that a high priority pod does not block // other pods if it is unschedulable func TestHighProirotyBackoff(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) midPod := v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -927,7 +979,7 @@ func TestHighProirotyBackoff(t *testing.T) { // TestHighProirotyFlushUnschedulableQLeftover tests that pods will be moved to // activeQ after one minutes if it is in unschedulableQ func TestHighProirotyFlushUnschedulableQLeftover(t *testing.T) { - q := NewPriorityQueue(nil) + q := NewPriorityQueue(nil, nil) midPod := v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test-midpod", @@ -973,8 +1025,8 @@ func TestHighProirotyFlushUnschedulableQLeftover(t *testing.T) { addOrUpdateUnschedulablePod(q, &highPod) addOrUpdateUnschedulablePod(q, &midPod) - q.unschedulableQ.podInfoMap[util.GetPodFullName(&highPod)].timestamp = time.Now().Add(-1 * unschedulableQTimeInterval) - q.unschedulableQ.podInfoMap[util.GetPodFullName(&midPod)].timestamp = time.Now().Add(-1 * unschedulableQTimeInterval) + q.unschedulableQ.podInfoMap[util.GetPodFullName(&highPod)].Timestamp = time.Now().Add(-1 * unschedulableQTimeInterval) + q.unschedulableQ.podInfoMap[util.GetPodFullName(&midPod)].Timestamp = time.Now().Add(-1 * unschedulableQTimeInterval) if p, err := q.Pop(); err != nil || p != &highPod { t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name) @@ -984,23 +1036,23 @@ func TestHighProirotyFlushUnschedulableQLeftover(t *testing.T) { } } -type operation func(queue *PriorityQueue, pInfo *podInfo) +type operation func(queue *PriorityQueue, pInfo *framework.PodInfo) var ( - addPodActiveQ = func(queue *PriorityQueue, pInfo *podInfo) { + addPodActiveQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) { queue.lock.Lock() queue.activeQ.Add(pInfo) queue.lock.Unlock() } - updatePodActiveQ = func(queue *PriorityQueue, pInfo *podInfo) { + updatePodActiveQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) { queue.lock.Lock() queue.activeQ.Update(pInfo) queue.lock.Unlock() } - addPodUnschedulableQ = func(queue *PriorityQueue, pInfo *podInfo) { + addPodUnschedulableQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) { queue.lock.Lock() // Update pod condition to unschedulable. 
- podutil.UpdatePodCondition(&pInfo.pod.Status, &v1.PodCondition{ + podutil.UpdatePodCondition(&pInfo.Pod.Status, &v1.PodCondition{ Type: v1.PodScheduled, Status: v1.ConditionFalse, Reason: v1.PodReasonUnschedulable, @@ -1009,24 +1061,24 @@ var ( queue.unschedulableQ.addOrUpdate(pInfo) queue.lock.Unlock() } - addPodBackoffQ = func(queue *PriorityQueue, pInfo *podInfo) { + addPodBackoffQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) { queue.lock.Lock() queue.podBackoffQ.Add(pInfo) queue.lock.Unlock() } - moveAllToActiveQ = func(queue *PriorityQueue, _ *podInfo) { + moveAllToActiveQ = func(queue *PriorityQueue, _ *framework.PodInfo) { queue.MoveAllToActiveQueue() } - backoffPod = func(queue *PriorityQueue, pInfo *podInfo) { - queue.backoffPod(pInfo.pod) + backoffPod = func(queue *PriorityQueue, pInfo *framework.PodInfo) { + queue.backoffPod(pInfo.Pod) } - flushBackoffQ = func(queue *PriorityQueue, _ *podInfo) { + flushBackoffQ = func(queue *PriorityQueue, _ *framework.PodInfo) { queue.clock.(*clock.FakeClock).Step(2 * time.Second) queue.flushBackoffQCompleted() } ) -// TestPodTimestamp tests the operations related to podInfo. +// TestPodTimestamp tests the operations related to PodInfo. func TestPodTimestamp(t *testing.T) { pod1 := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -1051,20 +1103,20 @@ func TestPodTimestamp(t *testing.T) { } var timestamp = time.Now() - pInfo1 := &podInfo{ - pod: pod1, - timestamp: timestamp, + pInfo1 := &framework.PodInfo{ + Pod: pod1, + Timestamp: timestamp, } - pInfo2 := &podInfo{ - pod: pod2, - timestamp: timestamp.Add(time.Second), + pInfo2 := &framework.PodInfo{ + Pod: pod2, + Timestamp: timestamp.Add(time.Second), } tests := []struct { name string operations []operation - operands []*podInfo - expected []*podInfo + operands []*framework.PodInfo + expected []*framework.PodInfo }{ { name: "add two pod to activeQ and sort them by the timestamp", @@ -1072,8 +1124,8 @@ func TestPodTimestamp(t *testing.T) { addPodActiveQ, addPodActiveQ, }, - operands: []*podInfo{pInfo2, pInfo1}, - expected: []*podInfo{pInfo1, pInfo2}, + operands: []*framework.PodInfo{pInfo2, pInfo1}, + expected: []*framework.PodInfo{pInfo1, pInfo2}, }, { name: "update two pod to activeQ and sort them by the timestamp", @@ -1081,8 +1133,8 @@ func TestPodTimestamp(t *testing.T) { updatePodActiveQ, updatePodActiveQ, }, - operands: []*podInfo{pInfo2, pInfo1}, - expected: []*podInfo{pInfo1, pInfo2}, + operands: []*framework.PodInfo{pInfo2, pInfo1}, + expected: []*framework.PodInfo{pInfo1, pInfo2}, }, { name: "add two pod to unschedulableQ then move them to activeQ and sort them by the timestamp", @@ -1091,8 +1143,8 @@ func TestPodTimestamp(t *testing.T) { addPodUnschedulableQ, moveAllToActiveQ, }, - operands: []*podInfo{pInfo2, pInfo1, nil}, - expected: []*podInfo{pInfo1, pInfo2}, + operands: []*framework.PodInfo{pInfo2, pInfo1, nil}, + expected: []*framework.PodInfo{pInfo1, pInfo2}, }, { name: "add one pod to BackoffQ and move it to activeQ", @@ -1103,15 +1155,15 @@ func TestPodTimestamp(t *testing.T) { flushBackoffQ, moveAllToActiveQ, }, - operands: []*podInfo{pInfo2, pInfo1, pInfo1, nil, nil}, - expected: []*podInfo{pInfo1, pInfo2}, + operands: []*framework.PodInfo{pInfo2, pInfo1, pInfo1, nil, nil}, + expected: []*framework.PodInfo{pInfo1, pInfo2}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - queue := NewPriorityQueueWithClock(nil, clock.NewFakeClock(timestamp)) - var podInfoList []*podInfo + queue := NewPriorityQueueWithClock(nil, clock.NewFakeClock(timestamp), 
nil) + var podInfoList []*framework.PodInfo for i, op := range test.operations { op(queue, test.operands[i]) @@ -1121,12 +1173,12 @@ func TestPodTimestamp(t *testing.T) { if pInfo, err := queue.activeQ.Pop(); err != nil { t.Errorf("Error while popping the head of the queue: %v", err) } else { - podInfoList = append(podInfoList, pInfo.(*podInfo)) + podInfoList = append(podInfoList, pInfo.(*framework.PodInfo)) } } if !reflect.DeepEqual(test.expected, podInfoList) { - t.Errorf("Unexpected podInfo list. Expected: %v, got: %v", + t.Errorf("Unexpected PodInfo list. Expected: %v, got: %v", test.expected, podInfoList) } }) @@ -1137,24 +1189,24 @@ func TestPodTimestamp(t *testing.T) { func TestPendingPodsMetric(t *testing.T) { total := 50 timestamp := time.Now() - var pInfos = make([]*podInfo, 0, total) + var pInfos = make([]*framework.PodInfo, 0, total) for i := 1; i <= total; i++ { - p := &podInfo{ - pod: &v1.Pod{ + p := &framework.PodInfo{ + Pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("test-pod-%d", i), Namespace: fmt.Sprintf("ns%d", i), UID: types.UID(fmt.Sprintf("tp-%d", i)), }, }, - timestamp: timestamp, + Timestamp: timestamp, } pInfos = append(pInfos, p) } tests := []struct { name string operations []operation - operands [][]*podInfo + operands [][]*framework.PodInfo expected []int64 }{ { @@ -1163,7 +1215,7 @@ func TestPendingPodsMetric(t *testing.T) { addPodActiveQ, addPodUnschedulableQ, }, - operands: [][]*podInfo{ + operands: [][]*framework.PodInfo{ pInfos[:30], pInfos[30:], }, @@ -1177,7 +1229,7 @@ func TestPendingPodsMetric(t *testing.T) { addPodBackoffQ, addPodUnschedulableQ, }, - operands: [][]*podInfo{ + operands: [][]*framework.PodInfo{ pInfos[:15], pInfos[15:40], pInfos[15:40], @@ -1191,7 +1243,7 @@ func TestPendingPodsMetric(t *testing.T) { addPodUnschedulableQ, moveAllToActiveQ, }, - operands: [][]*podInfo{ + operands: [][]*framework.PodInfo{ pInfos[:total], {nil}, }, @@ -1204,7 +1256,7 @@ func TestPendingPodsMetric(t *testing.T) { addPodUnschedulableQ, moveAllToActiveQ, }, - operands: [][]*podInfo{ + operands: [][]*framework.PodInfo{ pInfos[:20], pInfos[:total], {nil}, @@ -1220,7 +1272,7 @@ func TestPendingPodsMetric(t *testing.T) { moveAllToActiveQ, flushBackoffQ, }, - operands: [][]*podInfo{ + operands: [][]*framework.PodInfo{ pInfos[:20], pInfos[:40], pInfos[40:], @@ -1240,7 +1292,7 @@ func TestPendingPodsMetric(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { resetMetrics() - queue := NewPriorityQueueWithClock(nil, clock.NewFakeClock(timestamp)) + queue := NewPriorityQueueWithClock(nil, clock.NewFakeClock(timestamp), nil) for i, op := range test.operations { for _, pInfo := range test.operands[i] { op(queue, pInfo) diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index f5ee0ed57e6..6436b5c9284 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -641,7 +641,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C framework, _ := framework.NewFramework(EmptyPluginRegistry, nil) algo := core.NewGenericScheduler( scache, - internalqueue.NewSchedulingQueue(nil), + internalqueue.NewSchedulingQueue(nil, nil), predicateMap, predicates.EmptyPredicateMetadataProducer, []priorities.PriorityConfig{}, @@ -694,7 +694,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc framework, _ := framework.NewFramework(EmptyPluginRegistry, nil) algo := core.NewGenericScheduler( scache, - 
internalqueue.NewSchedulingQueue(nil), + internalqueue.NewSchedulingQueue(nil, nil), predicateMap, predicates.EmptyPredicateMetadataProducer, []priorities.PriorityConfig{}, From 531a50c776f149be1c9dd58272bd9ed1aee5aba8 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 May 2019 18:21:48 -0700 Subject: [PATCH 175/194] simplify pluginwatcher closing --- .../util/pluginwatcher/plugin_watcher.go | 32 ++++++------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go index 9dad92db845..4e12716c57b 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -39,10 +39,10 @@ import ( type Watcher struct { path string deprecatedPath string - stopCh chan interface{} + stopCh chan struct{} + stopped chan struct{} fs utilfs.Filesystem fsWatcher *fsnotify.Watcher - wg sync.WaitGroup mutex sync.Mutex handlers map[string]PluginHandler @@ -88,7 +88,8 @@ func (w *Watcher) getHandler(pluginType string) (PluginHandler, bool) { // Start watches for the creation of plugin sockets at the path func (w *Watcher) Start() error { klog.V(2).Infof("Plugin Watcher Start at %s", w.path) - w.stopCh = make(chan interface{}) + w.stopCh = make(chan struct{}) + w.stopped = make(chan struct{}) // Creating the directory to be watched if it doesn't exist yet, // and walks through the directory to discover the existing plugins. @@ -104,22 +105,20 @@ func (w *Watcher) Start() error { // Traverse plugin dir and add filesystem watchers before starting the plugin processing goroutine. if err := w.traversePluginDir(w.path); err != nil { - w.Stop() + w.fsWatcher.Close() return fmt.Errorf("failed to traverse plugin socket path %q, err: %v", w.path, err) } // Traverse deprecated plugin dir, if specified. if len(w.deprecatedPath) != 0 { if err := w.traversePluginDir(w.deprecatedPath); err != nil { - w.Stop() + w.fsWatcher.Close() return fmt.Errorf("failed to traverse deprecated plugin socket path %q, err: %v", w.deprecatedPath, err) } } - w.wg.Add(1) - go func(fsWatcher *fsnotify.Watcher) { - defer w.wg.Done() - + go func() { + defer close(w.stopped) for { select { case event := <-fsWatcher.Events: @@ -135,17 +134,15 @@ func (w *Watcher) Start() error { klog.Errorf("error %v when handling delete event: %s", err, event) } } - continue case err := <-fsWatcher.Errors: if err != nil { klog.Errorf("fsWatcher received error: %v", err) } - continue case <-w.stopCh: return } } - }(fsWatcher) + }() return nil } @@ -154,18 +151,9 @@ func (w *Watcher) Start() error { func (w *Watcher) Stop() error { close(w.stopCh) - c := make(chan struct{}) - var once sync.Once - closeFunc := func() { close(c) } - go func() { - defer once.Do(closeFunc) - w.wg.Wait() - }() - select { - case <-c: + case <-w.stopped: case <-time.After(11 * time.Second): - once.Do(closeFunc) return fmt.Errorf("timeout on stopping watcher") } From b9fffd1571c627d3bc5763a45e48a1ee075c3781 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Thu, 9 May 2019 18:54:37 +0000 Subject: [PATCH 176/194] Use framework.ExpectNoError() for e2e/common The e2e test framework has ExpectNoError() for readable test code. This replaces Expect(err).NotTo(HaveOccurred()) with it. 
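For readers skimming the diff, the conversion is purely mechanical; the sketch below shows the before/after shape of a single call site. It is illustrative only: the package name, the wrapper function and its parameters are invented for the sake of a self-contained example, and the import paths are the usual ones rather than something taken from this patch, while podClient.Get, gomega.Expect(err).NotTo(gomega.HaveOccurred()) and framework.ExpectNoError(err, ...) match the hunks below.

    package common_sketch // hypothetical package, not part of this patch

    import (
        "github.com/onsi/gomega"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // illustrateExpectNoError shows the mechanical rewrite applied throughout
    // this patch. podClient and podName stand in for whatever the surrounding
    // test already has in scope.
    func illustrateExpectNoError(podClient *framework.PodClient, podName string) {
        // Before: raw gomega assertion with an explanatory message.
        p, err := podClient.Get(podName, metav1.GetOptions{})
        gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", podName)

        // After: the framework helper performs the same failure handling while
        // reading as one declarative line; the message arguments are unchanged.
        p, err = podClient.Get(podName, metav1.GetOptions{})
        framework.ExpectNoError(err, "Failed to get pod %q", podName)

        _ = p
    }

On a non-nil error both forms fail the spec with the same formatted message; the helper simply removes the matcher noise from the test body.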
--- test/e2e/common/pods.go | 29 +++++++++++++++-------------- test/e2e/common/runtime.go | 6 +++--- test/e2e/common/sysctl.go | 18 +++++++++--------- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index c2e7af9f452..e8d5c02224a 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -67,7 +67,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) { t := time.Now() for { p, err := podClient.Get(pod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", pod.Name) + framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) if p.Status.HostIP != "" { e2elog.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) break @@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + framework.ExpectNoError(err, "failed to query for pods") gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), @@ -256,7 +256,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + framework.ExpectNoError(err, "failed to query for pods") gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) ginkgo.By("verifying pod creation was observed") @@ -279,11 +279,11 @@ var _ = framework.KubeDescribe("Pods", func() { framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) // save the running pod pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") + framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod") + framework.ExpectNoError(err, "failed to delete pod") ginkgo.By("verifying the kubelet observed the termination notice") gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { @@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + framework.ExpectNoError(err, "failed to query for pods") gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) }) @@ -373,7 +373,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + framework.ExpectNoError(err, "failed to query for pods") gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) ginkgo.By("updating the pod") @@ -388,7 +388,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = 
metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + framework.ExpectNoError(err, "failed to query for pods") gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) e2elog.Logf("Pod update OK") }) @@ -427,7 +427,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods") + framework.ExpectNoError(err, "failed to query for pods") gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) ginkgo.By("updating the pod") @@ -491,7 +491,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, } _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service") + framework.ExpectNoError(err, "failed to create service") // Make a client pod that verifies that it has the service environment variables. podName := "client-envvars-" + string(uuid.NewUUID()) @@ -538,7 +538,7 @@ var _ = framework.KubeDescribe("Pods", func() { */ framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() { config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config") + framework.ExpectNoError(err, "unable to get base config") ginkgo.By("creating the pod") name := "pod-exec-websocket-" + string(uuid.NewUUID()) @@ -620,7 +620,7 @@ var _ = framework.KubeDescribe("Pods", func() { */ framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() { config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config") + framework.ExpectNoError(err, "unable to get base config") ginkgo.By("creating the pod") name := "pod-logs-websocket-" + string(uuid.NewUUID()) @@ -798,14 +798,15 @@ var _ = framework.KubeDescribe("Pods", func() { } validatePodReadiness := func(expectReady bool) { - gomega.Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) { + err := wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) { podReady := podClient.PodIsReady(podName) res := expectReady == podReady if !res { e2elog.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady) } return res, nil - })).NotTo(gomega.HaveOccurred()) + }) + framework.ExpectNoError(err) } ginkgo.By("submitting the pod to kubernetes") diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index ac92014d641..d51da57ee75 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -114,7 +114,7 @@ while true; do sleep 1; done Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready)) status, err := terminateContainer.GetStatus() - Expect(err).ShouldNot(HaveOccurred()) + framework.ExpectNoError(err) By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name)) Expect(GetContainerState(status.State)).To(Equal(testCase.State)) @@ -148,7 +148,7 @@ while true; do sleep 1; done By("get the container status") status, err := c.GetStatus() - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("the container should be terminated") 
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated)) @@ -286,7 +286,7 @@ while true; do sleep 1; done secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) By("create image pull secret") _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil) container.ImagePullSecrets = []string{secret.Name} } diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index 2d8ef72a6c7..88f6c79bc72 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { // failed pods without running containers. This would create a race as the pod // might have already been deleted here. ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) if ev != nil && ev.Reason == sysctl.UnsupportedReason { framework.Skipf("No sysctl support in Docker <1.12") } @@ -85,16 +85,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { By("Waiting for pod completion") err = f.WaitForPodNoLongerRunning(pod.Name) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("Checking that the pod succeeded") Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded)) By("Getting logs from the pod") log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("Checking that the sysctl is actually updated") Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1")) @@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { // failed pods without running containers. This would create a race as the pod // might have already been deleted here. ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) if ev != nil && ev.Reason == sysctl.UnsupportedReason { framework.Skipf("No sysctl support in Docker <1.12") } @@ -128,16 +128,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { By("Waiting for pod completion") err = f.WaitForPodNoLongerRunning(pod.Name) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("Checking that the pod succeeded") Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded)) By("Getting logs from the pod") log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) By("Checking that the sysctl is actually updated") Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1")) @@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { // failed pods without running containers. This would create a race as the pod // might have already been deleted here. 
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) if ev != nil && ev.Reason == sysctl.UnsupportedReason { framework.Skipf("No sysctl support in Docker <1.12") } From 95a6376899af66bd6c456a1376e4e82cf075934f Mon Sep 17 00:00:00 2001 From: draveness Date: Wed, 15 May 2019 10:08:05 +0800 Subject: [PATCH 177/194] feat: use framework.ExpectNoError in e2e apps disruption --- test/e2e/apps/disruption.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 61245742e76..d6b11ce374a 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -166,7 +166,7 @@ var _ = SIGDescribe("DisruptionController", func() { // Locate a running pod. pod, err := locateRunningPod(cs, ns) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) e := &policy.Eviction{ ObjectMeta: metav1.ObjectMeta{ @@ -205,7 +205,7 @@ var _ = SIGDescribe("DisruptionController", func() { ginkgo.By("First trying to evict a pod which shouldn't be evictable") pod, err := locateRunningPod(cs, ns) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb e := &policy.Eviction{ @@ -223,7 +223,7 @@ var _ = SIGDescribe("DisruptionController", func() { ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable") waitForPodsOrDie(cs, ns, 3) err = cs.CoreV1().Pods(ns).Evict(e) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // the eviction is now allowed + framework.ExpectNoError(err) // the eviction is now allowed }) }) From 4fcfb9717967911c1379fee72bf9834244f9c026 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Wed, 15 May 2019 12:14:44 +0800 Subject: [PATCH 178/194] improve validation message for join command --- cmd/kubeadm/app/apis/kubeadm/validation/validation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index 275e29e0214..d61b4603377 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -146,7 +146,7 @@ func ValidateDiscoveryBootstrapToken(b *kubeadm.BootstrapTokenDiscovery, fldPath } if len(b.CACertHashes) == 0 && !b.UnsafeSkipCAVerification { - allErrs = append(allErrs, field.Invalid(fldPath, "", "using token-based discovery without caCertHashes can be unsafe. Set unsafeSkipCAVerification to continue")) + allErrs = append(allErrs, field.Invalid(fldPath, "", "using token-based discovery without caCertHashes can be unsafe. Set unsafeSkipCAVerification as true in your kubeadm config file or pass --discovery-token-unsafe-skip-ca-verification flag to continue")) } allErrs = append(allErrs, ValidateToken(b.Token, fldPath.Child(kubeadmcmdoptions.TokenStr))...) 
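The rule behind the reworded message above: token-based discovery is refused when caCertHashes is empty unless the user explicitly opts out of CA verification. Below is a minimal, hedged sketch of how that surfaces through ValidateDiscoveryBootstrapToken; only the field and function names come from the hunk above, while the import paths, the field.ErrorList-style return value, and the example token are assumptions made to keep the snippet self-contained.

    package validation_sketch // hypothetical package, not part of this patch

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"

        "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
        "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
    )

    // demoDiscoveryValidation triggers the error whose message this patch rewords:
    // no caCertHashes and no explicit opt-out means token-based discovery is rejected.
    func demoDiscoveryValidation() {
        b := &kubeadm.BootstrapTokenDiscovery{
            Token: "abcdef.0123456789abcdef", // placeholder token in the usual id.secret shape
            // CACertHashes left empty and UnsafeSkipCAVerification left false on purpose.
        }
        errs := validation.ValidateDiscoveryBootstrapToken(b, field.NewPath("discovery", "bootstrapToken"))
        fmt.Println(len(errs) > 0) // true: the reworded error is among the results
        // Either remedy named in the new message clears this particular error:
        // set unsafeSkipCAVerification to true in the kubeadm config, or supply
        // caCertHashes; on the CLI the opt-out is spelled
        // --discovery-token-unsafe-skip-ca-verification.
    }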
From a8e8d4a668799ba7a91c8f29852d107c1499b772 Mon Sep 17 00:00:00 2001 From: toyoda Date: Wed, 15 May 2019 13:57:41 +0900 Subject: [PATCH 179/194] use framework.ExpectNoError() for e2e/network/ingress.go service.go --- test/e2e/network/ingress.go | 123 ++++++++++++++++++---------------- test/e2e/network/service.go | 129 +++++++++++++++++++----------------- 2 files changed, 135 insertions(+), 117 deletions(-) diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 4f86b04a230..9e428f1a793 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -94,7 +94,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) // Platform specific cleanup @@ -110,7 +110,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.TryDeleteIngress() ginkgo.By("Cleaning up cloud resources") - gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) + err := gceController.CleanupIngressController() + framework.ExpectNoError(err) }) ginkgo.It("should conform to Ingress spec", func() { @@ -143,7 +144,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } for i, host := range hosts { err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } ginkgo.By("Remove all but one of the certs on the ingress.") @@ -153,13 +154,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.By("Test that the remaining cert is properly served.") err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0]) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) ginkgo.By("Add back one of the certs that was removed and check that all certs are served.") jig.AddHTTPS(secrets[1], hosts[1]) for i, host := range hosts[:2] { err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } }) @@ -250,7 +251,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) // Platform specific cleanup @@ -266,7 +267,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.TryDeleteIngress() ginkgo.By("Cleaning up cloud resources") - gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) + err := gceController.CleanupIngressController() + framework.ExpectNoError(err) }) ginkgo.It("should conform to Ingress spec", func() { @@ -279,7 +281,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { t.Execute() ginkgo.By(t.ExitLog) jig.WaitForIngress(true) - gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) + err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + framework.ExpectNoError(err) } }) @@ -288,15 +291,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { 
ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) - gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) + err = gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + framework.ExpectNoError(err) ginkgo.By("Switch backend service to use IG") svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil { @@ -305,16 +309,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } return true, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target IG, but failed to observe") + framework.ExpectNoError(err, "Expect backend service to target IG, but failed to observe") jig.WaitForIngress(true) ginkgo.By("Switch backend service to use NEG") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil { @@ -323,7 +327,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } return true, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target NEG, but failed to observe") + framework.ExpectNoError(err, "Expect backend service to target NEG, but failed to observe") jig.WaitForIngress(true) }) @@ -332,7 +336,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) svcPorts := jig.GetServicePorts(false) - gomega.Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(gomega.HaveOccurred()) + err := gceController.WaitForNegBackendService(svcPorts) + framework.ExpectNoError(err) // ClusterIP ServicePorts have no NodePort for _, sp := range svcPorts { @@ -344,11 +349,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { name := "hostname" scaleAndValidateNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if scale.Spec.Replicas != int32(num) { scale.Spec.Replicas = int32(num) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() @@ -358,14 +363,15 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { e2elog.Logf("Expecting 
%d backends, got %d", num, res.Len()) return res.Len() == num, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() - gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) + err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + framework.ExpectNoError(err) // initial replicas number is 1 scaleAndValidateNEG(1) @@ -389,14 +395,15 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() - gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) + err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas)) scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) scale.Spec.Replicas = int32(replicas) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() @@ -405,21 +412,21 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } return res.Len() == replicas, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) ginkgo.By("Trigger rolling update and observe service disruption") deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // trigger by changing graceful termination period to 60 seconds gracePeriod := int64(60) deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if int(deploy.Status.UpdatedReplicas) == replicas { if res.Len() == replicas { return true, nil @@ -431,7 +438,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { e2elog.Logf("Waiting for rolling update to finished. 
Keep sending traffic.") return false, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) }) ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { @@ -440,15 +447,15 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { scaleAndValidateExposedNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if scale.Spec.Replicas != int32(num) { scale.Spec.Replicas = int32(num) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) var status ingress.NegStatus v, ok := svc.Annotations[ingress.NEGStatusAnnotation] @@ -481,10 +488,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } gceCloud, err := gce.GetGCECloud() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if len(networkEndpoints) != num { e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints)) return false, nil @@ -493,13 +500,14 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { return true, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) - gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) + err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + framework.ExpectNoError(err) // initial replicas number is 1 scaleAndValidateExposedNEG(1) @@ -527,46 +535,46 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Add Ingress annotation - NEGs should stay the same. 
ginkgo.By("Adding NEG Ingress annotation") svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Modify exposed NEG annotation, but keep ingress annotation ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Remove Ingress annotation. Expect 1 NEG ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 1) // Remove NEG annotation entirely. Expect 0 NEGs. ginkgo.By("Removing NEG annotation") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, svc := range svcList.Items { delete(svc.Annotations, ingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 0) }) @@ -588,7 +596,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) // TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19): // Kubemci should reserve a static ip if user has not specified one. 
@@ -611,7 +619,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } ginkgo.By("Cleaning up cloud resources") - gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) + err := gceController.CleanupIngressController() + framework.ExpectNoError(err) }) ginkgo.It("should conform to Ingress spec", func() { @@ -768,9 +777,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) testHostname := "test.ingress.com" cert, key, err := ingress.GenerateRSACerts(testHostname, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) gceCloud, err := gce.GetGCECloud() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) defer func() { // We would not be able to delete the cert until ingress controller // cleans up the target proxy that references it. @@ -786,7 +795,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat } return true, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to delete ssl certificate %q: %v", preSharedCertName, err)) + framework.ExpectNoError(err, fmt.Sprintf("ginkgo.Failed to delete ssl certificate %q: %v", preSharedCertName, err)) }() _, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{ Name: preSharedCertName, @@ -794,7 +803,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat PrivateKey: string(key), Description: "pre-shared cert for ingress testing", }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to create ssl certificate %q: %v", preSharedCertName, err)) + framework.ExpectNoError(err, fmt.Sprintf("ginkgo.Failed to create ssl certificate %q: %v", preSharedCertName, err)) ginkgo.By("Creating an ingress referencing the pre-shared certificate") // Create an ingress referencing this cert using pre-shared-cert annotation. 
@@ -811,7 +820,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat ginkgo.By("Test that ingress works with the pre-shared certificate") err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { @@ -837,11 +846,11 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs) } }() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to create re-encryption ingress") + framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress") ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to wait for ingress IP") + framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP") ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} @@ -857,7 +866,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ e2elog.Logf("Poll succeeded, request was served by HTTPS") return true, nil }) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to verify backside re-encryption ingress") + framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress") } func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { @@ -897,10 +906,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro } gceCloud, err := gce.GetGCECloud() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) if len(networkEndpoints) != 1 { e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints)) return false, nil @@ -914,6 +923,6 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro } return true, nil }); err != nil { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(err) } } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 2741d05449d..187b07122a8 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -108,7 +108,7 @@ var _ = SIGDescribe("Services", func() { */ framework.ConformanceIt("should provide secure master service ", func() { _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch the service object for the service named kubernetes") + framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes") }) /* @@ -128,7 +128,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service " + serviceName + " in namespace " + ns) defer func() { err := 
cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() ports := []v1.ServicePort{{ Port: 80, @@ -136,7 +136,7 @@ var _ = SIGDescribe("Services", func() { }} _, err := jig.CreateServiceWithServicePort(labels, ns, ports) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) + framework.ExpectNoError(err, "failed to create service with ServicePorts in namespace: %s", ns) framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) @@ -144,7 +144,7 @@ var _ = SIGDescribe("Services", func() { defer func() { for name := range names { err := cs.CoreV1().Pods(ns).Delete(name, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) + framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -181,7 +181,7 @@ var _ = SIGDescribe("Services", func() { defer func() { err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() labels := map[string]string{"foo": "bar"} @@ -203,7 +203,7 @@ var _ = SIGDescribe("Services", func() { }, } _, err := jig.CreateServiceWithServicePort(labels, ns, ports) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) + framework.ExpectNoError(err, "failed to create service with ServicePorts in namespace: %s", ns) port1 := 100 port2 := 101 framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) @@ -212,7 +212,7 @@ var _ = SIGDescribe("Services", func() { defer func() { for name := range names { err := cs.CoreV1().Pods(ns).Delete(name, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) + framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -272,7 +272,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the sourceip test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() serviceIP := tcpService.Spec.ClusterIP e2elog.Logf("sourceip-test cluster ip: %s", serviceIP) @@ -293,7 +293,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the echo server pod") err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s on node: %s", serverPodName, node1.Name) + framework.ExpectNoError(err, "failed to delete pod: %s on node: %s", serverPodName, node1.Name) }() // Waiting for service to expose endpoint. 
@@ -322,13 +322,13 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service1 in namespace " + ns) podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) ginkgo.By("creating service2 in namespace " + ns) podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) hosts, err := e2essh.NodeSSHHosts(cs) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") + framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -352,7 +352,7 @@ var _ = SIGDescribe("Services", func() { // Start another service and verify both are up. ginkgo.By("creating service3 in namespace " + ns) podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service3"), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) if svc2IP == svc3IP { framework.Failf("service IPs conflict: %v", svc2IP) @@ -379,20 +379,20 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) defer func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } hosts, err := e2essh.NodeSSHHosts(cs) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") + framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -432,10 +432,10 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication 
controller with service: %s in the namespace: %s", svc1IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) hosts, err := e2essh.NodeSSHHosts(cs) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") + framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -459,7 +459,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2")) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) @@ -527,7 +527,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating a second namespace") namespacePtr, err := f.CreateNamespace("services", nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace") + framework.ExpectNoError(err, "failed to create namespace") ns2 := namespacePtr.Name // LB2 in ns2 on UDP e2elog.Logf("namespace for UDP test: %s", ns2) @@ -592,7 +592,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating a static load balancer IP") staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get GCE cloud provider") + framework.ExpectNoError(err, "failed to get GCE cloud provider") err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()) defer func() { @@ -603,9 +603,9 @@ var _ = SIGDescribe("Services", func() { } } }() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create region address: %s", staticIPName) + framework.ExpectNoError(err, "failed to create region address: %s", staticIPName) reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region()) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get region address: %s", staticIPName) + framework.ExpectNoError(err, "failed to get region address: %s", staticIPName) requestedIP = reservedAddr.Address e2elog.Logf("Allocated static load balancer IP: %s", requestedIP) @@ -649,7 +649,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("demoting the static IP to ephemeral") if staticIPName != "" { gceCloud, err := gce.GetGCECloud() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get GCE cloud provider") + framework.ExpectNoError(err, "failed to get GCE cloud provider") // Deleting it after it is attached "demotes" it to an // ephemeral IP, which can be auto-released. 
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { @@ -880,7 +880,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the updating NodePorts test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) svcPort := int(tcpService.Spec.Ports[0].Port) @@ -927,7 +927,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the ExternalName to ClusterIP test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) ginkgo.By("changing the ExternalName service to type=ClusterIP") @@ -951,7 +951,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the ExternalName to NodePort test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) ginkgo.By("changing the ExternalName service to type=NodePort") @@ -975,7 +975,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the ClusterIP to ExternalName test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP) ginkgo.By("changing the ClusterIP service to type=ExternalName") @@ -999,7 +999,7 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the NodePort to ExternalName test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort) ginkgo.By("changing the NodePort service to type=ExternalName") @@ -1049,7 +1049,7 @@ var _ = SIGDescribe("Services", func() { }, } result, err := t.CreateService(service) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) if len(result.Spec.Ports) != 2 { framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result) @@ -1079,7 +1079,7 @@ var _ = SIGDescribe("Services", func() { service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort result, err := t.CreateService(service) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) + 
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns) if result.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", result) @@ -1106,11 +1106,11 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("deleting service " + serviceName1 + " to release NodePort") err = t.DeleteService(serviceName1) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName1, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName1, ns) ginkgo.By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") _, err = t.CreateService(service2) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) + framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns) }) ginkgo.It("should check NodePort out-of-range", func() { @@ -1132,7 +1132,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", service) @@ -1168,7 +1168,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("deleting original service " + serviceName) err = t.DeleteService(serviceName) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort)) service = t.BuildServiceSpec() @@ -1200,7 +1200,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", service) @@ -1219,7 +1219,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("deleting original service " + serviceName) err = t.DeleteService(serviceName) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! 
ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) @@ -1241,7 +1241,7 @@ var _ = SIGDescribe("Services", func() { service.Spec.Type = v1.ServiceTypeNodePort service.Spec.Ports[0].NodePort = nodePort service, err = t.CreateService(service) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) }) ginkgo.It("should create endpoints for unready pods", func() { @@ -1413,9 +1413,9 @@ var _ = SIGDescribe("Services", func() { dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) + framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) dropPod, err := cs.CoreV1().Pods(namespace).Get(dropPodName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", dropPodName, namespace) + framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", dropPodName, namespace) ginkgo.By("creating a pod to be part of the service " + serviceName) // This container is an nginx container listening on port 80 @@ -1433,7 +1433,8 @@ var _ = SIGDescribe("Services", func() { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.LoadBalancerSourceRanges = nil }) - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, loadBalancerCreateTimeout) @@ -1615,7 +1616,8 @@ var _ = SIGDescribe("Services", func() { jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, framework.LoadBalancerCreateTimeoutDefault) @@ -1743,17 +1745,17 @@ var _ = SIGDescribe("Services", func() { svcDisabled := getServeHostnameService("service-disabled") svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) ginkgo.By("creating service in namespace " + ns) svcToggled := getServeHostnameService("service") podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) + framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name) hosts, err := e2essh.NodeSSHHosts(cs) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal 
IPs for every node") + framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -1834,7 +1836,7 @@ var _ = SIGDescribe("Services", func() { } return true, errors.New("expected wget call to fail") }); pollErr != nil { - gomega.Expect(pollErr).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(pollErr) } }) @@ -1887,9 +1889,11 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // Make sure we didn't leak the health check node port. threshold := 2 for _, ips := range jig.GetEndpointNodes(svc) { - gomega.Expect(jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)).NotTo(gomega.HaveOccurred()) + err := jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold) + framework.ExpectNoError(err) } - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() svcTCPPort := int(svc.Spec.Ports[0].Port) @@ -1913,7 +1917,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true) defer func() { - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() tcpNodePort := int(svc.Spec.Ports[0].NodePort) @@ -1951,7 +1956,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) @@ -1990,7 +1996,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { port := strconv.Itoa(healthCheckNodePort) ipPort := net.JoinHostPort(publicIP, port) e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) - gomega.Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(gomega.HaveOccurred()) + err := jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold) + framework.ExpectNoError(err) } framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) } @@ -2006,7 +2013,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) @@ -2022,7 +2030,7 @@ var _ 
= SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { }) defer func() { err := cs.CoreV1().Pods(namespace).Delete(execPodName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", execPodName) + framework.ExpectNoError(err, "failed to delete pod: %s", execPodName) }() execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -2059,7 +2067,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + framework.ExpectNoError(err) }() // save the health check node port because it disappears when ESIPP is turned off. @@ -2161,7 +2170,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam defer func() { e2elog.Logf("Cleaning up the exec pod") err := c.CoreV1().Pods(ns).Delete(execPodName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", execPodName) + framework.ExpectNoError(err, "failed to delete pod: %s", execPodName) }() execPod, err := f.ClientSet.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -2217,13 +2226,13 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor serviceType := svc.Spec.Type svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) + framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) defer func() { framework.StopServeHostnameService(cs, ns, serviceName) }() jig := framework.NewServiceTestJig(cs, serviceName) svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch service: %s in namespace: %s", serviceName, ns) + framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) var svcIP string if serviceType == v1.ServiceTypeNodePort { nodes := framework.GetReadySchedulableNodesOrDie(cs) @@ -2239,10 +2248,10 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor defer func() { e2elog.Logf("Cleaning up the exec pod") err := cs.CoreV1().Pods(ns).Delete(execPodName, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", execPodName, ns) + framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPodName, ns) }() execPod, err := cs.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", execPodName, ns) + framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", execPodName, ns) if !isTransitionTest { gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) @@ -2276,7 +2285,7 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, ginkgo.By("creating service in namespace " + ns) svc.Spec.SessionAffinity = 
v1.ServiceAffinityClientIP _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) + framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) jig := framework.NewServiceTestJig(cs, serviceName) ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName) svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault) From 6bcb7d51308d9a96b1487acfd11ffc1e9e1c7b05 Mon Sep 17 00:00:00 2001 From: yuchengwu Date: Thu, 7 Mar 2019 21:26:21 +0800 Subject: [PATCH 180/194] Fix initContainer failed to get its own field value as env values --- pkg/api/v1/resource/helpers.go | 5 ++ pkg/api/v1/resource/helpers_test.go | 72 +++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go index d30ab7ed86e..e42a1b343bf 100644 --- a/pkg/api/v1/resource/helpers.go +++ b/pkg/api/v1/resource/helpers.go @@ -177,6 +177,11 @@ func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error return &container, nil } } + for _, container := range pod.Spec.InitContainers { + if container.Name == containerName { + return &container, nil + } + } return nil, fmt.Errorf("container %s not found", containerName) } diff --git a/pkg/api/v1/resource/helpers_test.go b/pkg/api/v1/resource/helpers_test.go index a0f41253f79..d5462a415dc 100644 --- a/pkg/api/v1/resource/helpers_test.go +++ b/pkg/api/v1/resource/helpers_test.go @@ -137,6 +137,72 @@ func TestExtractResourceValue(t *testing.T) { pod: getPod("foo", "", "", "10Mi", "100Mi"), expectedValue: "104857600", }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "limits.cpu", + }, + cName: "init-foo", + pod: getPod("foo", "", "9", "", ""), + expectedValue: "9", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "init-foo", + pod: getPod("foo", "", "", "", ""), + expectedValue: "0", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "init-foo", + pod: getPod("foo", "8", "", "", ""), + expectedValue: "8", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "init-foo", + pod: getPod("foo", "100m", "", "", ""), + expectedValue: "1", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + Divisor: resource.MustParse("100m"), + }, + cName: "init-foo", + pod: getPod("foo", "1200m", "", "", ""), + expectedValue: "12", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.memory", + }, + cName: "init-foo", + pod: getPod("foo", "", "", "100Mi", ""), + expectedValue: "104857600", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.memory", + Divisor: resource.MustParse("1Mi"), + }, + cName: "init-foo", + pod: getPod("foo", "", "", "100Mi", "1Gi"), + expectedValue: "100", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "limits.memory", + }, + cName: "init-foo", + pod: getPod("foo", "", "", "10Mi", "100Mi"), + expectedValue: "104857600", + }, } as := assert.New(t) for idx, tc := range cases { @@ -175,6 +241,12 @@ func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1. 
Resources: resources, }, }, + InitContainers: []v1.Container{ + { + Name: "init-" + cname, + Resources: resources, + }, + }, }, } } From 9da7db76b7aa82562d4eb33b9cec1ff1155667f4 Mon Sep 17 00:00:00 2001 From: Maciej Borsz Date: Wed, 15 May 2019 08:31:19 +0200 Subject: [PATCH 181/194] Revert "[Re-Apply][Distroless] Convert the GCE manifests for master containers." --- cluster/gce/gci/apiserver_manifest_test.go | 24 ++++------ cluster/gce/gci/configure-helper.sh | 44 +++---------------- cluster/gce/manifests/kube-apiserver.manifest | 10 ++--- .../kube-controller-manager.manifest | 9 ++-- cluster/gce/manifests/kube-scheduler.manifest | 9 ++-- 5 files changed, 26 insertions(+), 70 deletions(-) diff --git a/cluster/gce/gci/apiserver_manifest_test.go b/cluster/gce/gci/apiserver_manifest_test.go index 3c7ce508428..6bbdfce9908 100644 --- a/cluster/gce/gci/apiserver_manifest_test.go +++ b/cluster/gce/gci/apiserver_manifest_test.go @@ -94,12 +94,11 @@ func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv, kubeEnv s func TestEncryptionProviderFlag(t *testing.T) { var ( - // command": [ - // "/usr/local/bin/kube-apiserver " - Index 0, - // "--flag1=val1", - Index 1, - // "--flag2=val2", - Index 2, - // ... - // "--flagN=valN", - Index N, + // command": [ + // "/bin/sh", - Index 0 + // "-c", - Index 1 + // "exec /usr/local/bin/kube-apiserver " - Index 2 + execArgsIndex = 2 encryptionConfigFlag = "--encryption-provider-config" ) @@ -133,15 +132,10 @@ func TestEncryptionProviderFlag(t *testing.T) { c.invokeTest(e, deployHelperEnv) - var flagIsInArg bool - var flag, execArgs string - for _, execArgs = range c.pod.Spec.Containers[0].Args[1:] { - if strings.Contains(execArgs, encryptionConfigFlag) { - flagIsInArg = true - flag = fmt.Sprintf("%s=%s", encryptionConfigFlag, e.EncryptionProviderConfigPath) - break - } - } + execArgs := c.pod.Spec.Containers[0].Command[execArgsIndex] + flagIsInArg := strings.Contains(execArgs, encryptionConfigFlag) + flag := fmt.Sprintf("%s=%s", encryptionConfigFlag, e.EncryptionProviderConfigPath) + switch { case tc.wantFlag && !flagIsInArg: t.Fatalf("Got %q,\n want flags to contain %q", execArgs, flag) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 12ac4efbec4..0091ded0f40 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -25,24 +25,6 @@ set -o errexit set -o nounset set -o pipefail -function convert-manifest-params { - # A helper function to convert the manifest args from a string to a list of - # flag arguments. - # Old format: - # command=["/bin/sh", "-c", "exec KUBE_EXEC_BINARY --param1=val1 --param2-val2"]. - # New format: - # command=["KUBE_EXEC_BINARY"] # No shell dependencies. - # args=["--param1=val1", "--param2-val2"] - IFS=' ' read -ra FLAGS <<< "$1" - params="" - for flag in "${FLAGS[@]}"; do - params+="\n\"$flag\"," - done - if [ ! -z $params ]; then - echo "${params::-1}" # drop trailing comma - fi -} - function setup-os-params { # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for @@ -1849,10 +1831,6 @@ function start-kube-apiserver { # params is passed by reference, so no "$" setup-etcd-encryption "${src_file}" params - params+=" --log-file=${KUBE_API_SERVER_LOG_PATH:-/var/log/kube-apiserver.log}" - params+=" --logtostderr=false" - params+=" --log-file-max-size=0" - params="$(convert-manifest-params "${params}")" # Evaluate variables. 
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}" sed -i -e "s@{{params}}@${params}@g" "${src_file}" @@ -2034,8 +2012,7 @@ function apply-encryption-config() { function start-kube-controller-manager { echo "Start kubernetes controller-manager" create-kubeconfig "kube-controller-manager" ${KUBE_CONTROLLER_MANAGER_TOKEN} - local LOG_PATH=/var/log/kube-controller-manager.log - prepare-log-file "${LOG_PATH}" + prepare-log-file /var/log/kube-controller-manager.log # Calculate variables and assemble the command line. local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}" params+=" --use-service-account-credentials" @@ -2063,7 +2040,7 @@ function start-kube-controller-manager { params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}" fi if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then - params+=" --allocate-node-cidrs" + params+=" --allocate-node-cidrs=true" elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" fi @@ -2094,14 +2071,9 @@ function start-kube-controller-manager { params+=" --pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE" fi if [[ -n "${RUN_CONTROLLERS:-}" ]]; then - # Trim the `RUN_CONTROLLERS` value. This field is quoted which is - # incompatible with the `convert-manifest-params` format. - params+=" --controllers=${RUN_CONTROLLERS//\'}" + params+=" --controllers=${RUN_CONTROLLERS}" fi - params+=" --log-file=${LOG_PATH}" - params+=" --logtostderr=false" - params+=" --log-file-max-size=0" - params="$(convert-manifest-params "${params}")" + local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag) local container_env="" if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then @@ -2136,8 +2108,7 @@ function start-kube-controller-manager { function start-kube-scheduler { echo "Start kubernetes scheduler" create-kubeconfig "kube-scheduler" ${KUBE_SCHEDULER_TOKEN} - local LOG_PATH=/var/log/kube-scheduler.log - prepare-log-file "${LOG_PATH}" + prepare-log-file /var/log/kube-scheduler.log # Calculate variables and set them in the manifest. params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}" @@ -2153,11 +2124,6 @@ function start-kube-scheduler { params+=" --use-legacy-policy-config" params+=" --policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config" fi - - params+=" --log-file=${LOG_PATH}" - params+=" --logtostderr=false" - params+=" --log-file-max-size=0" - params="$(convert-manifest-params "${params}")" local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag") # Remove salt comments and replace variables with values. 
diff --git a/cluster/gce/manifests/kube-apiserver.manifest b/cluster/gce/manifests/kube-apiserver.manifest index 2b93941b8ba..2ae818a0fcd 100644 --- a/cluster/gce/manifests/kube-apiserver.manifest +++ b/cluster/gce/manifests/kube-apiserver.manifest @@ -25,12 +25,10 @@ } }, "command": [ - "/usr/local/bin/kube-apiserver" - ], - "args": [ - "--allow-privileged={{pillar['allow_privileged']}}", - {{params}} - ], + "/bin/sh", + "-c", + "exec /usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1" + ], {{container_env}} "livenessProbe": { "httpGet": { diff --git a/cluster/gce/manifests/kube-controller-manager.manifest b/cluster/gce/manifests/kube-controller-manager.manifest index 55850431276..be19cab1ce1 100644 --- a/cluster/gce/manifests/kube-controller-manager.manifest +++ b/cluster/gce/manifests/kube-controller-manager.manifest @@ -25,11 +25,10 @@ } }, "command": [ - "/usr/local/bin/kube-controller-manager" - ], - "args": [ - {{params}} - ], + "/bin/sh", + "-c", + "exec /usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1" + ], {{container_env}} "livenessProbe": { "httpGet": { diff --git a/cluster/gce/manifests/kube-scheduler.manifest b/cluster/gce/manifests/kube-scheduler.manifest index e4acaacd029..a0648f9c189 100644 --- a/cluster/gce/manifests/kube-scheduler.manifest +++ b/cluster/gce/manifests/kube-scheduler.manifest @@ -25,11 +25,10 @@ } }, "command": [ - "/usr/local/bin/kube-scheduler" - ], - "args": [ - {{params}} - ], + "/bin/sh", + "-c", + "exec /usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1" + ], "livenessProbe": { "httpGet": { "host": "127.0.0.1", From b13d80a59a8c6eaf7f25a74c9687d98242f86dda Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 15 May 2019 14:24:43 +0800 Subject: [PATCH 182/194] Allow Kubelet to run with no Azure identity useInstanceMetadata should be enabled and Kubelet would use IMDS to get node's information. --- .../azure/auth/azure_auth.go | 7 +- .../legacy-cloud-providers/azure/azure.go | 75 +++++++++++-------- .../azure/azure_instances.go | 27 +++++-- 3 files changed, 72 insertions(+), 37 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go b/staging/src/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go index 6a651eb05c0..2e051d47b7b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go @@ -28,6 +28,11 @@ import ( "k8s.io/klog" ) +var ( + // ErrorNoAuth indicates that no credentials are provided. + ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider") +) + // AzureAuthConfig holds auth related part of cloud config type AzureAuthConfig struct { // The cloud environment identifier. 
Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13 @@ -104,7 +109,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) ( env.ServiceManagementEndpoint) } - return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID) + return nil, ErrorNoAuth } // ParseAzureEnvironment returns azure environment by name diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go index 9bdc76d7259..48585179f8f 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go @@ -248,7 +248,14 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { } servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env) - if err != nil { + if err == auth.ErrorNoAuth { + if !config.UseInstanceMetadata { + // No credentials provided, useInstanceMetadata should be enabled. + return nil, fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials") + } + + klog.V(2).Infof("Azure cloud provider is starting without credentials") + } else if err != nil { return nil, err } @@ -348,6 +355,27 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { } } + az := Cloud{ + Config: *config, + Environment: *env, + nodeZones: map[string]sets.String{}, + nodeResourceGroups: map[string]string{}, + unmanagedNodes: sets.NewString(), + routeCIDRs: map[string]string{}, + resourceRequestBackoff: resourceRequestBackoff, + } + az.metadata, err = NewInstanceMetadataService(metadataURL) + if err != nil { + return nil, err + } + + // No credentials provided, InstanceMetadataService would be used for getting Azure resources. + // Note that this only applies to Kubelet, controller-manager should configure credentials for managing Azure resources. + if servicePrincipalToken == nil { + return &az, nil + } + + // Initialize Azure clients. 
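The flow added to NewCloud is the sentinel-error pattern: token acquisition signals "no credentials" through a dedicated error value (auth.ErrorNoAuth), the caller branches on that value instead of inspecting the message text, and it falls back to an IMDS-only mode when useInstanceMetadata is enabled. A condensed sketch of that shape, with placeholder names standing in for the real azure package types:

    package example

    import "errors"

    // errNoAuth plays the role of auth.ErrorNoAuth in the real provider.
    var errNoAuth = errors.New("no credentials provided")

    type config struct{ UseInstanceMetadata bool }

    type cloud struct {
        metadataOnly bool
        token        string
    }

    // getToken is a stand-in for auth.GetServicePrincipalToken.
    func getToken(cfg config) (string, error) { return "", errNoAuth }

    func newCloud(cfg config) (*cloud, error) {
        token, err := getToken(cfg)
        if err == errNoAuth {
            if !cfg.UseInstanceMetadata {
                return nil, errors.New("useInstanceMetadata must be enabled without credentials")
            }
            // kubelet-only mode: node information comes from instance metadata,
            // so the ARM clients are never constructed
            return &cloud{metadataOnly: true}, nil
        } else if err != nil {
            return nil, err
        }
        return &cloud{token: token}, nil
    }
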
azClientConfig := &azClientConfig{ subscriptionID: config.SubscriptionID, resourceManagerEndpoint: env.ResourceManagerEndpoint, @@ -358,36 +386,21 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { CloudProviderBackoffDuration: config.CloudProviderBackoffDuration, ShouldOmitCloudProviderBackoff: config.shouldOmitCloudProviderBackoff(), } - az := Cloud{ - Config: *config, - Environment: *env, - nodeZones: map[string]sets.String{}, - nodeResourceGroups: map[string]string{}, - unmanagedNodes: sets.NewString(), - routeCIDRs: map[string]string{}, - resourceRequestBackoff: resourceRequestBackoff, - - DisksClient: newAzDisksClient(azClientConfig), - SnapshotsClient: newSnapshotsClient(azClientConfig), - RoutesClient: newAzRoutesClient(azClientConfig), - SubnetsClient: newAzSubnetsClient(azClientConfig), - InterfacesClient: newAzInterfacesClient(azClientConfig), - RouteTablesClient: newAzRouteTablesClient(azClientConfig), - LoadBalancerClient: newAzLoadBalancersClient(azClientConfig), - SecurityGroupsClient: newAzSecurityGroupsClient(azClientConfig), - StorageAccountClient: newAzStorageAccountClient(azClientConfig), - VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig), - PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig), - VirtualMachineSizesClient: newAzVirtualMachineSizesClient(azClientConfig), - VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig), - VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig), - FileClient: &azureFileClient{env: *env}, - } - - az.metadata, err = NewInstanceMetadataService(metadataURL) - if err != nil { - return nil, err - } + az.DisksClient = newAzDisksClient(azClientConfig) + az.SnapshotsClient = newSnapshotsClient(azClientConfig) + az.RoutesClient = newAzRoutesClient(azClientConfig) + az.SubnetsClient = newAzSubnetsClient(azClientConfig) + az.InterfacesClient = newAzInterfacesClient(azClientConfig) + az.RouteTablesClient = newAzRouteTablesClient(azClientConfig) + az.LoadBalancerClient = newAzLoadBalancersClient(azClientConfig) + az.SecurityGroupsClient = newAzSecurityGroupsClient(azClientConfig) + az.StorageAccountClient = newAzStorageAccountClient(azClientConfig) + az.VirtualMachinesClient = newAzVirtualMachinesClient(azClientConfig) + az.PublicIPAddressesClient = newAzPublicIPAddressesClient(azClientConfig) + az.VirtualMachineSizesClient = newAzVirtualMachineSizesClient(azClientConfig) + az.VirtualMachineScaleSetsClient = newAzVirtualMachineScaleSetsClient(azClientConfig) + az.VirtualMachineScaleSetVMsClient = newAzVirtualMachineScaleSetVMsClient(azClientConfig) + az.FileClient = &azureFileClient{env: *env} if az.MaximumLoadBalancerRuleCount == 0 { az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go index 07bba979b74..ae6442ddb88 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -83,7 +83,12 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N // Not local instance, get addresses from Azure ARM API. if !isLocalInstance { - return addressGetter(name) + if az.vmSet != nil { + return addressGetter(name) + } + + // vmSet == nil indicates credentials are not provided. 
+ return nil, fmt.Errorf("no credentials provided for Azure cloud provider") } if len(metadata.Network.Interface) == 0 { @@ -242,7 +247,12 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e // Not local instance, get instanceID from Azure ARM API. if !isLocalInstance { - return az.vmSet.GetInstanceIDByNodeName(nodeName) + if az.vmSet != nil { + return az.vmSet.GetInstanceIDByNodeName(nodeName) + } + + // vmSet == nil indicates credentials are not provided. + return "", fmt.Errorf("no credentials provided for Azure cloud provider") } // Get resource group name. @@ -316,10 +326,17 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, if err != nil { return "", err } - if isLocalInstance { - if metadata.Compute.VMSize != "" { - return metadata.Compute.VMSize, nil + if !isLocalInstance { + if az.vmSet != nil { + return az.vmSet.GetInstanceTypeByNodeName(string(name)) } + + // vmSet == nil indicates credentials are not provided. + return "", fmt.Errorf("no credentials provided for Azure cloud provider") + } + + if metadata.Compute.VMSize != "" { + return metadata.Compute.VMSize, nil } } From 7fa953fcb5b22ee546809c460fded74e0dec867b Mon Sep 17 00:00:00 2001 From: SataQiu Date: Wed, 15 May 2019 15:37:21 +0800 Subject: [PATCH 183/194] fix golint failures of test/e2e_node/services --- hack/.golint_failures | 1 - test/e2e_node/services/apiserver.go | 1 + test/e2e_node/services/internal_services.go | 6 +++--- test/e2e_node/services/kubelet.go | 10 +++++----- test/e2e_node/services/server.go | 7 +++---- test/e2e_node/services/services.go | 4 ++-- 6 files changed, 14 insertions(+), 15 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 0e0efe4e94e..3fad7284925 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -613,5 +613,4 @@ test/e2e_kubeadm test/e2e_node test/e2e_node/remote test/e2e_node/runner/remote -test/e2e_node/services test/utils diff --git a/test/e2e_node/services/apiserver.go b/test/e2e_node/services/apiserver.go index 6f234cb6937..a86fe3e6daf 100644 --- a/test/e2e_node/services/apiserver.go +++ b/test/e2e_node/services/apiserver.go @@ -90,6 +90,7 @@ func (a *APIServer) Stop() error { const apiserverName = "apiserver" +// Name returns the name of APIServer. func (a *APIServer) Name() string { return apiserverName } diff --git a/test/e2e_node/services/internal_services.go b/test/e2e_node/services/internal_services.go index 3890ffd2875..4782dfe1fda 100644 --- a/test/e2e_node/services/internal_services.go +++ b/test/e2e_node/services/internal_services.go @@ -60,7 +60,7 @@ func (es *e2eServices) start(t *testing.T) error { if err != nil { return err } - err = es.startApiServer(es.etcdStorage) + err = es.startAPIServer(es.etcdStorage) if err != nil { return err } @@ -116,8 +116,8 @@ func (es *e2eServices) startEtcd(t *testing.T) error { return nil } -// startApiServer starts the embedded API server or returns an error. -func (es *e2eServices) startApiServer(etcdStorage *storagebackend.Config) error { +// startAPIServer starts the embedded API server or returns an error. 
+func (es *e2eServices) startAPIServer(etcdStorage *storagebackend.Config) error { klog.Info("Starting API server") es.apiServer = NewAPIServer(*etcdStorage) return es.apiServer.Start() diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 996a09926f0..c22b7e2a2ea 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -91,8 +91,9 @@ func RunKubelet() { const ( // Ports of different e2e services. - kubeletPort = "10250" - kubeletReadOnlyPort = "10255" + kubeletPort = "10250" + kubeletReadOnlyPort = "10255" + // KubeletRootDirectory specifies the directory where the kubelet runtime information is stored. KubeletRootDirectory = "/var/lib/kubelet" // Health check url of kubelet kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz" @@ -258,7 +259,7 @@ func (e *E2EServices) startKubelet() (*server, error) { cmdArgs = append(cmdArgs, "--kubeconfig", kubeconfigPath, "--root-dir", KubeletRootDirectory, - "--v", LOG_VERBOSITY_LEVEL, "--logtostderr", + "--v", LogVerbosityLevel, "--logtostderr", "--allow-privileged=true", ) @@ -412,9 +413,8 @@ func createRootDirectory(path string) error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { return os.MkdirAll(path, os.FileMode(0755)) - } else { - return err } + return err } return nil } diff --git a/test/e2e_node/services/server.go b/test/e2e_node/services/server.go index 22a162f7a25..fe00593c12d 100644 --- a/test/e2e_node/services/server.go +++ b/test/e2e_node/services/server.go @@ -106,7 +106,7 @@ func (s *server) start() error { var stopRestartingCh, ackStopRestartingCh chan bool if s.restartOnExit { if len(s.healthCheckUrls) == 0 { - return fmt.Errorf("Tried to start %s which has s.restartOnExit == true, but no health check urls provided.", s) + return fmt.Errorf("tried to start %s which has s.restartOnExit == true, but no health check urls provided", s) } stopRestartingCh = make(chan bool) @@ -124,11 +124,10 @@ func (s *server) start() error { outPath := path.Join(framework.TestContext.ReportDir, s.outFilename) outfile, err := os.Create(outPath) if err != nil { - errCh <- fmt.Errorf("failed to create file %q for `%s` %v.", outPath, s, err) + errCh <- fmt.Errorf("failed to create file %q for `%s` %v", outPath, s, err) return - } else { - klog.Infof("Output file for server %q: %v", s.name, outfile.Name()) } + klog.Infof("Output file for server %q: %v", s.name, outfile.Name()) defer outfile.Close() defer outfile.Sync() diff --git a/test/e2e_node/services/services.go b/test/e2e_node/services/services.go index d344acb7d07..931741a4184 100644 --- a/test/e2e_node/services/services.go +++ b/test/e2e_node/services/services.go @@ -121,8 +121,8 @@ func RunE2EServices(t *testing.T) { const ( // services.log is the combined log of all services servicesLogFile = "services.log" - // LOG_VERBOSITY_LEVEL is consistent with the level used in a cluster e2e test. - LOG_VERBOSITY_LEVEL = "4" + // LogVerbosityLevel is consistent with the level used in a cluster e2e test. + LogVerbosityLevel = "4" ) // startInternalServices starts the internal services in a separate process. 
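The renames in this commit follow standard golint guidance: Go initialisms keep their capitalization (startAPIServer, LogVerbosityLevel), error strings start lowercase and carry no trailing punctuation so they compose when wrapped by callers, and an else branch that follows a return is flattened. A small standalone illustration of those rules; the port-validation helper is a made-up example, while createRootDirectory mirrors the hunk above:

    package example

    import (
        "fmt"
        "os"
    )

    // startAPIServer: initialisms such as API stay fully capitalized.
    func startAPIServer(port int) error {
        if port <= 0 {
            // lowercase, unpunctuated error string
            return fmt.Errorf("invalid API server port %d", port)
        }
        return nil
    }

    // createRootDirectory: no else after a branch that returns; the error path
    // falls through to a plain return instead.
    func createRootDirectory(path string) error {
        if _, err := os.Stat(path); err != nil {
            if os.IsNotExist(err) {
                return os.MkdirAll(path, os.FileMode(0755))
            }
            return err
        }
        return nil
    }
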
From 38b7f2125fa571bdd9fe3070b808635f88586dd1 Mon Sep 17 00:00:00 2001 From: adisky Date: Wed, 15 May 2019 13:47:48 +0530 Subject: [PATCH 184/194] fix unbound array variable --- hack/lib/golang.sh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index e2f203725a5..81d934cef81 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -163,6 +163,8 @@ kube::golang::dedup() { # to readonly. # The configured vars will only contain platforms allowed by the # KUBE_SUPPORTED* vars at the top of this file. +declare -a -g KUBE_SERVER_PLATFORMS +declare -a -g KUBE_CLIENT_PLATFORMS kube::golang::setup_platforms() { if [[ -n "${KUBE_BUILD_PLATFORMS:-}" ]]; then # KUBE_BUILD_PLATFORMS needs to be read into an array before the next @@ -202,20 +204,23 @@ kube::golang::setup_platforms() { readonly KUBE_CLIENT_PLATFORMS elif [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then - readonly KUBE_SERVER_PLATFORMS=(linux/amd64) + KUBE_SERVER_PLATFORMS=(linux/amd64) + readonly KUBE_SERVER_PLATFORMS readonly KUBE_NODE_PLATFORMS=(linux/amd64) if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then readonly KUBE_TEST_PLATFORMS=( darwin/amd64 linux/amd64 ) - readonly KUBE_CLIENT_PLATFORMS=( + KUBE_CLIENT_PLATFORMS=( darwin/amd64 linux/amd64 - ) + ) + readonly KUBE_CLIENT_PLATFORMS else readonly KUBE_TEST_PLATFORMS=(linux/amd64) - readonly KUBE_CLIENT_PLATFORMS=(linux/amd64) + KUBE_CLIENT_PLATFORMS=(linux/amd64) + readonly KUBE_CLIENT_PLATFORMS fi else KUBE_SERVER_PLATFORMS=("${KUBE_SUPPORTED_SERVER_PLATFORMS[@]}") From 6db533dd5b58f8c029db4b4a6d8266bf88c4cc1e Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 15 May 2019 10:25:50 +0200 Subject: [PATCH 185/194] refactor certs renewal --- cmd/kubeadm/app/cmd/alpha/certs.go | 217 ++++------- cmd/kubeadm/app/cmd/alpha/certs_test.go | 155 ++++---- cmd/kubeadm/app/phases/certs/certlist.go | 10 +- .../renewal/{certsapi.go => apirenewer.go} | 16 +- .../phases/certs/renewal/apirenewer_test.go | 119 ++++++ .../{filerenewal.go => filerenewer.go} | 14 +- ...ilerenewal_test.go => filerenewer_test.go} | 16 +- .../app/phases/certs/renewal/interface.go | 29 -- .../app/phases/certs/renewal/manager.go | 288 ++++++++++++++ .../app/phases/certs/renewal/manager_test.go | 270 +++++++++++++ .../app/phases/certs/renewal/readwriter.go | 173 +++++++++ .../phases/certs/renewal/readwriter_test.go | 179 +++++++++ .../app/phases/certs/renewal/renewal.go | 131 ------- .../app/phases/certs/renewal/renewal_test.go | 359 ------------------ cmd/kubeadm/app/phases/upgrade/staticpods.go | 163 +++----- .../app/phases/upgrade/staticpods_test.go | 8 +- 16 files changed, 1260 insertions(+), 887 deletions(-) rename cmd/kubeadm/app/phases/certs/renewal/{certsapi.go => apirenewer.go} (88%) create mode 100644 cmd/kubeadm/app/phases/certs/renewal/apirenewer_test.go rename cmd/kubeadm/app/phases/certs/renewal/{filerenewal.go => filerenewer.go} (64%) rename cmd/kubeadm/app/phases/certs/renewal/{filerenewal_test.go => filerenewer_test.go} (78%) delete mode 100644 cmd/kubeadm/app/phases/certs/renewal/interface.go create mode 100644 cmd/kubeadm/app/phases/certs/renewal/manager.go create mode 100644 cmd/kubeadm/app/phases/certs/renewal/manager_test.go create mode 100644 cmd/kubeadm/app/phases/certs/renewal/readwriter.go create mode 100644 cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go delete mode 100644 cmd/kubeadm/app/phases/certs/renewal/renewal.go delete mode 100644 cmd/kubeadm/app/phases/certs/renewal/renewal_test.go diff --git 
a/cmd/kubeadm/app/cmd/alpha/certs.go b/cmd/kubeadm/app/cmd/alpha/certs.go index fbd3b007748..937b99df3ee 100644 --- a/cmd/kubeadm/app/cmd/alpha/certs.go +++ b/cmd/kubeadm/app/cmd/alpha/certs.go @@ -19,14 +19,16 @@ package alpha import ( "fmt" + "github.com/pkg/errors" "github.com/spf13/cobra" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" @@ -36,14 +38,16 @@ import ( var ( genericCertRenewLongDesc = normalizer.LongDesc(` - Renew the %[1]s, and save them into %[2]s.cert and %[2]s.key files. + Renew the %s. - Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. -`) - genericCertRenewEmbeddedLongDesc = normalizer.LongDesc(` -Renew the certificate embedded in the kubeconfig file %s. + Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will + be based on the existing file/certificates, there is no need to resupply them. -Kubeconfig attributes and certificate extra attributes such as SANs will be based on the existing kubeconfig/certificates, there is no need to resupply them. + Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative + it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + + After renewal, in order to make changes effective, is is required to restart control-plane components and + eventually re-distribute the renewed certificate in case the file is used elsewhere. 
`) allLongDesc = normalizer.LongDesc(` @@ -78,17 +82,17 @@ func newCmdCertsRenewal() *cobra.Command { return cmd } -type renewConfig struct { +type renewFlags struct { cfgPath string kubeconfigPath string cfg kubeadmapiv1beta2.InitConfiguration useAPI bool - useCSR bool + csrOnly bool csrPath string } func getRenewSubCommands(kdir string) []*cobra.Command { - cfg := &renewConfig{ + flags := &renewFlags{ cfg: kubeadmapiv1beta2.InitConfiguration{ ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{ // Setting kubernetes version to a default value in order to allow a not necessary internet lookup @@ -97,45 +101,28 @@ func getRenewSubCommands(kdir string) []*cobra.Command { }, } // Default values for the cobra help text - kubeadmscheme.Scheme.Default(&cfg.cfg) + kubeadmscheme.Scheme.Default(&flags.cfg) - certTree, err := certsphase.GetDefaultCertList().AsMap().CertTree() + // Get a renewal manager for a generic Cluster configuration, that is used only for getting + // the list of certificates for building subcommands + rm, err := renewal.NewManager(&kubeadmapi.ClusterConfiguration{}, "") kubeadmutil.CheckErr(err) cmdList := []*cobra.Command{} funcList := []func(){} - for caCert, certs := range certTree { - // Don't offer to renew CAs; would cause serious consequences - for _, cert := range certs { - // get the cobra.Command skeleton for this command - cmd := generateCertRenewalCommand(cert, cfg) - // get the implementation of renewing this certificate - renewalFunc := func(cert *certsphase.KubeadmCert, caCert *certsphase.KubeadmCert) func() { - return func() { renewCert(cert, caCert, cfg) } - }(cert, caCert) - // install the implementation into the command - cmd.Run = func(*cobra.Command, []string) { renewalFunc() } - cmdList = append(cmdList, cmd) - // Collect renewal functions for `renew all` - funcList = append(funcList, renewalFunc) - } - } - - kubeconfigs := []string{ - kubeadmconstants.AdminKubeConfigFileName, - kubeadmconstants.ControllerManagerKubeConfigFileName, - kubeadmconstants.SchedulerKubeConfigFileName, - //NB. 
we are escluding KubeletKubeConfig from renewal because management of this certificate is delegated to kubelet - } - - for _, k := range kubeconfigs { + for _, handler := range rm.Certificates() { // get the cobra.Command skeleton for this command - cmd := generateEmbeddedCertRenewalCommand(k, cfg) + cmd := &cobra.Command{ + Use: handler.Name, + Short: fmt.Sprintf("Renew the %s", handler.LongName), + Long: fmt.Sprintf(genericCertRenewLongDesc, handler.LongName), + } + addFlags(cmd, flags) // get the implementation of renewing this certificate - renewalFunc := func(kdir, k string) func() { - return func() { renewEmbeddedCert(kdir, k, cfg) } - }(kdir, k) + renewalFunc := func(handler *renewal.CertificateRenewHandler) func() { + return func() { renewCert(flags, kdir, handler) } + }(handler) // install the implementation into the command cmd.Run = func(*cobra.Command, []string) { renewalFunc() } cmdList = append(cmdList, cmd) @@ -153,134 +140,60 @@ func getRenewSubCommands(kdir string) []*cobra.Command { } }, } - addFlags(allCmd, cfg) + addFlags(allCmd, flags) cmdList = append(cmdList, allCmd) return cmdList } -func addFlags(cmd *cobra.Command, cfg *renewConfig) { - options.AddConfigFlag(cmd.Flags(), &cfg.cfgPath) - options.AddCertificateDirFlag(cmd.Flags(), &cfg.cfg.CertificatesDir) - options.AddKubeConfigFlag(cmd.Flags(), &cfg.kubeconfigPath) - options.AddCSRFlag(cmd.Flags(), &cfg.useCSR) - options.AddCSRDirFlag(cmd.Flags(), &cfg.csrPath) - cmd.Flags().BoolVar(&cfg.useAPI, "use-api", cfg.useAPI, "Use the Kubernetes certificate API to renew certificates") +func addFlags(cmd *cobra.Command, flags *renewFlags) { + options.AddConfigFlag(cmd.Flags(), &flags.cfgPath) + options.AddCertificateDirFlag(cmd.Flags(), &flags.cfg.CertificatesDir) + options.AddKubeConfigFlag(cmd.Flags(), &flags.kubeconfigPath) + options.AddCSRFlag(cmd.Flags(), &flags.csrOnly) + options.AddCSRDirFlag(cmd.Flags(), &flags.csrPath) + cmd.Flags().BoolVar(&flags.useAPI, "use-api", flags.useAPI, "Use the Kubernetes certificate API to renew certificates") } -func renewCert(cert *certsphase.KubeadmCert, caCert *certsphase.KubeadmCert, cfg *renewConfig) { - internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfg.cfgPath, &cfg.cfg) +func renewCert(flags *renewFlags, kdir string, handler *renewal.CertificateRenewHandler) { + internalcfg, err := configutil.LoadOrDefaultInitConfiguration(flags.cfgPath, &flags.cfg) kubeadmutil.CheckErr(err) - // if the renewal operation is set to generate only CSR request - if cfg.useCSR { - // trigger CSR generation in the csrPath, or if this one is missing, in the CertificateDir - path := cfg.csrPath - if path == "" { - path = cfg.cfg.CertificatesDir + // Get a renewal manager for the given cluster configuration + rm, err := renewal.NewManager(&internalcfg.ClusterConfiguration, kdir) + kubeadmutil.CheckErr(err) + + // if the renewal operation is set to generate CSR request only + if flags.csrOnly { + // checks a path for storing CSR request is given + if flags.csrPath == "" { + kubeadmutil.CheckErr(errors.New("please provide a path where CSR request should be stored")) } - err := certsphase.CreateCSR(cert, internalcfg, path) + err := rm.CreateRenewCSR(handler.Name, flags.csrPath) kubeadmutil.CheckErr(err) return } // otherwise, the renewal operation has to actually renew a certificate - var externalCA bool - switch caCert.BaseName { - case kubeadmconstants.CACertAndKeyBaseName: - // Check if an external CA is provided by the user (when the CA Cert is present but the CA Key is not) - 
externalCA, _ = certsphase.UsingExternalCA(&internalcfg.ClusterConfiguration) - case kubeadmconstants.FrontProxyCACertAndKeyBaseName: - // Check if an external Front-Proxy CA is provided by the user (when the Front-Proxy CA Cert is present but the Front-Proxy CA Key is not) - externalCA, _ = certsphase.UsingExternalFrontProxyCA(&internalcfg.ClusterConfiguration) - default: - externalCA = false - } - - if !externalCA { - renewer, err := getRenewer(cfg, caCert.BaseName) - kubeadmutil.CheckErr(err) - - err = renewal.RenewExistingCert(internalcfg.CertificatesDir, cert.BaseName, renewer) - kubeadmutil.CheckErr(err) - - fmt.Printf("Certificate %s renewed\n", cert.Name) - return - } - - fmt.Printf("Detected external %s, certificate %s can't be renewed\n", cert.CAName, cert.Name) -} - -func renewEmbeddedCert(kdir, k string, cfg *renewConfig) { - internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfg.cfgPath, &cfg.cfg) - kubeadmutil.CheckErr(err) - - // if the renewal operation is set to generate only CSR request - if cfg.useCSR { - // trigger CSR generation in the csrPath, or if this one is missing, in the CertificateDir - path := cfg.csrPath - if path == "" { - path = cfg.cfg.CertificatesDir - } - err := certsphase.CreateCSR(nil, internalcfg, path) - kubeadmutil.CheckErr(err) - return - } - - // otherwise, the renewal operation has to actually renew a certificate - - // Check if an external CA is provided by the user (when the CA Cert is present but the CA Key is not) - externalCA, _ := certsphase.UsingExternalCA(&internalcfg.ClusterConfiguration) - - if !externalCA { - renewer, err := getRenewer(cfg, certsphase.KubeadmCertRootCA.BaseName) - kubeadmutil.CheckErr(err) - - err = renewal.RenewEmbeddedClientCert(kdir, k, renewer) - kubeadmutil.CheckErr(err) - - fmt.Printf("Certificate embedded in %s renewed\n", k) - return - } - - fmt.Printf("Detected external CA, certificate embedded in %s can't be renewed\n", k) -} - -func generateCertRenewalCommand(cert *certsphase.KubeadmCert, cfg *renewConfig) *cobra.Command { - cmd := &cobra.Command{ - Use: cert.Name, - Short: fmt.Sprintf("Renew the %s", cert.LongName), - Long: fmt.Sprintf(genericCertRenewLongDesc, cert.LongName, cert.BaseName), - } - addFlags(cmd, cfg) - return cmd -} - -func generateEmbeddedCertRenewalCommand(k string, cfg *renewConfig) *cobra.Command { - cmd := &cobra.Command{ - Use: k, - Short: fmt.Sprintf("Renew the certificate embedded in %s", k), - Long: fmt.Sprintf(genericCertRenewEmbeddedLongDesc, k), - } - addFlags(cmd, cfg) - return cmd -} - -func getRenewer(cfg *renewConfig, caCertBaseName string) (renewal.Interface, error) { - if cfg.useAPI { - kubeConfigPath := cmdutil.GetKubeConfigPath(cfg.kubeconfigPath) + // renew the certificate using the requested renew method + if flags.useAPI { + // renew using K8s certificate API + kubeConfigPath := cmdutil.GetKubeConfigPath(flags.kubeconfigPath) client, err := kubeconfigutil.ClientSetFromFile(kubeConfigPath) - if err != nil { - return nil, err + kubeadmutil.CheckErr(err) + + err = rm.RenewUsingCSRAPI(handler.Name, client) + kubeadmutil.CheckErr(err) + } else { + // renew using local certificate authorities. 
+ // this operation can't complete in case the certificate key is not provided (external CA) + renewed, err := rm.RenewUsingLocalCA(handler.Name) + kubeadmutil.CheckErr(err) + if !renewed { + fmt.Printf("Detected external %s, %s can't be renewed\n", handler.CABaseName, handler.LongName) + return } - return renewal.NewCertsAPIRenawal(client), nil } - - caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.cfg.CertificatesDir, caCertBaseName) - if err != nil { - return nil, err - } - - return renewal.NewFileRenewal(caCert, caKey), nil + fmt.Printf("%s renewed\n", handler.LongName) } diff --git a/cmd/kubeadm/app/cmd/alpha/certs_test.go b/cmd/kubeadm/app/cmd/alpha/certs_test.go index ed5a11b7f98..78227813b67 100644 --- a/cmd/kubeadm/app/cmd/alpha/certs_test.go +++ b/cmd/kubeadm/app/cmd/alpha/certs_test.go @@ -55,6 +55,10 @@ func TestCommandsGenerated(t *testing.T) { "renew etcd-server", "renew etcd-peer", "renew etcd-healthcheck-client", + + "renew admin.conf", + "renew scheduler.conf", + "renew controller-manager.conf", } renewCmd := newCmdCertsRenewal() @@ -79,19 +83,63 @@ func TestCommandsGenerated(t *testing.T) { } func TestRunRenewCommands(t *testing.T) { + tmpDir := testutil.SetupTempDir(t) + defer os.RemoveAll(tmpDir) + + cfg := testutil.GetDefaultInternalConfig(t) + cfg.CertificatesDir = tmpDir + + // Generate all the CA + CACerts := map[string]*x509.Certificate{} + CAKeys := map[string]crypto.Signer{} + for _, ca := range []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertRootCA, + &certsphase.KubeadmCertFrontProxyCA, + &certsphase.KubeadmCertEtcdCA, + } { + caCert, caKey, err := ca.CreateAsCA(cfg) + if err != nil { + t.Fatalf("couldn't write out CA %s: %v", ca.Name, err) + } + CACerts[ca.Name] = caCert + CAKeys[ca.Name] = caKey + } + + // Generate all the signed certificates + for _, cert := range []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertAPIServer, + &certsphase.KubeadmCertKubeletClient, + &certsphase.KubeadmCertFrontProxyClient, + &certsphase.KubeadmCertEtcdAPIClient, + &certsphase.KubeadmCertEtcdServer, + &certsphase.KubeadmCertEtcdPeer, + &certsphase.KubeadmCertEtcdHealthcheck, + } { + caCert := CACerts[cert.CAName] + caKey := CAKeys[cert.CAName] + if err := cert.CreateFromCA(cfg, caCert, caKey); err != nil { + t.Fatalf("couldn't write certificate %s: %v", cert.Name, err) + } + } + + // Generate all the kubeconfig files with embedded certs + for _, kubeConfig := range []string{ + kubeadmconstants.AdminKubeConfigFileName, + kubeadmconstants.SchedulerKubeConfigFileName, + kubeadmconstants.ControllerManagerKubeConfigFileName, + } { + if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpDir, cfg); err != nil { + t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err) + } + } + tests := []struct { command string - CAs []*certsphase.KubeadmCert Certs []*certsphase.KubeadmCert KubeconfigFiles []string }{ { command: "all", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertRootCA, - &certsphase.KubeadmCertFrontProxyCA, - &certsphase.KubeadmCertEtcdCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertAPIServer, &certsphase.KubeadmCertKubeletClient, @@ -109,90 +157,60 @@ func TestRunRenewCommands(t *testing.T) { }, { command: "apiserver", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertRootCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertAPIServer, }, }, { command: "apiserver-kubelet-client", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertRootCA, - }, Certs: []*certsphase.KubeadmCert{ 
&certsphase.KubeadmCertKubeletClient, }, }, { command: "apiserver-etcd-client", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertEtcdCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertEtcdAPIClient, }, }, { command: "front-proxy-client", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertFrontProxyCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertFrontProxyClient, }, }, { command: "etcd-server", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertEtcdCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertEtcdServer, }, }, { command: "etcd-peer", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertEtcdCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertEtcdPeer, }, }, { command: "etcd-healthcheck-client", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertEtcdCA, - }, Certs: []*certsphase.KubeadmCert{ &certsphase.KubeadmCertEtcdHealthcheck, }, }, { command: "admin.conf", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertRootCA, - }, KubeconfigFiles: []string{ kubeadmconstants.AdminKubeConfigFileName, }, }, { command: "scheduler.conf", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertRootCA, - }, KubeconfigFiles: []string{ kubeadmconstants.SchedulerKubeConfigFileName, }, }, { command: "controller-manager.conf", - CAs: []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertRootCA, - }, KubeconfigFiles: []string{ kubeadmconstants.ControllerManagerKubeConfigFileName, }, @@ -201,74 +219,43 @@ func TestRunRenewCommands(t *testing.T) { for _, test := range tests { t.Run(test.command, func(t *testing.T) { - tmpDir := testutil.SetupTempDir(t) - defer os.RemoveAll(tmpDir) - - cfg := testutil.GetDefaultInternalConfig(t) - cfg.CertificatesDir = tmpDir - - // Generate all the CA - CACerts := map[string]*x509.Certificate{} - CAKeys := map[string]crypto.Signer{} - for _, ca := range test.CAs { - caCert, caKey, err := ca.CreateAsCA(cfg) - if err != nil { - t.Fatalf("couldn't write out CA %s: %v", ca.Name, err) - } - CACerts[ca.Name] = caCert - CAKeys[ca.Name] = caKey - } - - // Generate all the signed certificates (and store creation time) - createTime := map[string]time.Time{} + // Get file ModTime before renew + ModTime := map[string]time.Time{} for _, cert := range test.Certs { - caCert := CACerts[cert.CAName] - caKey := CAKeys[cert.CAName] - if err := cert.CreateFromCA(cfg, caCert, caKey); err != nil { - t.Fatalf("couldn't write certificate %s: %v", cert.Name, err) - } - file, err := os.Stat(filepath.Join(tmpDir, fmt.Sprintf("%s.crt", cert.BaseName))) if err != nil { t.Fatalf("couldn't get certificate %s: %v", cert.Name, err) } - createTime[cert.Name] = file.ModTime() + ModTime[cert.Name] = file.ModTime() } - - // Generate all the kubeconfig files with embedded certs(and store creation time) for _, kubeConfig := range test.KubeconfigFiles { - if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpDir, cfg); err != nil { - t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err) - } file, err := os.Stat(filepath.Join(tmpDir, kubeConfig)) if err != nil { t.Fatalf("couldn't get kubeconfig %s: %v", kubeConfig, err) } - createTime[kubeConfig] = file.ModTime() + ModTime[kubeConfig] = file.ModTime() } // exec renew renewCmds := getRenewSubCommands(tmpDir) cmdtestutil.RunSubCommand(t, renewCmds, test.command, fmt.Sprintf("--cert-dir=%s", tmpDir)) - // read renewed certificates and check the file is modified + // check the file is modified for _, cert := range test.Certs { file, err := 
os.Stat(filepath.Join(tmpDir, fmt.Sprintf("%s.crt", cert.BaseName))) if err != nil { t.Fatalf("couldn't get certificate %s: %v", cert.Name, err) } - if createTime[cert.Name] == file.ModTime() { + if ModTime[cert.Name] == file.ModTime() { t.Errorf("certificate %s was not renewed as expected", cert.Name) } } - - // ead renewed kubeconfig files and check the file is modified for _, kubeConfig := range test.KubeconfigFiles { file, err := os.Stat(filepath.Join(tmpDir, kubeConfig)) if err != nil { t.Fatalf("couldn't get kubeconfig %s: %v", kubeConfig, err) } - if createTime[kubeConfig] == file.ModTime() { + if ModTime[kubeConfig] == file.ModTime() { t.Errorf("kubeconfig %s was not renewed as expected", kubeConfig) } } @@ -281,10 +268,22 @@ func TestRenewUsingCSR(t *testing.T) { defer os.RemoveAll(tmpDir) cert := &certs.KubeadmCertEtcdServer - renewCmds := getRenewSubCommands(tmpDir) - cmdtestutil.RunSubCommand(t, renewCmds, cert.Name, "--csr-only", "--csr-dir="+tmpDir) + cfg := testutil.GetDefaultInternalConfig(t) + cfg.CertificatesDir = tmpDir - if _, _, err := pkiutil.TryLoadCSRAndKeyFromDisk(tmpDir, cert.BaseName); err != nil { - t.Fatalf("couldn't load certificate %q: %v", cert.BaseName, err) + caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(cfg) + if err != nil { + t.Fatalf("couldn't write out CA %s: %v", certsphase.KubeadmCertEtcdCA.Name, err) + } + + if err := cert.CreateFromCA(cfg, caCert, caKey); err != nil { + t.Fatalf("couldn't write certificate %s: %v", cert.Name, err) + } + + renewCmds := getRenewSubCommands(tmpDir) + cmdtestutil.RunSubCommand(t, renewCmds, cert.Name, "--csr-only", "--csr-dir="+tmpDir, fmt.Sprintf("--cert-dir=%s", tmpDir)) + + if _, _, err := pkiutil.TryLoadCSRAndKeyFromDisk(tmpDir, cert.Name); err != nil { + t.Fatalf("couldn't load certificate %q: %v", cert.Name, err) } } diff --git a/cmd/kubeadm/app/phases/certs/certlist.go b/cmd/kubeadm/app/phases/certs/certlist.go index e58822f541e..65d58d3a88e 100644 --- a/cmd/kubeadm/app/phases/certs/certlist.go +++ b/cmd/kubeadm/app/phases/certs/certlist.go @@ -260,7 +260,7 @@ var ( // KubeadmCertKubeletClient is the definition of the cert used by the API server to access the kubelet. KubeadmCertKubeletClient = KubeadmCert{ Name: "apiserver-kubelet-client", - LongName: "Client certificate for the API server to connect to kubelet", + LongName: "certificate for the API server to connect to kubelet", BaseName: kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, CAName: "ca", config: certutil.Config{ @@ -284,7 +284,7 @@ var ( KubeadmCertFrontProxyClient = KubeadmCert{ Name: "front-proxy-client", BaseName: kubeadmconstants.FrontProxyClientCertAndKeyBaseName, - LongName: "client for the front proxy", + LongName: "certificate for the front proxy client", CAName: "front-proxy-ca", config: certutil.Config{ CommonName: kubeadmconstants.FrontProxyClientCertCommonName, @@ -322,7 +322,7 @@ var ( // KubeadmCertEtcdPeer is the definition of the cert used by etcd peers to access each other. KubeadmCertEtcdPeer = KubeadmCert{ Name: "etcd-peer", - LongName: "credentials for etcd nodes to communicate with each other", + LongName: "certificate for etcd nodes to communicate with each other", BaseName: kubeadmconstants.EtcdPeerCertAndKeyBaseName, CAName: "etcd-ca", config: certutil.Config{ @@ -336,7 +336,7 @@ var ( // KubeadmCertEtcdHealthcheck is the definition of the cert used by Kubernetes to check the health of the etcd server. 
KubeadmCertEtcdHealthcheck = KubeadmCert{ Name: "etcd-healthcheck-client", - LongName: "client certificate for liveness probes to healtcheck etcd", + LongName: "certificate for liveness probes to healthcheck etcd", BaseName: kubeadmconstants.EtcdHealthcheckClientCertAndKeyBaseName, CAName: "etcd-ca", config: certutil.Config{ @@ -348,7 +348,7 @@ var ( // KubeadmCertEtcdAPIClient is the definition of the cert used by the API server to access etcd. KubeadmCertEtcdAPIClient = KubeadmCert{ Name: "apiserver-etcd-client", - LongName: "client apiserver uses to access etcd", + LongName: "certificate the apiserver uses to access etcd", BaseName: kubeadmconstants.APIServerEtcdClientCertAndKeyBaseName, CAName: "etcd-ca", config: certutil.Config{ diff --git a/cmd/kubeadm/app/phases/certs/renewal/certsapi.go b/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go similarity index 88% rename from cmd/kubeadm/app/phases/certs/renewal/certsapi.go rename to cmd/kubeadm/app/phases/certs/renewal/apirenewer.go index bb61cdd6428..d118420ed15 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/certsapi.go +++ b/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go @@ -27,7 +27,7 @@ import ( certsapi "k8s.io/api/certificates/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" certstype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" certutil "k8s.io/client-go/util/cert" csrutil "k8s.io/client-go/util/certificate/csr" @@ -38,20 +38,20 @@ const certAPIPrefixName = "kubeadm-cert" var watchTimeout = 5 * time.Minute -// CertsAPIRenewal creates new certificates using the certs API -type CertsAPIRenewal struct { +// APIRenewer defines a certificate renewer implementation that uses the K8s certificate API +type APIRenewer struct { client certstype.CertificatesV1beta1Interface } -// NewCertsAPIRenawal takes a Kubernetes interface and returns a renewal Interface. -func NewCertsAPIRenawal(client kubernetes.Interface) Interface { - return &CertsAPIRenewal{ +// NewAPIRenewer returns a new certificate renewer implementation that uses the K8s certificate API +func NewAPIRenewer(client clientset.Interface) *APIRenewer { + return &APIRenewer{ client: client.CertificatesV1beta1(), } } -// Renew takes a certificate using the cert and key. -func (r *CertsAPIRenewal) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) { +// Renew a certificate using the K8s certificate API +func (r *APIRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) { reqTmp := &x509.CertificateRequest{ Subject: pkix.Name{ CommonName: cfg.CommonName, diff --git a/cmd/kubeadm/app/phases/certs/renewal/apirenewer_test.go b/cmd/kubeadm/app/phases/certs/renewal/apirenewer_test.go new file mode 100644 index 00000000000..761db0bd04a --- /dev/null +++ b/cmd/kubeadm/app/phases/certs/renewal/apirenewer_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package renewal + +import ( + "crypto" + "crypto/x509" + "testing" + "time" + + certsapi "k8s.io/api/certificates/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + fakecerts "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" + k8stesting "k8s.io/client-go/testing" + certutil "k8s.io/client-go/util/cert" + pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" +) + +func TestAPIRenewer(t *testing.T) { + caCertCfg := &certutil.Config{CommonName: "kubernetes"} + caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg) + if err != nil { + t.Fatalf("couldn't create CA: %v", err) + } + + client := &fakecerts.FakeCertificatesV1beta1{ + Fake: &k8stesting.Fake{}, + } + certReq := getCertReq(t, caCert, caKey) + certReqNoCert := certReq.DeepCopy() + certReqNoCert.Status.Certificate = nil + client.AddReactor("get", "certificatesigningrequests", defaultReactionFunc(certReq)) + watcher := watch.NewFakeWithChanSize(3, false) + watcher.Add(certReqNoCert) + watcher.Modify(certReqNoCert) + watcher.Modify(certReq) + client.AddWatchReactor("certificatesigningrequests", k8stesting.DefaultWatchReactor(watcher, nil)) + + // override the timeout so tests are faster + watchTimeout = time.Second + + certCfg := &certutil.Config{ + CommonName: "test-certs", + AltNames: certutil.AltNames{ + DNSNames: []string{"test-domain.space"}, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + + renewer := &APIRenewer{ + client: client, + } + + cert, _, err := renewer.Renew(certCfg) + if err != nil { + t.Fatalf("unexpected error renewing cert: %v", err) + } + + pool := x509.NewCertPool() + pool.AddCert(caCert) + + _, err = cert.Verify(x509.VerifyOptions{ + DNSName: "test-domain.space", + Roots: pool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) + if err != nil { + t.Errorf("couldn't verify new cert: %v", err) + } +} + +func defaultReactionFunc(obj runtime.Object) k8stesting.ReactionFunc { + return func(act k8stesting.Action) (bool, runtime.Object, error) { + return true, obj, nil + } +} + +func getCertReq(t *testing.T, caCert *x509.Certificate, caKey crypto.Signer) *certsapi.CertificateSigningRequest { + cert, _, err := pkiutil.NewCertAndKey(caCert, caKey, &certutil.Config{ + CommonName: "testcert", + AltNames: certutil.AltNames{ + DNSNames: []string{"test-domain.space"}, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) + if err != nil { + t.Fatalf("couldn't generate cert: %v", err) + } + + return &certsapi.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testcert", + }, + Status: certsapi.CertificateSigningRequestStatus{ + Conditions: []certsapi.CertificateSigningRequestCondition{ + { + Type: certsapi.CertificateApproved, + }, + }, + Certificate: pkiutil.EncodeCertPEM(cert), + }, + } +} diff --git a/cmd/kubeadm/app/phases/certs/renewal/filerenewal.go b/cmd/kubeadm/app/phases/certs/renewal/filerenewer.go similarity index 64% rename from cmd/kubeadm/app/phases/certs/renewal/filerenewal.go rename to cmd/kubeadm/app/phases/certs/renewal/filerenewer.go index 66fd9b3a3c1..5a71393d136 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/filerenewal.go +++ b/cmd/kubeadm/app/phases/certs/renewal/filerenewer.go @@ -24,21 +24,21 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" ) -// FileRenewal renews a certificate using local certs -type FileRenewal struct { +// FileRenewer define a certificate renewer implementation that uses given CA cert and key 
for generating new certificates +type FileRenewer struct { caCert *x509.Certificate caKey crypto.Signer } -// NewFileRenewal takes a certificate pair to construct the Interface. -func NewFileRenewal(caCert *x509.Certificate, caKey crypto.Signer) Interface { - return &FileRenewal{ +// NewFileRenewer returns a new certificate renewer that uses given CA cert and key for generating new certificates +func NewFileRenewer(caCert *x509.Certificate, caKey crypto.Signer) *FileRenewer { + return &FileRenewer{ caCert: caCert, caKey: caKey, } } -// Renew takes a certificate using the cert and key -func (r *FileRenewal) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) { +// Renew a certificate using a given CA cert and key +func (r *FileRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) { return pkiutil.NewCertAndKey(r.caCert, r.caKey, cfg) } diff --git a/cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go b/cmd/kubeadm/app/phases/certs/renewal/filerenewer_test.go similarity index 78% rename from cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go rename to cmd/kubeadm/app/phases/certs/renewal/filerenewer_test.go index 29d92e78c30..341e8cfc3bb 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go +++ b/cmd/kubeadm/app/phases/certs/renewal/filerenewer_test.go @@ -21,18 +21,13 @@ import ( "testing" certutil "k8s.io/client-go/util/cert" - "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" ) -func TestFileRenew(t *testing.T) { - caCertCfg := &certutil.Config{CommonName: "kubernetes"} - caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg) - if err != nil { - t.Fatalf("couldn't create CA: %v", err) - } - - fr := NewFileRenewal(caCert, caKey) +func TestFileRenewer(t *testing.T) { + // creates a File renewer using a test Certificate authority + fr := NewFileRenewer(testCACert, testCAKey) + // renews a certificate certCfg := &certutil.Config{ CommonName: "test-certs", AltNames: certutil.AltNames{ @@ -46,8 +41,9 @@ func TestFileRenew(t *testing.T) { t.Fatalf("unexpected error renewing cert: %v", err) } + // verify the renewed certificate pool := x509.NewCertPool() - pool.AddCert(caCert) + pool.AddCert(testCACert) _, err = cert.Verify(x509.VerifyOptions{ DNSName: "test-domain.space", diff --git a/cmd/kubeadm/app/phases/certs/renewal/interface.go b/cmd/kubeadm/app/phases/certs/renewal/interface.go deleted file mode 100644 index 3da747d40c5..00000000000 --- a/cmd/kubeadm/app/phases/certs/renewal/interface.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package renewal - -import ( - "crypto" - "crypto/x509" - - certutil "k8s.io/client-go/util/cert" -) - -// Interface represents a standard way to renew a certificate. 
-type Interface interface { - Renew(*certutil.Config) (*x509.Certificate, crypto.Signer, error) -} diff --git a/cmd/kubeadm/app/phases/certs/renewal/manager.go b/cmd/kubeadm/app/phases/certs/renewal/manager.go new file mode 100644 index 00000000000..88a0bec54d7 --- /dev/null +++ b/cmd/kubeadm/app/phases/certs/renewal/manager.go @@ -0,0 +1,288 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package renewal + +import ( + "crypto/x509" + "sort" + + "github.com/pkg/errors" + clientset "k8s.io/client-go/kubernetes" + certutil "k8s.io/client-go/util/cert" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" +) + +// Manager can be used to coordinate certificate renewal and related processes, +// like CSR generation or checking certificate expiration +type Manager struct { + // cfg holds the kubeadm ClusterConfiguration + cfg *kubeadmapi.ClusterConfiguration + + // kubernetesDir holds the directory where kubeConfig files are stored + kubernetesDir string + + // certificates contains the certificateRenewHandler controlled by this manager + certificates map[string]*CertificateRenewHandler +} + +// CertificateRenewHandler defines required info for renewing a certificate +type CertificateRenewHandler struct { + // Name of the certificate to be used for UX. + // This value can be used to trigger operations on this certificate + Name string + + // LongName of the certificate to be used for UX + LongName string + + // FileName defines the name (or the BaseName) of the certificate file + FileName string + + // CABaseName define the base name for the CA that should be used for certificate renewal + CABaseName string + + // readwriter define a CertificateReadWriter to be used for certificate renewal + readwriter certificateReadWriter +} + +// NewManager return a new certificate renewal manager ready for handling certificates in the cluster +func NewManager(cfg *kubeadmapi.ClusterConfiguration, kubernetesDir string) (*Manager, error) { + rm := &Manager{ + cfg: cfg, + kubernetesDir: kubernetesDir, + certificates: map[string]*CertificateRenewHandler{}, + } + + // gets the list of certificates that are expected according to the current cluster configuration + certListFunc := certsphase.GetDefaultCertList + if cfg.Etcd.External != nil { + certListFunc = certsphase.GetCertsWithoutEtcd + } + certTree, err := certListFunc().AsMap().CertTree() + if err != nil { + return nil, err + } + + // create a CertificateRenewHandler for each signed certificate in the certificate tree; + // NB. 
we are not offering support for renewing CAs; this would cause serious consequences + for ca, certs := range certTree { + for _, cert := range certs { + // create a ReadWriter for certificates stored in the K8s local PKI + pkiReadWriter := newPKICertificateReadWriter(rm.cfg.CertificatesDir, cert.BaseName) + + // adds the certificateRenewHandler. + // PKI certificates are indexed by name, which is a well-known constant defined + // in the certsphase package and that can be reused across all the kubeadm codebase + rm.certificates[cert.Name] = &CertificateRenewHandler{ + Name: cert.Name, + LongName: cert.LongName, + FileName: cert.BaseName, + CABaseName: ca.BaseName, //Nb. this is a path for etcd certs (they are stored in a subfolder) + readwriter: pkiReadWriter, + } + } + } + + // gets the list of kubeconfig files whose embedded client certificates should be considered for renewal + kubeConfigs := []struct { + longName string + fileName string + }{ + { + longName: "certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself", + fileName: kubeadmconstants.AdminKubeConfigFileName, + }, + { + longName: "certificate embedded in the kubeconfig file for the controller manager to use", + fileName: kubeadmconstants.ControllerManagerKubeConfigFileName, + }, + { + longName: "certificate embedded in the kubeconfig file for the scheduler to use", + fileName: kubeadmconstants.SchedulerKubeConfigFileName, + }, + //NB. we are excluding KubeletKubeConfig from renewal because management of this certificate is delegated to kubelet + } + + // create a CertificateRenewHandler for each kubeConfig file + for _, kubeConfig := range kubeConfigs { + // create a ReadWriter for certificates embedded in kubeConfig files + kubeConfigReadWriter := newKubeconfigReadWriter(kubernetesDir, kubeConfig.fileName) + + // adds the certificateRenewHandler. + // Certificates embedded in kubeConfig files are indexed by fileName, which is a well-known constant defined + // in the kubeadm constants package and that can be reused across all the kubeadm codebase + rm.certificates[kubeConfig.fileName] = &CertificateRenewHandler{ + Name: kubeConfig.fileName, // we are using fileName as name, because there is nothing similar outside + LongName: kubeConfig.longName, + FileName: kubeConfig.fileName, + CABaseName: kubeadmconstants.CACertAndKeyBaseName, // all certificates in kubeConfig files are signed by the Kubernetes CA + readwriter: kubeConfigReadWriter, + } + } + + return rm, nil +} + +// Certificates returns the list of certificates controlled by this Manager +func (rm *Manager) Certificates() []*CertificateRenewHandler { + certificates := []*CertificateRenewHandler{} + for _, h := range rm.certificates { + certificates = append(certificates, h) + } + + sort.Slice(certificates, func(i, j int) bool { return certificates[i].Name < certificates[j].Name }) + + return certificates +} + +// RenewUsingLocalCA executes certificate renewal using local certificate authorities for generating new certs. +// For PKI certificates, use the name defined in the certsphase package, while for certificates +// embedded in the kubeConfig files, use the kubeConfig file name defined in the kubeadm constants package. +// If you use the CertificateRenewHandler returned by Certificates func, handler.Name already contains the right value. 
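+// +// A minimal usage sketch (illustrative only; internalcfg and kdir are assumed names for a loaded kubeadm +// InitConfiguration and the directory holding the kubeconfig files, mirroring how cmd/alpha/certs.go calls the manager): +// +//   rm, err := NewManager(&internalcfg.ClusterConfiguration, kdir) +//   if err != nil { ... } +//   renewed, err := rm.RenewUsingLocalCA("apiserver") +//   // renewed == false signals an external CA (CA certificate present without its key); nothing is written in that case 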
+func (rm *Manager) RenewUsingLocalCA(name string) (bool, error) { + handler, ok := rm.certificates[name] + if !ok { + return false, errors.Errorf("%s is not a valid certificate for this cluster", name) + } + + // checks if we are in the external CA case (CA certificate provided without the certificate key) + var externalCA bool + switch handler.CABaseName { + case kubeadmconstants.CACertAndKeyBaseName: + externalCA, _ = certsphase.UsingExternalCA(rm.cfg) + case kubeadmconstants.FrontProxyCACertAndKeyBaseName: + externalCA, _ = certsphase.UsingExternalFrontProxyCA(rm.cfg) + case kubeadmconstants.EtcdCACertAndKeyBaseName: + externalCA = false + default: + return false, errors.Errorf("unknown certificate authority %s", handler.CABaseName) + } + + // in case of an external CA it is not possible to renew certificates, so return early + if externalCA { + return false, nil + } + + // reads the current certificate + cert, err := handler.readwriter.Read() + if err != nil { + return false, err + } + + // extract the certificate config + cfg := certToConfig(cert) + + // reads the CA + caCert, caKey, err := certsphase.LoadCertificateAuthority(rm.cfg.CertificatesDir, handler.CABaseName) + if err != nil { + return false, err + } + + // create a new certificate with the same config + newCert, newKey, err := NewFileRenewer(caCert, caKey).Renew(cfg) + if err != nil { + return false, errors.Wrapf(err, "failed to renew certificate %s", name) + } + + // writes the new certificate to disk + err = handler.readwriter.Write(newCert, newKey) + if err != nil { + return false, err + } + + return true, nil +} + +// RenewUsingCSRAPI executes certificate renewal using the K8s certificate API. +// For PKI certificates, use the name defined in the certsphase package, while for certificates +// embedded in the kubeConfig files, use the kubeConfig file name defined in the kubeadm constants package. +// If you use the CertificateRenewHandler returned by Certificates func, handler.Name already contains the right value. +func (rm *Manager) RenewUsingCSRAPI(name string, client clientset.Interface) error { + handler, ok := rm.certificates[name] + if !ok { + return errors.Errorf("%s is not a valid certificate for this cluster", name) + } + + // reads the current certificate + cert, err := handler.readwriter.Read() + if err != nil { + return err + } + + // extract the certificate config + cfg := certToConfig(cert) + + // create a new certificate with the same config + newCert, newKey, err := NewAPIRenewer(client).Renew(cfg) + if err != nil { + return errors.Wrapf(err, "failed to renew certificate %s", name) + } + + // writes the new certificate to disk + err = handler.readwriter.Write(newCert, newKey) + if err != nil { + return err + } + + return nil +} + +// CreateRenewCSR generates a CSR request for certificate renewal. +// For PKI certificates, use the name defined in the certsphase package, while for certificates +// embedded in the kubeConfig files, use the kubeConfig file name defined in the kubeadm constants package. +// If you use the CertificateRenewHandler returned by Certificates func, handler.Name already contains the right value. 
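+// +// A minimal usage sketch (illustrative only; rm is assumed to be a Manager built with NewManager and outdir a writable directory): +// +//   if err := rm.CreateRenewCSR("etcd-server", outdir); err != nil { ... } +//   // outdir now contains etcd-server.key and etcd-server.csr, following the WriteKey/WriteCSR calls below 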
+func (rm *Manager) CreateRenewCSR(name, outdir string) error { + handler, ok := rm.certificates[name] + if !ok { + return errors.Errorf("%s is not a known certificate", name) + } + + // reads the current certificate + cert, err := handler.readwriter.Read() + if err != nil { + return err + } + + // extracts the certificate config + cfg := certToConfig(cert) + + // generates the CSR request and saves it + csr, key, err := pkiutil.NewCSRAndKey(cfg) + if err != nil { + return errors.Wrapf(err, "failure while generating %s CSR and key", name) + } + + if err := pkiutil.WriteKey(outdir, name, key); err != nil { + return errors.Wrapf(err, "failure while saving %s key", name) + } + + if err := pkiutil.WriteCSR(outdir, name, csr); err != nil { + return errors.Wrapf(err, "failure while saving %s CSR", name) + } + + return nil +} + +func certToConfig(cert *x509.Certificate) *certutil.Config { + return &certutil.Config{ + CommonName: cert.Subject.CommonName, + Organization: cert.Subject.Organization, + AltNames: certutil.AltNames{ + IPs: cert.IPAddresses, + DNSNames: cert.DNSNames, + }, + Usages: cert.ExtKeyUsage, + } +} diff --git a/cmd/kubeadm/app/phases/certs/renewal/manager_test.go b/cmd/kubeadm/app/phases/certs/renewal/manager_test.go new file mode 100644 index 00000000000..ff2cb3572e8 --- /dev/null +++ b/cmd/kubeadm/app/phases/certs/renewal/manager_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package renewal + +import ( + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "net" + "os" + "path/filepath" + "testing" + "time" + + certutil "k8s.io/client-go/util/cert" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" + "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" + testutil "k8s.io/kubernetes/cmd/kubeadm/test" +) + +var ( + testCACertCfg = &certutil.Config{CommonName: "kubernetes"} + + testCACert, testCAKey, _ = pkiutil.NewCertificateAuthority(testCACertCfg) + + testCertCfg = &certutil.Config{ + CommonName: "test-common-name", + Organization: []string{"sig-cluster-lifecycle"}, + AltNames: certutil.AltNames{ + IPs: []net.IP{net.ParseIP("10.100.0.1")}, + DNSNames: []string{"test-domain.space"}, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +) + +func TestNewManager(t *testing.T) { + tests := []struct { + name string + cfg *kubeadmapi.ClusterConfiguration + expectedCertificates int + }{ + { + name: "cluster with local etcd", + cfg: &kubeadmapi.ClusterConfiguration{}, + expectedCertificates: 10, //[admin apiserver apiserver-etcd-client apiserver-kubelet-client controller-manager etcd/healthcheck-client etcd/peer etcd/server front-proxy-client scheduler] + }, + { + name: "cluster with external etcd", + cfg: &kubeadmapi.ClusterConfiguration{ + Etcd: kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{}, + }, + }, + expectedCertificates: 6, // [admin apiserver apiserver-kubelet-client controller-manager front-proxy-client scheduler] + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rm, err := NewManager(test.cfg, "") + if err != nil { + t.Fatalf("Failed to create the certificate renewal manager: %v", err) + } + + if len(rm.Certificates()) != test.expectedCertificates { + t.Errorf("Expected %d certificates, saw %d", test.expectedCertificates, len(rm.Certificates())) + } + }) + } +} + +func TestRenewUsingLocalCA(t *testing.T) { + dir := testutil.SetupTempDir(t) + defer os.RemoveAll(dir) + + if err := pkiutil.WriteCertAndKey(dir, "ca", testCACert, testCAKey); err != nil { + t.Fatalf("couldn't write out CA certificate to %s", dir) + } + + cfg := &kubeadmapi.ClusterConfiguration{ + CertificatesDir: dir, + } + rm, err := NewManager(cfg, dir) + if err != nil { + t.Fatalf("Failed to create the certificate renewal manager: %v", err) + } + + tests := []struct { + name string + certName string + createCertFunc func() *x509.Certificate + }{ + { + name: "Certificate renewal for a PKI certificate", + certName: "apiserver", + createCertFunc: func() *x509.Certificate { + return writeTestCertificate(t, dir, "apiserver", testCACert, testCAKey) + }, + }, + { + name: "Certificate renewal for a certificate embedded in a kubeconfig file", + certName: "admin.conf", + createCertFunc: func() *x509.Certificate { + return writeTestKubeconfig(t, dir, "admin.conf", testCACert, testCAKey) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cert := test.createCertFunc() + + time.Sleep(1 * time.Second) + + _, err := rm.RenewUsingLocalCA(test.certName) + if err != nil { + t.Fatalf("error renewing certificate: %v", err) + } + + newCert, err := rm.certificates[test.certName].readwriter.Read() + if err != nil { + t.Fatalf("error reading renewed certificate: %v", err) + } + + if newCert.SerialNumber.Cmp(cert.SerialNumber) == 0 { + t.Fatal("expected new certificate, but renewed certificate has same serial number") + } + + if 
!newCert.NotAfter.After(cert.NotAfter) { + t.Fatalf("expected new certificate with updated expiration, but renewed certificate has same NotAfter value: saw %s, expected greather than %s", newCert.NotAfter, cert.NotAfter) + } + + certtestutil.AssertCertificateIsSignedByCa(t, newCert, testCACert) + certtestutil.AssertCertificateHasClientAuthUsage(t, newCert) + certtestutil.AssertCertificateHasOrganizations(t, newCert, testCertCfg.Organization...) + certtestutil.AssertCertificateHasCommonName(t, newCert, testCertCfg.CommonName) + certtestutil.AssertCertificateHasDNSNames(t, newCert, testCertCfg.AltNames.DNSNames...) + certtestutil.AssertCertificateHasIPAddresses(t, newCert, testCertCfg.AltNames.IPs...) + }) + } +} + +func TestCreateRenewCSR(t *testing.T) { + dir := testutil.SetupTempDir(t) + defer os.RemoveAll(dir) + + outdir := filepath.Join(dir, "out") + + if err := os.MkdirAll(outdir, 0755); err != nil { + t.Fatalf("couldn't create %s", outdir) + } + + if err := pkiutil.WriteCertAndKey(dir, "ca", testCACert, testCAKey); err != nil { + t.Fatalf("couldn't write out CA certificate to %s", dir) + } + + cfg := &kubeadmapi.ClusterConfiguration{ + CertificatesDir: dir, + } + rm, err := NewManager(cfg, dir) + if err != nil { + t.Fatalf("Failed to create the certificate renewal manager: %v", err) + } + + tests := []struct { + name string + certName string + createCertFunc func() *x509.Certificate + }{ + { + name: "Creation of a CSR request for renewal of a PKI certificate", + certName: "apiserver", + createCertFunc: func() *x509.Certificate { + return writeTestCertificate(t, dir, "apiserver", testCACert, testCAKey) + }, + }, + { + name: "Creation of a CSR request for renewal of a certificate embedded in a kubeconfig file", + certName: "admin.conf", + createCertFunc: func() *x509.Certificate { + return writeTestKubeconfig(t, dir, "admin.conf", testCACert, testCAKey) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.createCertFunc() + + time.Sleep(1 * time.Second) + + err := rm.CreateRenewCSR(test.certName, outdir) + if err != nil { + t.Fatalf("error renewing certificate: %v", err) + } + + file := fmt.Sprintf("%s.key", test.certName) + if _, err := os.Stat(filepath.Join(outdir, file)); os.IsNotExist(err) { + t.Errorf("Expected file %s does not exist", file) + } + + file = fmt.Sprintf("%s.csr", test.certName) + if _, err := os.Stat(filepath.Join(outdir, file)); os.IsNotExist(err) { + t.Errorf("Expected file %s does not exist", file) + } + }) + } + +} + +func TestCertToConfig(t *testing.T) { + expectedConfig := &certutil.Config{ + CommonName: "test-common-name", + Organization: []string{"sig-cluster-lifecycle"}, + AltNames: certutil.AltNames{ + IPs: []net.IP{net.ParseIP("10.100.0.1")}, + DNSNames: []string{"test-domain.space"}, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + + cert := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-common-name", + Organization: []string{"sig-cluster-lifecycle"}, + }, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + DNSNames: []string{"test-domain.space"}, + IPAddresses: []net.IP{net.ParseIP("10.100.0.1")}, + } + + cfg := certToConfig(cert) + + if cfg.CommonName != expectedConfig.CommonName { + t.Errorf("expected common name %q, got %q", expectedConfig.CommonName, cfg.CommonName) + } + + if len(cfg.Organization) != 1 || cfg.Organization[0] != expectedConfig.Organization[0] { + t.Errorf("expected organization %v, got %v", expectedConfig.Organization, cfg.Organization) + + } + + 
if len(cfg.Usages) != 1 || cfg.Usages[0] != expectedConfig.Usages[0] { + t.Errorf("expected ext key usage %v, got %v", expectedConfig.Usages, cfg.Usages) + } + + if len(cfg.AltNames.IPs) != 1 || cfg.AltNames.IPs[0].String() != expectedConfig.AltNames.IPs[0].String() { + t.Errorf("expected SAN IPs %v, got %v", expectedConfig.AltNames.IPs, cfg.AltNames.IPs) + } + + if len(cfg.AltNames.DNSNames) != 1 || cfg.AltNames.DNSNames[0] != expectedConfig.AltNames.DNSNames[0] { + t.Errorf("expected SAN DNSNames %v, got %v", expectedConfig.AltNames.DNSNames, cfg.AltNames.DNSNames) + } +} diff --git a/cmd/kubeadm/app/phases/certs/renewal/readwriter.go b/cmd/kubeadm/app/phases/certs/renewal/readwriter.go new file mode 100644 index 00000000000..c6040793d71 --- /dev/null +++ b/cmd/kubeadm/app/phases/certs/renewal/readwriter.go @@ -0,0 +1,173 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package renewal + +import ( + "crypto" + "crypto/x509" + "path/filepath" + + "github.com/pkg/errors" + + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" +) + +// certificateReadWriter defines the behavior of a component that +// read or write a certificate stored/embedded in a file +type certificateReadWriter interface { + // Read a certificate stored/embedded in a file + Read() (*x509.Certificate, error) + + // Write (update) a certificate stored/embedded in a file + Write(*x509.Certificate, crypto.Signer) error +} + +// pkiCertificateReadWriter defines a certificateReadWriter for certificate files +// in the K8s pki managed by kubeadm +type pkiCertificateReadWriter struct { + baseName string + certificateDir string +} + +// newPKICertificateReadWriter return a new pkiCertificateReadWriter +func newPKICertificateReadWriter(certificateDir string, baseName string) *pkiCertificateReadWriter { + return &pkiCertificateReadWriter{ + baseName: baseName, + certificateDir: certificateDir, + } +} + +// Read a certificate from a file the K8s pki managed by kubeadm +func (rw *pkiCertificateReadWriter) Read() (*x509.Certificate, error) { + certificatePath, _ := pkiutil.PathsForCertAndKey(rw.certificateDir, rw.baseName) + certs, err := certutil.CertsFromFile(certificatePath) + if err != nil { + return nil, errors.Wrapf(err, "failed to load existing certificate %s", rw.baseName) + } + + if len(certs) != 1 { + return nil, errors.Errorf("wanted exactly one certificate, got %d", len(certs)) + } + + return certs[0], nil +} + +// Write a certificate to files in the K8s pki managed by kubeadm +func (rw *pkiCertificateReadWriter) Write(newCert *x509.Certificate, newKey crypto.Signer) error { + if err := pkiutil.WriteCertAndKey(rw.certificateDir, rw.baseName, newCert, newKey); err != nil { + return errors.Wrapf(err, "failed to write new certificate %s", rw.baseName) + } + return nil +} + +// kubeConfigReadWriter defines a certificateReadWriter 
for certificate files +// embedded in the kubeConfig files managed by kubeadm, and more specifically +// for the client certificate of the AuthInfo +type kubeConfigReadWriter struct { + kubernetesDir string + kubeConfigFileName string + kubeConfigFilePath string + kubeConfig *clientcmdapi.Config +} + +// newKubeconfigReadWriter returns a new kubeConfigReadWriter +func newKubeconfigReadWriter(kubernetesDir string, kubeConfigFileName string) *kubeConfigReadWriter { + return &kubeConfigReadWriter{ + kubernetesDir: kubernetesDir, + kubeConfigFileName: kubeConfigFileName, + kubeConfigFilePath: filepath.Join(kubernetesDir, kubeConfigFileName), + } +} + +// Read a certificate embedded in a kubeConfig file managed by kubeadm. +// Please note that the kubeConfig file itself is kept in the ReadWriter state thus allowing +// to preserve the attributes (Context, Servers, AuthInfo etc.) +func (rw *kubeConfigReadWriter) Read() (*x509.Certificate, error) { + // try to load the kubeConfig file + kubeConfig, err := clientcmd.LoadFromFile(rw.kubeConfigFilePath) + if err != nil { + return nil, errors.Wrapf(err, "failed to load kubeConfig file %s", rw.kubeConfigFilePath) + } + + // get current context + if _, ok := kubeConfig.Contexts[kubeConfig.CurrentContext]; !ok { + return nil, errors.Errorf("invalid kubeConfig file %s: missing context %s", rw.kubeConfigFilePath, kubeConfig.CurrentContext) + } + + // get cluster info for current context and ensure a server certificate is embedded in it + clusterName := kubeConfig.Contexts[kubeConfig.CurrentContext].Cluster + if _, ok := kubeConfig.Clusters[clusterName]; !ok { + return nil, errors.Errorf("invalid kubeConfig file %s: missing cluster %s", rw.kubeConfigFilePath, clusterName) + } + + cluster := kubeConfig.Clusters[clusterName] + if len(cluster.CertificateAuthorityData) == 0 { + return nil, errors.Errorf("kubeConfig file %s does not have an embedded server certificate", rw.kubeConfigFilePath) + } + + // get auth info for current context and ensure a client certificate is embedded in it + authInfoName := kubeConfig.Contexts[kubeConfig.CurrentContext].AuthInfo + if _, ok := kubeConfig.AuthInfos[authInfoName]; !ok { + return nil, errors.Errorf("invalid kubeConfig file %s: missing authInfo %s", rw.kubeConfigFilePath, authInfoName) + } + + authInfo := kubeConfig.AuthInfos[authInfoName] + if len(authInfo.ClientCertificateData) == 0 { + return nil, errors.Errorf("kubeConfig file %s does not have an embedded client certificate", rw.kubeConfigFilePath) + } + + // parse the client certificate, retrieve the cert config and then renew it + certs, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData) + if err != nil { + return nil, errors.Wrapf(err, "kubeConfig file %s does not contain a valid client certificate", rw.kubeConfigFilePath) + } + + rw.kubeConfig = kubeConfig + + return certs[0], nil +} + +// Write a certificate embedded in a kubeConfig file managed by kubeadm +// Please note that all the other attributes of the kubeConfig file are preserved, but this +// requires calling Read before Write +func (rw *kubeConfigReadWriter) Write(newCert *x509.Certificate, newKey crypto.Signer) error { + // check if Read was called before Write + if rw.kubeConfig == nil { + return errors.Errorf("failed to Write kubeConfig file with renewed certs. 
It is necessary to call Read before Write") + } + + // encodes the new key + encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(newKey) + if err != nil { + return errors.Wrapf(err, "failed to marshal private key to PEM") + } + + // get auth info for current context and ensure a client certificate is embedded in it + authInfoName := rw.kubeConfig.Contexts[rw.kubeConfig.CurrentContext].AuthInfo + + // create a kubeConfig copy with the new client certs + newConfig := rw.kubeConfig.DeepCopy() + newConfig.AuthInfos[authInfoName].ClientKeyData = encodedClientKey + newConfig.AuthInfos[authInfoName].ClientCertificateData = pkiutil.EncodeCertPEM(newCert) + + // writes the kubeConfig to disk + return clientcmd.WriteToFile(*newConfig, rw.kubeConfigFilePath) +} diff --git a/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go b/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go new file mode 100644 index 00000000000..cccef9ec3dc --- /dev/null +++ b/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package renewal + +import ( + "crypto" + "crypto/x509" + "net" + "os" + "path/filepath" + "testing" + + "k8s.io/client-go/tools/clientcmd" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" + pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" + testutil "k8s.io/kubernetes/cmd/kubeadm/test" +) + +func TestPKICertificateReadWriter(t *testing.T) { + // creates a tmp folder + dir := testutil.SetupTempDir(t) + defer os.RemoveAll(dir) + + // creates a certificate + cert := writeTestCertificate(t, dir, "test", testCACert, testCAKey) + + // Creates a pkiCertificateReadWriter + pkiReadWriter := newPKICertificateReadWriter(dir, "test") + + // Reads the certificate + readCert, err := pkiReadWriter.Read() + if err != nil { + t.Fatalf("couldn't read certificate: %v", err) + } + + // Check if the certificate read from disk is equal to the original one + if !cert.Equal(readCert) { + t.Errorf("read cert does not match with expected cert") + } + + // Create a new cert + newCert, newkey, err := pkiutil.NewCertAndKey(testCACert, testCAKey, testCertCfg) + if err != nil { + t.Fatalf("couldn't generate certificate: %v", err) + } + + // Writes the new certificate + err = pkiReadWriter.Write(newCert, newkey) + if err != nil { + t.Fatalf("couldn't write new certificate: %v", err) + } + + // Reads back the new certificate + readCert, err = pkiReadWriter.Read() + if err != nil { + t.Fatalf("couldn't read new certificate: %v", err) + } + + // Check if the new certificate read from disk is equal to the original one + if !newCert.Equal(readCert) { + t.Error("read cert does not match with expected new cert") + } +} + +func TestKubeconfigReadWriter(t *testing.T) { + // creates a tmp folder + dir := testutil.SetupTempDir(t) + defer os.RemoveAll(dir) + + // creates a certificate and then embeds it into a kubeconfig file + cert := 
writeTestKubeconfig(t, dir, "test", testCACert, testCAKey) + + // Creates a KubeconfigReadWriter + kubeconfigReadWriter := newKubeconfigReadWriter(dir, "test") + + // Reads the certificate embedded in a kubeconfig + readCert, err := kubeconfigReadWriter.Read() + if err != nil { + t.Fatalf("couldn't read embedded certificate: %v", err) + } + + // Check if the certificate read from disk is equal to the original one + if !cert.Equal(readCert) { + t.Errorf("read cert does not match with expected cert") + } + + // Create a new cert + newCert, newkey, err := pkiutil.NewCertAndKey(testCACert, testCAKey, testCertCfg) + if err != nil { + t.Fatalf("couldn't generate certificate: %v", err) + } + + // Writes the new certificate embedded in a kubeconfig + err = kubeconfigReadWriter.Write(newCert, newkey) + if err != nil { + t.Fatalf("couldn't write new embedded certificate: %v", err) + } + + // Reads back the new certificate embedded in a kubeconfig writer + readCert, err = kubeconfigReadWriter.Read() + if err != nil { + t.Fatalf("couldn't read new embedded certificate: %v", err) + } + + // Check if the new certificate read from disk is equal to the original one + if !newCert.Equal(readCert) { + t.Errorf("read cert does not match with expected new cert") + } +} + +// writeTestCertificate is a utility for creating a test certificate +func writeTestCertificate(t *testing.T, dir, name string, caCert *x509.Certificate, caKey crypto.Signer) *x509.Certificate { + cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, testCertCfg) + if err != nil { + t.Fatalf("couldn't generate certificate: %v", err) + } + + if err := pkiutil.WriteCertAndKey(dir, name, cert, key); err != nil { + t.Fatalf("couldn't write out certificate %s to %s", name, dir) + } + + return cert +} + +// writeTestKubeconfig is a utility for creating a test kubeconfig with an embedded certificate +func writeTestKubeconfig(t *testing.T, dir, name string, caCert *x509.Certificate, caKey crypto.Signer) *x509.Certificate { + + cfg := &certutil.Config{ + CommonName: "test-common-name", + Organization: []string{"sig-cluster-lifecycle"}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + AltNames: certutil.AltNames{ + IPs: []net.IP{net.ParseIP("10.100.0.1")}, + DNSNames: []string{"test-domain.space"}, + }, + } + cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, cfg) + if err != nil { + t.Fatalf("couldn't generate certificate: %v", err) + } + + encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(key) + if err != nil { + t.Fatalf("failed to marshal private key to PEM: %v", err) + } + + certificateAuthorityData := pkiutil.EncodeCertPEM(caCert) + + config := kubeconfigutil.CreateWithCerts( + "https://localhost:1234", + "kubernetes-test", + "user-test", + certificateAuthorityData, + encodedClientKey, + pkiutil.EncodeCertPEM(cert), + ) + + if err := clientcmd.WriteToFile(*config, filepath.Join(dir, name)); err != nil { + t.Fatalf("couldn't write out certificate") + } + + return cert +} diff --git a/cmd/kubeadm/app/phases/certs/renewal/renewal.go b/cmd/kubeadm/app/phases/certs/renewal/renewal.go deleted file mode 100644 index 37dbbfb902e..00000000000 --- a/cmd/kubeadm/app/phases/certs/renewal/renewal.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package renewal - -import ( - "crypto/x509" - "path/filepath" - - "github.com/pkg/errors" - "k8s.io/client-go/tools/clientcmd" - certutil "k8s.io/client-go/util/cert" - "k8s.io/client-go/util/keyutil" - "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" -) - -// RenewExistingCert loads a certificate file, uses the renew interface to renew it, -// and saves the resulting certificate and key over the old one. -func RenewExistingCert(certsDir, baseName string, impl Interface) error { - certificatePath, _ := pkiutil.PathsForCertAndKey(certsDir, baseName) - certs, err := certutil.CertsFromFile(certificatePath) - if err != nil { - return errors.Wrapf(err, "failed to load existing certificate %s", baseName) - } - - if len(certs) != 1 { - return errors.Errorf("wanted exactly one certificate, got %d", len(certs)) - } - - cfg := certToConfig(certs[0]) - newCert, newKey, err := impl.Renew(cfg) - if err != nil { - return errors.Wrapf(err, "failed to renew certificate %s", baseName) - } - - if err := pkiutil.WriteCertAndKey(certsDir, baseName, newCert, newKey); err != nil { - return errors.Wrapf(err, "failed to write new certificate %s", baseName) - } - return nil -} - -// RenewEmbeddedClientCert loads a kubeconfig file, uses the renew interface to renew the client certificate -// embedded in it, and then saves the resulting kubeconfig and key over the old one. -func RenewEmbeddedClientCert(kubeConfigFileDir, kubeConfigFileName string, impl Interface) error { - kubeConfigFilePath := filepath.Join(kubeConfigFileDir, kubeConfigFileName) - - // try to load the kubeconfig file - kubeconfig, err := clientcmd.LoadFromFile(kubeConfigFilePath) - if err != nil { - return errors.Wrapf(err, "failed to load kubeconfig file %s", kubeConfigFilePath) - } - - // get current context - if _, ok := kubeconfig.Contexts[kubeconfig.CurrentContext]; !ok { - return errors.Errorf("invalid kubeconfig file %s: missing context %s", kubeConfigFilePath, kubeconfig.CurrentContext) - } - - // get cluster info for current context and ensure a server certificate is embedded in it - clusterName := kubeconfig.Contexts[kubeconfig.CurrentContext].Cluster - if _, ok := kubeconfig.Clusters[clusterName]; !ok { - return errors.Errorf("invalid kubeconfig file %s: missing cluster %s", kubeConfigFilePath, clusterName) - } - - cluster := kubeconfig.Clusters[clusterName] - if len(cluster.CertificateAuthorityData) == 0 { - return errors.Errorf("kubeconfig file %s does not have and embedded server certificate", kubeConfigFilePath) - } - - // get auth info for current context and ensure a client certificate is embedded in it - authInfoName := kubeconfig.Contexts[kubeconfig.CurrentContext].AuthInfo - if _, ok := kubeconfig.AuthInfos[authInfoName]; !ok { - return errors.Errorf("invalid kubeconfig file %s: missing authInfo %s", kubeConfigFilePath, authInfoName) - } - - authInfo := kubeconfig.AuthInfos[authInfoName] - if len(authInfo.ClientCertificateData) == 0 { - return errors.Errorf("kubeconfig file %s does not have and embedded client certificate", kubeConfigFilePath) - } - - // parse the client certificate, retrive the cert config and then renew it - 
certs, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData) - if err != nil { - return errors.Wrapf(err, "kubeconfig file %s does not contain a valid client certificate", kubeConfigFilePath) - } - - cfg := certToConfig(certs[0]) - - newCert, newKey, err := impl.Renew(cfg) - if err != nil { - return errors.Wrapf(err, "failed to renew certificate embedded in %s", kubeConfigFilePath) - } - - // encodes the new key - encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(newKey) - if err != nil { - return errors.Wrapf(err, "failed to marshal private key to PEM") - } - - // create a kubeconfig copy with the new client certs - newConfig := kubeconfig.DeepCopy() - newConfig.AuthInfos[authInfoName].ClientKeyData = encodedClientKey - newConfig.AuthInfos[authInfoName].ClientCertificateData = pkiutil.EncodeCertPEM(newCert) - - // writes the kubeconfig to disk - return clientcmd.WriteToFile(*newConfig, kubeConfigFilePath) -} - -func certToConfig(cert *x509.Certificate) *certutil.Config { - return &certutil.Config{ - CommonName: cert.Subject.CommonName, - Organization: cert.Subject.Organization, - AltNames: certutil.AltNames{ - IPs: cert.IPAddresses, - DNSNames: cert.DNSNames, - }, - Usages: cert.ExtKeyUsage, - } -} diff --git a/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go b/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go deleted file mode 100644 index 1418e26254a..00000000000 --- a/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go +++ /dev/null @@ -1,359 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package renewal - -import ( - "bytes" - "crypto" - "crypto/x509" - "crypto/x509/pkix" - "net" - "os" - "path/filepath" - "testing" - "time" - - certsapi "k8s.io/api/certificates/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - fakecerts "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" - k8stesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/clientcmd" - certutil "k8s.io/client-go/util/cert" - "k8s.io/client-go/util/keyutil" - certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" - testutil "k8s.io/kubernetes/cmd/kubeadm/test" -) - -func TestRenewImplementations(t *testing.T) { - caCertCfg := &certutil.Config{CommonName: "kubernetes"} - caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg) - if err != nil { - t.Fatalf("couldn't create CA: %v", err) - } - - client := &fakecerts.FakeCertificatesV1beta1{ - Fake: &k8stesting.Fake{}, - } - certReq := getCertReq(t, caCert, caKey) - certReqNoCert := certReq.DeepCopy() - certReqNoCert.Status.Certificate = nil - client.AddReactor("get", "certificatesigningrequests", defaultReactionFunc(certReq)) - watcher := watch.NewFakeWithChanSize(3, false) - watcher.Add(certReqNoCert) - watcher.Modify(certReqNoCert) - watcher.Modify(certReq) - client.AddWatchReactor("certificatesigningrequests", k8stesting.DefaultWatchReactor(watcher, nil)) - - // override the timeout so tests are faster - watchTimeout = time.Second - - tests := []struct { - name string - impl Interface - }{ - { - name: "filerenewal", - impl: NewFileRenewal(caCert, caKey), - }, - { - name: "certs api", - impl: &CertsAPIRenewal{ - client: client, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - - certCfg := &certutil.Config{ - CommonName: "test-certs", - AltNames: certutil.AltNames{ - DNSNames: []string{"test-domain.space"}, - }, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - - cert, _, err := test.impl.Renew(certCfg) - if err != nil { - t.Fatalf("unexpected error renewing cert: %v", err) - } - - pool := x509.NewCertPool() - pool.AddCert(caCert) - - _, err = cert.Verify(x509.VerifyOptions{ - DNSName: "test-domain.space", - Roots: pool, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }) - if err != nil { - t.Errorf("couldn't verify new cert: %v", err) - } - }) - } -} - -func defaultReactionFunc(obj runtime.Object) k8stesting.ReactionFunc { - return func(act k8stesting.Action) (bool, runtime.Object, error) { - return true, obj, nil - } -} - -func getCertReq(t *testing.T, caCert *x509.Certificate, caKey crypto.Signer) *certsapi.CertificateSigningRequest { - cert, _, err := pkiutil.NewCertAndKey(caCert, caKey, &certutil.Config{ - CommonName: "testcert", - AltNames: certutil.AltNames{ - DNSNames: []string{"test-domain.space"}, - }, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }) - if err != nil { - t.Fatalf("couldn't generate cert: %v", err) - } - - return &certsapi.CertificateSigningRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testcert", - }, - Status: certsapi.CertificateSigningRequestStatus{ - Conditions: []certsapi.CertificateSigningRequestCondition{ - { - Type: certsapi.CertificateApproved, - }, - }, - Certificate: pkiutil.EncodeCertPEM(cert), - }, - } -} - -func TestCertToConfig(t *testing.T) { - expectedConfig := &certutil.Config{ - CommonName: 
"test-common-name", - Organization: []string{"sig-cluster-lifecycle"}, - AltNames: certutil.AltNames{ - IPs: []net.IP{net.ParseIP("10.100.0.1")}, - DNSNames: []string{"test-domain.space"}, - }, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - - cert := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "test-common-name", - Organization: []string{"sig-cluster-lifecycle"}, - }, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - DNSNames: []string{"test-domain.space"}, - IPAddresses: []net.IP{net.ParseIP("10.100.0.1")}, - } - - cfg := certToConfig(cert) - - if cfg.CommonName != expectedConfig.CommonName { - t.Errorf("expected common name %q, got %q", expectedConfig.CommonName, cfg.CommonName) - } - - if len(cfg.Organization) != 1 || cfg.Organization[0] != expectedConfig.Organization[0] { - t.Errorf("expected organization %v, got %v", expectedConfig.Organization, cfg.Organization) - - } - - if len(cfg.Usages) != 1 || cfg.Usages[0] != expectedConfig.Usages[0] { - t.Errorf("expected ext key usage %v, got %v", expectedConfig.Usages, cfg.Usages) - } - - if len(cfg.AltNames.IPs) != 1 || cfg.AltNames.IPs[0].String() != expectedConfig.AltNames.IPs[0].String() { - t.Errorf("expected SAN IPs %v, got %v", expectedConfig.AltNames.IPs, cfg.AltNames.IPs) - } - - if len(cfg.AltNames.DNSNames) != 1 || cfg.AltNames.DNSNames[0] != expectedConfig.AltNames.DNSNames[0] { - t.Errorf("expected SAN DNSNames %v, got %v", expectedConfig.AltNames.DNSNames, cfg.AltNames.DNSNames) - } -} - -func TestRenewExistingCert(t *testing.T) { - // creates a CA, a certificate, and save it to a file - cfg := &certutil.Config{ - CommonName: "test-common-name", - Organization: []string{"sig-cluster-lifecycle"}, - AltNames: certutil.AltNames{ - IPs: []net.IP{net.ParseIP("10.100.0.1")}, - DNSNames: []string{"test-domain.space"}, - }, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - - caCertCfg := &certutil.Config{CommonName: "kubernetes"} - caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg) - if err != nil { - t.Fatalf("couldn't create CA: %v", err) - } - - cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, cfg) - if err != nil { - t.Fatalf("couldn't generate certificate: %v", err) - } - - dir := testutil.SetupTempDir(t) - defer os.RemoveAll(dir) - - if err := pkiutil.WriteCertAndKey(dir, "server", cert, key); err != nil { - t.Fatalf("couldn't write out certificate") - } - - // makes some time pass - time.Sleep(1 * time.Second) - - // renew the certificate - renewer := NewFileRenewal(caCert, caKey) - - if err := RenewExistingCert(dir, "server", renewer); err != nil { - t.Fatalf("couldn't renew certificate: %v", err) - } - - // reads the renewed certificate - newCert, err := pkiutil.TryLoadCertFromDisk(dir, "server") - if err != nil { - t.Fatalf("couldn't load created certificate: %v", err) - } - - // check the new certificate is changed, has an newer expiration date, but preserve all the - // other attributes - - if newCert.SerialNumber.Cmp(cert.SerialNumber) == 0 { - t.Fatal("expected new certificate, but renewed certificate has same serial number") - } - - if !newCert.NotAfter.After(cert.NotAfter) { - t.Fatalf("expected new certificate with updated expiration, but renewed certificate has the same serial number: saw %s, expected greather than %s", newCert.NotAfter, cert.NotAfter) - } - - certtestutil.AssertCertificateIsSignedByCa(t, newCert, caCert) - certtestutil.AssertCertificateHasClientAuthUsage(t, newCert) - certtestutil.AssertCertificateHasOrganizations(t, 
newCert, cfg.Organization...) - certtestutil.AssertCertificateHasCommonName(t, newCert, cfg.CommonName) - certtestutil.AssertCertificateHasDNSNames(t, newCert, cfg.AltNames.DNSNames...) - certtestutil.AssertCertificateHasIPAddresses(t, newCert, cfg.AltNames.IPs...) -} - -func TestRenewEmbeddedClientCert(t *testing.T) { - // creates a CA, a client certificate, and then embeds it into a kubeconfig file - caCertCfg := &certutil.Config{CommonName: "kubernetes"} - caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg) - if err != nil { - t.Fatalf("couldn't create CA: %v", err) - } - - cfg := &certutil.Config{ - CommonName: "test-common-name", - Organization: []string{"sig-cluster-lifecycle"}, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - AltNames: certutil.AltNames{ - IPs: []net.IP{net.ParseIP("10.100.0.1")}, - DNSNames: []string{"test-domain.space"}, - }, - } - cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, cfg) - if err != nil { - t.Fatalf("couldn't generate certificate: %v", err) - } - - encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(key) - if err != nil { - t.Fatalf("failed to marshal private key to PEM: %v", err) - } - - certificateAuthorityData := pkiutil.EncodeCertPEM(caCert) - - config := kubeconfigutil.CreateWithCerts( - "https://localhost:1234", - "kubernetes-test", - "user-test", - certificateAuthorityData, - encodedClientKey, - pkiutil.EncodeCertPEM(cert), - ) - - dir := testutil.SetupTempDir(t) - defer os.RemoveAll(dir) - - kubeconfigPath := filepath.Join(dir, "k.conf") - - if err := clientcmd.WriteToFile(*config, kubeconfigPath); err != nil { - t.Fatalf("couldn't write out certificate") - } - - // makes some time pass - time.Sleep(1 * time.Second) - - // renew the embedded certificate - renewer := NewFileRenewal(caCert, caKey) - - if err := RenewEmbeddedClientCert(dir, "k.conf", renewer); err != nil { - t.Fatalf("couldn't renew embedded certificate: %v", err) - } - - // reads the kubeconfig file and gets the renewed certificate - newConfig, err := clientcmd.LoadFromFile(kubeconfigPath) - if err != nil { - t.Fatalf("failed to load kubeconfig file %s: %v", kubeconfigPath, err) - } - - if newConfig.Contexts[config.CurrentContext].Cluster != "kubernetes-test" { - t.Fatalf("invalid cluster. expected kubernetes-test, saw %s", newConfig.Contexts[config.CurrentContext].Cluster) - } - - cluster := newConfig.Clusters["kubernetes-test"] - if !bytes.Equal(cluster.CertificateAuthorityData, certificateAuthorityData) { - t.Fatalf("invalid cluster. CertificateAuthorityData does not contain expected value") - } - - if newConfig.Contexts[config.CurrentContext].AuthInfo != "user-test" { - t.Fatalf("invalid AuthInfo. 
expected user-test, saw %s", newConfig.Contexts[config.CurrentContext].AuthInfo) - } - - authInfo := newConfig.AuthInfos["user-test"] - - newCerts, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData) - if err != nil { - t.Fatalf("couldn't load created certificate: %v", err) - } - - // check the new certificate is changed, has an newer expiration date, but preserve all the - // other attributes - - newCert := newCerts[0] - if newCert.SerialNumber.Cmp(cert.SerialNumber) == 0 { - t.Fatal("expected new certificate, but renewed certificate has same serial number") - } - - if !newCert.NotAfter.After(cert.NotAfter) { - t.Fatalf("expected new certificate with updated expiration, but renewed certificate has same serial number: saw %s, expected greather than %s", newCert.NotAfter, cert.NotAfter) - } - - certtestutil.AssertCertificateIsSignedByCa(t, newCert, caCert) - certtestutil.AssertCertificateHasClientAuthUsage(t, newCert) - certtestutil.AssertCertificateHasOrganizations(t, newCert, cfg.Organization...) - certtestutil.AssertCertificateHasCommonName(t, newCert, cfg.CommonName) - certtestutil.AssertCertificateHasDNSNames(t, newCert, cfg.AltNames.DNSNames...) - certtestutil.AssertCertificateHasIPAddresses(t, newCert, cfg.AltNames.IPs...) -} diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 754ee78ca95..a50fb091019 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -178,7 +178,7 @@ func (spm *KubeStaticPodPathManager) CleanupDirs() error { return utilerrors.NewAggregate(errlist) } -func upgradeComponent(component string, renewCerts bool, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, beforePodHash string, recoverManifests map[string]string) error { +func upgradeComponent(component string, certsRenewMgr *renewal.Manager, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, beforePodHash string, recoverManifests map[string]string) error { // Special treatment is required for etcd case, when rollbackOldManifests should roll back etcd // manifests only for the case when component is Etcd recoverEtcd := false @@ -211,9 +211,9 @@ func upgradeComponent(component string, renewCerts bool, waiter apiclient.Waiter } // if certificate renewal should be performed - if renewCerts { + if certsRenewMgr != nil { // renew all the certificates used by the current component - if err := renewCertsByComponent(cfg, pathMgr.KubernetesDir(), component); err != nil { + if err := renewCertsByComponent(cfg, component, certsRenewMgr); err != nil { return rollbackOldManifests(recoverManifests, errors.Wrapf(err, "failed to renew certificates for component %q", component), pathMgr, recoverEtcd) } } @@ -256,7 +256,7 @@ func upgradeComponent(component string, renewCerts bool, waiter apiclient.Waiter } // performEtcdStaticPodUpgrade performs upgrade of etcd, it returns bool which indicates fatal error or not and the actual error. 
-func performEtcdStaticPodUpgrade(renewCerts bool, client clientset.Interface, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, recoverManifests map[string]string, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) { +func performEtcdStaticPodUpgrade(certsRenewMgr *renewal.Manager, client clientset.Interface, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, recoverManifests map[string]string, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) { // Add etcd static pod spec only if external etcd is not configured if cfg.Etcd.External != nil { return false, errors.New("external etcd detected, won't try to change any etcd state") @@ -320,7 +320,7 @@ func performEtcdStaticPodUpgrade(renewCerts bool, client clientset.Interface, wa retryInterval := 15 * time.Second // Perform etcd upgrade using common to all control plane components function - if err := upgradeComponent(constants.Etcd, renewCerts, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests); err != nil { + if err := upgradeComponent(constants.Etcd, certsRenewMgr, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests); err != nil { fmt.Printf("[upgrade/etcd] Failed to upgrade etcd: %v\n", err) // Since upgrade component failed, the old etcd manifest has either been restored or was never touched // Now we need to check the health of etcd cluster if it is up with old manifest @@ -433,13 +433,21 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, } } + var certsRenewMgr *renewal.Manager + if renewCerts { + certsRenewMgr, err = renewal.NewManager(&cfg.ClusterConfiguration, pathMgr.KubernetesDir()) + if err != nil { + return errors.Wrap(err, "failed to create the certificate renewal manager") + } + } + // etcd upgrade is done prior to other control plane components if !isExternalEtcd && etcdUpgrade { // set the TLS upgrade flag for all components fmt.Printf("[upgrade/etcd] Upgrading to TLS for %s\n", constants.Etcd) // Perform etcd upgrade using common to all control plane components function - fatal, err := performEtcdStaticPodUpgrade(renewCerts, client, waiter, pathMgr, cfg, recoverManifests, oldEtcdClient, newEtcdClient) + fatal, err := performEtcdStaticPodUpgrade(certsRenewMgr, client, waiter, pathMgr, cfg, recoverManifests, oldEtcdClient, newEtcdClient) if err != nil { if fatal { return err @@ -456,17 +464,22 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, } for _, component := range constants.ControlPlaneComponents { - if err = upgradeComponent(component, renewCerts, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests); err != nil { + if err = upgradeComponent(component, certsRenewMgr, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests); err != nil { return err } } if renewCerts { // renew the certificate embedded in the admin.conf file - err := renewEmbeddedCertsByName(cfg, pathMgr.KubernetesDir(), constants.AdminKubeConfigFileName) + renewed, err := certsRenewMgr.RenewUsingLocalCA(constants.AdminKubeConfigFileName) if err != nil { return rollbackOldManifests(recoverManifests, errors.Wrapf(err, "failed to upgrade the %s certificates", constants.AdminKubeConfigFileName), pathMgr, false) } + + if !renewed { + // if not error, but not renewed because of external CA detected, inform the user + fmt.Printf("[upgrade/staticpods] External CA detected, %s certificate can't be renewed\n", 
constants.AdminKubeConfigFileName) + } } // Remove the temporary directories used on a best-effort (don't fail if the calls error out) @@ -514,121 +527,57 @@ func rollbackEtcdData(cfg *kubeadmapi.InitConfiguration, pathMgr StaticPodPathMa // renewCertsByComponent takes charge of renewing certificates used by a specific component before // the static pod of the component is upgraded -func renewCertsByComponent(cfg *kubeadmapi.InitConfiguration, kubernetesDir, component string) error { - // if the cluster is using a local etcd - if cfg.Etcd.Local != nil { - if component == constants.Etcd || component == constants.KubeAPIServer { - // try to load the etcd CA - caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.CertificatesDir, certsphase.KubeadmCertEtcdCA.BaseName) - if err != nil { - return errors.Wrapf(err, "failed to upgrade the %s CA certificate and key", constants.Etcd) - } - // create a renewer for certificates signed by etcd CA - renewer := renewal.NewFileRenewal(caCert, caKey) - // then, if upgrading the etcd component, renew all the certificates signed by etcd CA and used - // by etcd itself (the etcd-server, the etcd-peer and the etcd-healthcheck-client certificate) - if component == constants.Etcd { - for _, cert := range []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertEtcdServer, - &certsphase.KubeadmCertEtcdPeer, - &certsphase.KubeadmCertEtcdHealthcheck, - } { - fmt.Printf("[upgrade/staticpods] Renewing %q certificate\n", cert.BaseName) - if err := renewal.RenewExistingCert(cfg.CertificatesDir, cert.BaseName, renewer); err != nil { - return errors.Wrapf(err, "failed to renew %s certificates", cert.Name) - } - } - } - // if upgrading the apiserver component, renew the certificate signed by etcd CA and used - // by the apiserver (the apiserver-etcd-client certificate) - if component == constants.KubeAPIServer { - cert := certsphase.KubeadmCertEtcdAPIClient - fmt.Printf("[upgrade/staticpods] Renewing %q certificate\n", cert.BaseName) - if err := renewal.RenewExistingCert(cfg.CertificatesDir, cert.BaseName, renewer); err != nil { - return errors.Wrapf(err, "failed to renew %s certificate and key", cert.Name) - } +func renewCertsByComponent(cfg *kubeadmapi.InitConfiguration, component string, certsRenewMgr *renewal.Manager) error { + var certificates []string + + // if etcd, only in case of local etcd, renew server, peer and health check certificate + if component == constants.Etcd { + if cfg.Etcd.Local != nil { + certificates = []string{ + certsphase.KubeadmCertEtcdServer.Name, + certsphase.KubeadmCertEtcdPeer.Name, + certsphase.KubeadmCertEtcdHealthcheck.Name, } } } + + // if apiserver, renew apiserver serving certificate, kubelet and front-proxy client certificate. 
+ //if local etcd, renew also the etcd client certificate if component == constants.KubeAPIServer { - // Checks if an external CA is provided by the user (when the CA Cert is present but the CA Key is not) - // if not, then CA is managed by kubeadm, so it is possible to renew all the certificates signed by ca - // and used the apis server (the apiserver certificate and the apiserver-kubelet-client certificate) - externalCA, _ := certsphase.UsingExternalCA(&cfg.ClusterConfiguration) - if !externalCA { - // try to load ca - caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.CertificatesDir, certsphase.KubeadmCertRootCA.BaseName) - if err != nil { - return errors.Wrapf(err, "failed to upgrade the %s certificates", constants.KubeAPIServer) - } - // create a renewer for certificates signed by CA - renewer := renewal.NewFileRenewal(caCert, caKey) - // renew the certificates - for _, cert := range []*certsphase.KubeadmCert{ - &certsphase.KubeadmCertAPIServer, - &certsphase.KubeadmCertKubeletClient, - } { - fmt.Printf("[upgrade/staticpods] Renewing %q certificate\n", cert.BaseName) - if err := renewal.RenewExistingCert(cfg.CertificatesDir, cert.BaseName, renewer); err != nil { - return errors.Wrapf(err, "failed to renew %s certificate and key", cert.Name) - } - } + certificates = []string{ + certsphase.KubeadmCertAPIServer.Name, + certsphase.KubeadmCertKubeletClient.Name, + certsphase.KubeadmCertFrontProxyClient.Name, } - - // Checks if an external Front-Proxy CA is provided by the user (when the Front-Proxy CA Cert is present but the Front-Proxy CA Key is not) - // if not, then Front-Proxy CA is managed by kubeadm, so it is possible to renew all the certificates signed by ca - // and used the apis server (the front-proxy-client certificate) - externalFrontProxyCA, _ := certsphase.UsingExternalFrontProxyCA(&cfg.ClusterConfiguration) - if !externalFrontProxyCA { - // try to load front-proxy-ca - caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.CertificatesDir, certsphase.KubeadmCertFrontProxyCA.BaseName) - if err != nil { - return errors.Wrapf(err, "failed to upgrade the %s certificates", constants.KubeAPIServer) - } - // create a renewer for certificates signed by Front-Proxy CA - renewer := renewal.NewFileRenewal(caCert, caKey) - // renew the certificates - cert := certsphase.KubeadmCertFrontProxyClient - fmt.Printf("[upgrade/staticpods] Renewing %q certificate\n", cert.BaseName) - if err := renewal.RenewExistingCert(cfg.CertificatesDir, cert.BaseName, renewer); err != nil { - return errors.Wrapf(err, "failed to renew %s certificate and key", cert.Name) - } + if cfg.Etcd.Local != nil { + certificates = append(certificates, certsphase.KubeadmCertEtcdAPIClient.Name) } } + + // if controller-manager, renew the certificate embedded in the controller-manager kubeConfig file if component == constants.KubeControllerManager { - // renew the certificate embedded in the controller-manager.conf file - err := renewEmbeddedCertsByName(cfg, kubernetesDir, constants.ControllerManagerKubeConfigFileName) - if err != nil { - return errors.Wrapf(err, "failed to upgrade the %s certificates", constants.ControllerManagerKubeConfigFileName) + certificates = []string{ + constants.ControllerManagerKubeConfigFileName, } } + + // if scheduler, renew the certificate embedded in the scheduler kubeConfig file if component == constants.KubeScheduler { - // renew the certificate embedded in the scheduler.conf file - err := renewEmbeddedCertsByName(cfg, kubernetesDir, 
constants.SchedulerKubeConfigFileName) - if err != nil { - return errors.Wrapf(err, "failed to upgrade the %s certificates", constants.SchedulerKubeConfigFileName) + certificates = []string{ + constants.SchedulerKubeConfigFileName, } } - return nil -} -func renewEmbeddedCertsByName(cfg *kubeadmapi.InitConfiguration, kubernetesDir, kubeConfigFile string) error { - // Checks if an external CA is provided by the user (when the CA Cert is present but the CA Key is not) - // if not, then CA is managed by kubeadm, so it is possible to renew all the certificates signed by ca - // and used by the apis server (the apiserver certificate and the apiserver-kubelet-client certificate) - externalCA, _ := certsphase.UsingExternalCA(&cfg.ClusterConfiguration) - if !externalCA { - // try to load ca - caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.CertificatesDir, certsphase.KubeadmCertRootCA.BaseName) + // renew the selected components + for _, cert := range certificates { + fmt.Printf("[upgrade/staticpods] Renewing %s certificate\n", cert) + renewed, err := certsRenewMgr.RenewUsingLocalCA(cert) if err != nil { - return errors.Wrapf(err, "failed to upgrade the %s certificates", kubeConfigFile) + return err } - // create a renewer for certificates signed by CA - renewer := renewal.NewFileRenewal(caCert, caKey) - // renew the certificate embedded in the controller-manager.conf file - fmt.Printf("[upgrade/staticpods] Renewing certificate embedded in %q \n", kubeConfigFile) - if err := renewal.RenewEmbeddedClientCert(kubernetesDir, kubeConfigFile, renewer); err != nil { - return errors.Wrapf(err, "failed to renew certificate embedded in %s", kubeConfigFile) + if !renewed { + // if not error, but not renewed because of external CA detected, inform the user + fmt.Printf("[upgrade/staticpods] External CA detected, %s certificate can't be renewed\n", cert) } } diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 33406c0450c..6c106f9d89d 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal" controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd" kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" @@ -823,7 +824,12 @@ func TestRenewCertsByComponent(t *testing.T) { } // Renew everything - err := renewCertsByComponent(cfg, tmpDir, test.component) + rm, err := renewal.NewManager(&cfg.ClusterConfiguration, tmpDir) + if err != nil { + t.Fatalf("Failed to create the certificate renewal manager: %v", err) + } + + err = renewCertsByComponent(cfg, test.component, rm) if test.shouldErrorOnRenew { if err == nil { t.Fatal("expected renewal error, got nothing") From 1c897874bfbf92bc009a7b734e90cc1b05e90811 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 15 May 2019 10:26:00 +0200 Subject: [PATCH 186/194] autogenerated --- cmd/kubeadm/app/cmd/alpha/BUILD | 2 +- cmd/kubeadm/app/phases/certs/renewal/BUILD | 19 +++++++++++++------ cmd/kubeadm/app/phases/upgrade/BUILD | 1 + 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/cmd/kubeadm/app/cmd/alpha/BUILD b/cmd/kubeadm/app/cmd/alpha/BUILD index 6484f318f2c..25ab2aac7da 
100644 --- a/cmd/kubeadm/app/cmd/alpha/BUILD +++ b/cmd/kubeadm/app/cmd/alpha/BUILD @@ -12,6 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/alpha", visibility = ["//visibility:public"], deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", @@ -20,7 +21,6 @@ go_library( "//cmd/kubeadm/app/cmd/util:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/features:go_default_library", - "//cmd/kubeadm/app/phases/certs:go_default_library", "//cmd/kubeadm/app/phases/certs/renewal:go_default_library", "//cmd/kubeadm/app/phases/kubeconfig:go_default_library", "//cmd/kubeadm/app/phases/kubelet:go_default_library", diff --git a/cmd/kubeadm/app/phases/certs/renewal/BUILD b/cmd/kubeadm/app/phases/certs/renewal/BUILD index 404d2291dfc..7a784d558ec 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/BUILD +++ b/cmd/kubeadm/app/phases/certs/renewal/BUILD @@ -3,20 +3,24 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "certsapi.go", - "filerenewal.go", - "interface.go", - "renewal.go", + "apirenewer.go", + "filerenewer.go", + "manager.go", + "readwriter.go", ], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal", visibility = ["//visibility:public"], deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/phases/certs:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", + "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/certificate/csr:go_default_library", "//staging/src/k8s.io/client-go/util/keyutil:go_default_library", @@ -27,11 +31,14 @@ go_library( go_test( name = "go_default_test", srcs = [ - "filerenewal_test.go", - "renewal_test.go", + "apirenewer_test.go", + "filerenewer_test.go", + "manager_test.go", + "readwriter_test.go", ], embed = [":go_default_library"], deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/util/certs:go_default_library", "//cmd/kubeadm/app/util/kubeconfig:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index 0df07487a3e..b3eff4a6df4 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -76,6 +76,7 @@ go_test( "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/phases/certs/renewal:go_default_library", "//cmd/kubeadm/app/phases/controlplane:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/phases/kubeconfig:go_default_library", From 4213f4d7974785f11b2abe027308153005c98653 Mon Sep 17 00:00:00 2001 From: andyzhangx 
Date: Wed, 15 May 2019 08:32:03 +0000 Subject: [PATCH 187/194] fix azure disk lun error --- pkg/volume/azure_dd/attacher.go | 2 +- pkg/volume/azure_dd/azure_dd.go | 2 +- .../azure/azure_controller_common.go | 11 ++++++----- .../azure/azure_controller_common_test.go | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 5b559078069..4eec28f23b4 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -82,7 +82,7 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( klog.V(2).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) - err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, compute.CachingTypes(*volumeSource.CachingMode)) + lun, err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { klog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index a15f4390695..3df26078f92 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -44,7 +44,7 @@ type DiskController interface { DeleteManagedDisk(diskURI string) error // Attaches the disk to the host machine. - AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) // Detaches the disk, identified by disk name or uri, from the host machine. DetachDisk(diskName, diskUri string, nodeName types.NodeName) error diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index 7a6f49b79b3..7bad7d72db9 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -91,16 +91,17 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) } // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI. 
-func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) error { +// return (lun, error) +func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) { vmset, err := c.getNodeVMSet(nodeName) if err != nil { - return err + return -1, err } instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) if err != nil { klog.Warningf("failed to get azure instance id (%v)", err) - return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) + return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } diskOpMutex.LockKey(instanceid) @@ -109,11 +110,11 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri lun, err := c.GetNextDiskLun(nodeName) if err != nil { klog.Warningf("no LUN available for instance %q (%v)", nodeName, err) - return fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err) + return -1, fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err) } klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName) - return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode) + return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode) } // DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI. diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go index c6bd2807bce..4db263d5699 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go @@ -36,7 +36,7 @@ func TestAttachDisk(t *testing.T) { diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup) - err := common.AttachDisk(true, "", diskURI, "node1", compute.CachingTypesReadOnly) + _, err := common.AttachDisk(true, "", diskURI, "node1", compute.CachingTypesReadOnly) if err != nil { fmt.Printf("TestAttachDisk return expected error: %v", err) } else { From 95f33ce39957e86cc3e81f4c8f325ecd133cbe9d Mon Sep 17 00:00:00 2001 From: Andy Xie Date: Tue, 14 May 2019 10:36:22 +0800 Subject: [PATCH 188/194] enhance leader election doc --- .../tools/leaderelection/leaderelection.go | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go index 02bdebd1d35..4b0948c3b51 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -16,12 +16,15 @@ limitations under the License. // Package leaderelection implements leader election of a set of endpoints. // It uses an annotation in the endpoints object to store the record of the -// election state. +// election state. This implementation does not guarantee that only one +// client is acting as a leader (a.k.a. fencing). // -// This implementation does not guarantee that only one client is acting as a -// leader (a.k.a. 
fencing). A client observes timestamps captured locally to -infer the state of the leader election. Thus the implementation is tolerant -to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate. +// +// A client observes timestamps captured locally to infer the state of the +// leader election. Thus the implementation is tolerant to arbitrary clock +// skew, but is not tolerant to arbitrary clock skew rate. The timestamp (renew time) +// is not meaningful if it was collected on another machine. The implementation +// of this client only acts on locally collected timestamps and cannot rely on +// the accuracy of the timestamp in the record for correctness. // // However the level of tolerance to skew rate can be configured by setting // RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a @@ -104,7 +107,15 @@ type LeaderElectionConfig struct { // LeaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. This is measured against time of - // last observed ack. + // last observed ack. A client needs to wait a full LeaseDuration without + // observing a change to the record before it can attempt to take over even + // when a client with an identity different from the record's starts and + // the renew time in the record is older than LeaseDuration. In other words, when + // all clients are shut down and after at least a LeaseDuration, clients + // started with identities different from the record's must wait a full + // LeaseDuration before acquiring a lock. Thus LeaseDuration should be as + // short as possible to avoid a possibly long wait. LeaseDuration is 15 + // seconds in core Kubernetes components. LeaseDuration time.Duration // RenewDeadline is the duration that the acting master will retry // refreshing leadership before giving up. 
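To make the relationship between LeaseDuration and RenewDeadline described above more concrete, a minimal leader-election client built on this package might be wired up as in the sketch below. This is an illustrative sketch only: the kubeconfig path, lease name, namespace, and identity are placeholders, the durations simply echo the 15-second LeaseDuration mentioned in the comment, and it assumes a client-go version that ships the Lease-based resource lock.

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	// Build a clientset from a kubeconfig file (path is a placeholder).
	restCfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(restCfg)

	// The lock object all candidates compete for; names are illustrative.
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "example-lock", Namespace: "default"},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: "candidate-1"},
	}

	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock: lock,
		// A candidate must see no change to the record for a full LeaseDuration
		// before it may take over, so keep this as short as is practical.
		LeaseDuration: 15 * time.Second,
		// The acting leader gives up if it cannot renew within RenewDeadline.
		RenewDeadline: 10 * time.Second,
		// How often candidates retry acquiring or renewing the lock.
		RetryPeriod: 2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Leader-only work goes here; stop when ctx is cancelled.
				<-ctx.Done()
			},
			OnStoppedLeading: func() {
				// Leadership was lost; stop doing leader-only work.
			},
		},
	})
}

Keeping LeaseDuration short bounds how long a newly started candidate has to wait after all previous clients have stopped, at the cost of requiring the leader to renew more frequently.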
From 59af63c687233c486cf2af18ca9c654dae98a43f Mon Sep 17 00:00:00 2001 From: Maciej Borsz Date: Wed, 15 May 2019 11:32:10 +0200 Subject: [PATCH 189/194] Make coredns memory limit configurable --- cluster/addons/dns/coredns/coredns.yaml.base | 2 +- cluster/addons/dns/coredns/coredns.yaml.in | 2 +- cluster/addons/dns/coredns/coredns.yaml.sed | 2 +- cluster/addons/dns/coredns/transforms2salt.sed | 1 + cluster/addons/dns/coredns/transforms2sed.sed | 1 + cluster/addons/dns/kube-dns/kube-dns.yaml.base | 2 +- cluster/addons/dns/kube-dns/kube-dns.yaml.in | 2 +- cluster/addons/dns/kube-dns/kube-dns.yaml.sed | 2 +- cluster/addons/dns/kube-dns/transforms2salt.sed | 1 + cluster/addons/dns/kube-dns/transforms2sed.sed | 1 + cluster/gce/config-default.sh | 1 + cluster/gce/config-test.sh | 1 + cluster/gce/gci/configure-helper.sh | 2 ++ cluster/gce/util.sh | 1 + 14 files changed, 15 insertions(+), 6 deletions(-) diff --git a/cluster/addons/dns/coredns/coredns.yaml.base b/cluster/addons/dns/coredns/coredns.yaml.base index efc946b144a..f89f56a740f 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.base +++ b/cluster/addons/dns/coredns/coredns.yaml.base @@ -120,7 +120,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - memory: 170Mi + memory: __PILLAR__DNS__MEMORY__LIMIT__ requests: cpu: 100m memory: 70Mi diff --git a/cluster/addons/dns/coredns/coredns.yaml.in b/cluster/addons/dns/coredns/coredns.yaml.in index 17bb9de0320..c70c8bff674 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.in +++ b/cluster/addons/dns/coredns/coredns.yaml.in @@ -120,7 +120,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - memory: 170Mi + memory: {{ pillar['dns_memory_limit'] }} requests: cpu: 100m memory: 70Mi diff --git a/cluster/addons/dns/coredns/coredns.yaml.sed b/cluster/addons/dns/coredns/coredns.yaml.sed index 53910bb6507..556afa76042 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.sed +++ b/cluster/addons/dns/coredns/coredns.yaml.sed @@ -120,7 +120,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - memory: 170Mi + memory: $DNS_MEMORY_LIMIT requests: cpu: 100m memory: 70Mi diff --git a/cluster/addons/dns/coredns/transforms2salt.sed b/cluster/addons/dns/coredns/transforms2salt.sed index 0a0778b9292..4d65cac1f92 100644 --- a/cluster/addons/dns/coredns/transforms2salt.sed +++ b/cluster/addons/dns/coredns/transforms2salt.sed @@ -1,4 +1,5 @@ s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g +s/__PILLAR__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g diff --git a/cluster/addons/dns/coredns/transforms2sed.sed b/cluster/addons/dns/coredns/transforms2sed.sed index 7d64f8e0b51..d13e358ce1b 100644 --- a/cluster/addons/dns/coredns/transforms2sed.sed +++ b/cluster/addons/dns/coredns/transforms2sed.sed @@ -1,4 +1,5 @@ s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g +s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns/kube-dns.yaml.base index b6096e13e13..cb70ab2c80a 100644 --- a/cluster/addons/dns/kube-dns/kube-dns.yaml.base +++ 
b/cluster/addons/dns/kube-dns/kube-dns.yaml.base @@ -106,7 +106,7 @@ spec: # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: - memory: 170Mi + memory: __PILLAR__DNS__MEMORY__LIMIT__ requests: cpu: 100m memory: 70Mi diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns/kube-dns.yaml.in index 79da48824f8..48a70e7ecb2 100644 --- a/cluster/addons/dns/kube-dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.in @@ -106,7 +106,7 @@ spec: # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: - memory: 170Mi + memory: {{ pillar['dns_memory_limit'] }} requests: cpu: 100m memory: 70Mi diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns/kube-dns.yaml.sed index 2b7f0e7d9ef..85cd7a19f91 100644 --- a/cluster/addons/dns/kube-dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.sed @@ -106,7 +106,7 @@ spec: # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: - memory: 170Mi + memory: $DNS_MEMORY_LIMIT requests: cpu: 100m memory: 70Mi diff --git a/cluster/addons/dns/kube-dns/transforms2salt.sed b/cluster/addons/dns/kube-dns/transforms2salt.sed index 0a0778b9292..4d65cac1f92 100644 --- a/cluster/addons/dns/kube-dns/transforms2salt.sed +++ b/cluster/addons/dns/kube-dns/transforms2salt.sed @@ -1,4 +1,5 @@ s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g +s/__PILLAR__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g diff --git a/cluster/addons/dns/kube-dns/transforms2sed.sed b/cluster/addons/dns/kube-dns/transforms2sed.sed index 7d64f8e0b51..d13e358ce1b 100644 --- a/cluster/addons/dns/kube-dns/transforms2sed.sed +++ b/cluster/addons/dns/kube-dns/transforms2sed.sed @@ -1,4 +1,5 @@ s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g +s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 566c4c75d48..3e4b3724556 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -254,6 +254,7 @@ CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}" ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}" DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}" +DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}" # Optional: Enable DNS horizontal autoscaler ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index aaeaab6ad0e..73f78db956b 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -285,6 +285,7 @@ ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" DNS_SERVER_IP="10.0.0.10" LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}" DNS_DOMAIN="cluster.local" +DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}" # Optional: 
Enable DNS horizontal autoscaler ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 12ac4efbec4..e44d93b3173 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -2410,6 +2410,7 @@ function setup-coredns-manifest { sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}" sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}" sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}" + sed -i -e "s@{{ *pillar\['dns_memory_limit'\] *}}@${DNS_MEMORY_LIMIT}@g" "${coredns_file}" if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce" @@ -2461,6 +2462,7 @@ EOF # Replace the salt configurations with variable values. sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}" sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}" + sed -i -e "s@{{ *pillar\['dns_memory_limit'\] *}}@${DNS_MEMORY_LIMIT}@g" "${kubedns_file}" if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 52fa484c23a..4643927af42 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -1123,6 +1123,7 @@ ENABLE_NODELOCAL_DNS: $(yaml-quote ${ENABLE_NODELOCAL_DNS:-false}) DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-}) LOCAL_DNS_IP: $(yaml-quote ${LOCAL_DNS_IP:-}) DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-}) +DNS_MEMORY_LIMIT: $(yaml-quote ${DNS_MEMORY_LIMIT:-}) ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}) KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false}) KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-}) From 944976d11fc06165afb076a2b9793f62064e8a57 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 15 May 2019 14:29:20 -0700 Subject: [PATCH 190/194] Extract pod specs and handlers into fixtures --- test/e2e/common/container_probe.go | 343 +++++++++++------------------ 1 file changed, 128 insertions(+), 215 deletions(-) diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 5ebbbf2cbf9..2412482a00f 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -38,8 +38,7 @@ import ( ) const ( - probTestContainerName = "test-webserver" - probTestInitialDelaySeconds = 15 + probeTestInitialDelaySeconds = 15 defaultObservationTimeout = time.Minute * 4 ) @@ -59,7 +58,8 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Create a Pod that is configured with a initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay. */ framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() { - p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil)) + containerName := "test-webserver" + p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80)) f.WaitForPodReady(p.Name) p, err := podClient.Get(p.Name, metav1.GetOptions{}) @@ -72,11 +72,11 @@ var _ = framework.KubeDescribe("Probing container", func() { // is true for a single container pod. 
readyTime, err := getTransitionTimeForReadyCondition(p) framework.ExpectNoError(err) - startedTime, err := getContainerStartedTime(p, probTestContainerName) + startedTime, err := getContainerStartedTime(p, containerName) framework.ExpectNoError(err) e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime) - initialDelay := probTestInitialDelaySeconds * time.Second + initialDelay := probeTestInitialDelaySeconds * time.Second if readyTime.Sub(startedTime) < initialDelay { framework.Failf("Pod became ready before it's %v initial delay", initialDelay) } @@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Probing container", func() { then the Pod MUST never be ready, never be running and restart count MUST be zero. */ framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() { - p := podClient.Create(makePodSpec(probe.withFailing().build(), nil)) + p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80)) Consistently(func() (bool, error) { p, err := podClient.Get(p.Name, metav1.GetOptions{}) if err != nil { @@ -117,30 +117,14 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Create a Pod with liveness probe that uses ExecAction handler to cat /temp/health file. The Container deletes the file /temp/health after 10 second, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1. */ framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() { - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-exec", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - Command: []string{"cat", "/tmp/health"}, - }, - }, - InitialDelaySeconds: 15, - FailureThreshold: 1, - }, - }, - }, - }, - }, 1, defaultObservationTimeout) + cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"} + livenessProbe := &v1.Probe{ + Handler: execHandler([]string{"cat", "/tmp/health"}), + InitialDelaySeconds: 15, + FailureThreshold: 1, + } + pod := busyBoxPodSpec(nil, livenessProbe, cmd) + runLivenessTest(f, pod, 1, defaultObservationTimeout) }) /* @@ -149,30 +133,14 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Pod is created with liveness probe that uses ‘exec’ command to cat /temp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0. 
*/ framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() { - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-exec", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - Command: []string{"cat", "/tmp/health"}, - }, - }, - InitialDelaySeconds: 15, - FailureThreshold: 1, - }, - }, - }, - }, - }, 0, defaultObservationTimeout) + cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"} + livenessProbe := &v1.Probe{ + Handler: execHandler([]string{"cat", "/tmp/health"}), + InitialDelaySeconds: 15, + FailureThreshold: 1, + } + pod := busyBoxPodSpec(nil, livenessProbe, cmd) + runLivenessTest(f, pod, 0, defaultObservationTimeout) }) /* @@ -181,31 +149,13 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. */ framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() { - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-http", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.Liveness), - Command: []string{"/server"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/healthz", - Port: intstr.FromInt(8080), - }, - }, - InitialDelaySeconds: 15, - FailureThreshold: 1, - }, - }, - }, - }, - }, 1, defaultObservationTimeout) + livenessProbe := &v1.Probe{ + Handler: httpGetHandler("/healthz", 8080), + InitialDelaySeconds: 15, + FailureThreshold: 1, + } + pod := livenessPodSpec(nil, livenessProbe) + runLivenessTest(f, pod, 1, defaultObservationTimeout) }) /* @@ -214,31 +164,13 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment everytime health check fails, measure upto 5 restart. 
*/ framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func() { - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-http", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.Liveness), - Command: []string{"/server"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/healthz", - Port: intstr.FromInt(8080), - }, - }, - InitialDelaySeconds: 5, - FailureThreshold: 1, - }, - }, - }, - }, - }, 5, time.Minute*5) + livenessProbe := &v1.Probe{ + Handler: httpGetHandler("/healthz", 8080), + InitialDelaySeconds: 5, + FailureThreshold: 1, + } + pod := livenessPodSpec(nil, livenessProbe) + runLivenessTest(f, pod, 5, time.Minute*5) }) /* @@ -247,32 +179,14 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: A Pod is created with liveness probe on http endpoint ‘/’. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero. */ framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() { - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-http", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.TestWebserver), - Ports: []v1.ContainerPort{{ContainerPort: 80}}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/", - Port: intstr.FromInt(80), - }, - }, - InitialDelaySeconds: 15, - TimeoutSeconds: 5, - FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers. - }, - }, - }, - }, - }, 0, defaultObservationTimeout) + livenessProbe := &v1.Probe{ + Handler: httpGetHandler("/", 80), + InitialDelaySeconds: 15, + TimeoutSeconds: 5, + FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers. + } + pod := testWebServerPodSpec(nil, livenessProbe, "test-webserver", 80) + runLivenessTest(f, pod, 0, defaultObservationTimeout) }) /* @@ -283,31 +197,15 @@ var _ = framework.KubeDescribe("Probing container", func() { It("should be restarted with a docker exec liveness probe with timeout ", func() { // TODO: enable this test once the default exec handler supports timeout. 
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API") - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-exec", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"/bin/sh", "-c", "sleep 600"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - Command: []string{"/bin/sh", "-c", "sleep 10"}, - }, - }, - InitialDelaySeconds: 15, - TimeoutSeconds: 1, - FailureThreshold: 1, - }, - }, - }, - }, - }, 1, defaultObservationTimeout) + cmd := []string{"/bin/sh", "-c", "sleep 600"} + livenessProbe := &v1.Probe{ + Handler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}), + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + FailureThreshold: 1, + } + pod := busyBoxPodSpec(nil, livenessProbe, cmd) + runLivenessTest(f, pod, 1, defaultObservationTimeout) }) /* @@ -316,31 +214,13 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: A Pod is created with liveness probe on http endpoint /redirect?loc=healthz. The http handler on the /redirect will redirect to the /healthz endpoint, which will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. */ It("should be restarted with a local redirect http liveness probe", func() { - runLivenessTest(f, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-http-redirect", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.Liveness), - Command: []string{"/server"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/redirect?loc=" + url.QueryEscape("/healthz"), - Port: intstr.FromInt(8080), - }, - }, - InitialDelaySeconds: 15, - FailureThreshold: 1, - }, - }, - }, - }, - }, 1, defaultObservationTimeout) + livenessProbe := &v1.Probe{ + Handler: httpGetHandler("/redirect?loc="+url.QueryEscape("/healthz"), 8080), + InitialDelaySeconds: 15, + FailureThreshold: 1, + } + pod := livenessPodSpec(nil, livenessProbe) + runLivenessTest(f, pod, 1, defaultObservationTimeout) }) /* @@ -349,31 +229,12 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: A Pod is created with liveness probe on http endpoint /redirect with a redirect to http://0.0.0.0/. The http handler on the /redirect should not follow the redirect, but instead treat it as a success and generate an event. 
*/ It("should *not* be restarted with a non-local redirect http liveness probe", func() { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "liveness-http-redirect", - Labels: map[string]string{"test": "liveness"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "liveness", - Image: imageutils.GetE2EImage(imageutils.Liveness), - Command: []string{"/server"}, - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/redirect?loc=" + url.QueryEscape("http://0.0.0.0/"), - Port: intstr.FromInt(8080), - }, - }, - InitialDelaySeconds: 15, - FailureThreshold: 1, - }, - }, - }, - }, + livenessProbe := &v1.Probe{ + Handler: httpGetHandler("/redirect?loc="+url.QueryEscape("http://0.0.0.0/"), 8080), + InitialDelaySeconds: 15, + FailureThreshold: 1, } + pod := livenessPodSpec(nil, livenessProbe) runLivenessTest(f, pod, 0, defaultObservationTimeout) // Expect an event of type "ProbeWarning". expectedEvent := fields.Set{ @@ -417,21 +278,78 @@ func getRestartCount(p *v1.Pod) int { return count } -func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod { - pod := &v1.Pod{ +func testWebServerPodSpec(readinessProbe, livenessProbe *v1.Probe, containerName string, port int) *v1.Pod { + return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())}, Spec: v1.PodSpec{ Containers: []v1.Container{ { - Name: probTestContainerName, + Name: containerName, Image: imageutils.GetE2EImage(imageutils.TestWebserver), + Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, }, } - return pod +} + +func busyBoxPodSpec(readinessProbe, livenessProbe *v1.Probe, cmd []string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox-" + string(uuid.NewUUID()), + Labels: map[string]string{"test": "liveness"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "busybox", + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: cmd, + LivenessProbe: livenessProbe, + ReadinessProbe: readinessProbe, + }, + }, + }, + } +} + +func livenessPodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "liveness-" + string(uuid.NewUUID()), + Labels: map[string]string{"test": "liveness"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "liveness", + Image: imageutils.GetE2EImage(imageutils.Liveness), + Command: []string{"/server"}, + LivenessProbe: livenessProbe, + ReadinessProbe: readinessProbe, + }, + }, + }, + } +} + +func execHandler(cmd []string) v1.Handler { + return v1.Handler{ + Exec: &v1.ExecAction{ + Command: cmd, + }, + } +} + +func httpGetHandler(path string, port int) v1.Handler { + return v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ + Path: path, + Port: intstr.FromInt(port), + }, + } } type webserverProbeBuilder struct { @@ -451,15 +369,10 @@ func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder { func (b webserverProbeBuilder) build() *v1.Probe { probe := &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Port: intstr.FromInt(80), - Path: "/", - }, - }, + Handler: httpGetHandler("/", 80), } if b.initialDelay { - probe.InitialDelaySeconds = probTestInitialDelaySeconds + probe.InitialDelaySeconds = probeTestInitialDelaySeconds } if b.failing { probe.HTTPGet.Port = intstr.FromInt(81) From 0541be17875e8bf3e7d0ab7d0b104db8073ae411 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 15 May 2019 15:06:02 
-0700 Subject: [PATCH 191/194] Add a tcp probe e2e test --- test/e2e/common/container_probe.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 2412482a00f..10881e4844d 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -158,6 +158,21 @@ var _ = framework.KubeDescribe("Probing container", func() { runLivenessTest(f, pod, 1, defaultObservationTimeout) }) + /* + Release : v1.15 + Testname: Pod liveness probe, using tcp socket, no restart + Description: A Pod is created with liveness probe on tcp socket 8080. The http handler on port 8080 will return http errors after 10 seconds, but socket will remain open. Liveness probe MUST not fail to check health and the restart count should remain 0. + */ + It("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func() { + livenessProbe := &v1.Probe{ + Handler: tcpSocketHandler(8080), + InitialDelaySeconds: 15, + FailureThreshold: 1, + } + pod := livenessPodSpec(nil, livenessProbe) + runLivenessTest(f, pod, 0, defaultObservationTimeout) + }) + /* Release : v1.9 Testname: Pod liveness probe, using http endpoint, multiple restarts (slow) @@ -352,6 +367,14 @@ func httpGetHandler(path string, port int) v1.Handler { } } +func tcpSocketHandler(port int) v1.Handler { + return v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: intstr.FromInt(port), + }, + } +} + type webserverProbeBuilder struct { failing bool initialDelay bool From 64f2c689485fed07bd6a65dff43e660b58ac41a8 Mon Sep 17 00:00:00 2001 From: Mayank Gaikwad <8110509+mgdevstack@users.noreply.github.com> Date: Thu, 16 May 2019 20:07:37 +0530 Subject: [PATCH 192/194] Add [LinuxOnly] to e2e verifying PQDN resolution --- test/e2e/network/dns.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index acd94976b63..1a26102599b 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -69,7 +69,8 @@ var _ = SIGDescribe("DNS", func() { validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - ginkgo.It("should resolve DNS of partial qualified names for the cluster ", func() { + // [LinuxOnly]: As Windows currently does not support resolving PQDNs. + ginkgo.It("should resolve DNS of partial qualified names for the cluster [LinuxOnly]", func() { // All the names we need to be able to resolve. // TODO: Spin up a separate test service and test that dns works for that service. namesToResolve := []string{ @@ -171,7 +172,8 @@ var _ = SIGDescribe("DNS", func() { validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - ginkgo.It("should resolve DNS of partial qualified names for services ", func() { + // [LinuxOnly]: As Windows currently does not support resolving PQDNs. + ginkgo.It("should resolve DNS of partial qualified names for services [LinuxOnly]", func() { // Create a test headless service. 
ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ From 145935d8157abd5b1cfa0c225c4610911ae03194 Mon Sep 17 00:00:00 2001 From: Antoine Pelisse Date: Fri, 3 May 2019 14:56:23 -0700 Subject: [PATCH 193/194] Implement rollout restart for statefulset and daemonset --- pkg/kubectl/cmd/rollout/rollout_restart.go | 9 ++-- .../polymorphichelpers/objectrestarter.go | 42 +++++++++++++++++++ test/cmd/apps.sh | 8 ++++ 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/pkg/kubectl/cmd/rollout/rollout_restart.go b/pkg/kubectl/cmd/rollout/rollout_restart.go index b273f3336b5..b8bb92d074d 100644 --- a/pkg/kubectl/cmd/rollout/rollout_restart.go +++ b/pkg/kubectl/cmd/rollout/rollout_restart.go @@ -54,11 +54,14 @@ var ( restartLong = templates.LongDesc(` Restart a resource. - A deployment with the "RolloutStrategy" will be rolling restarted.`) + Resource will be rollout restarted.`) restartExample = templates.Examples(` # Restart a deployment - kubectl rollout restart deployment/nginx`) + kubectl rollout restart deployment/nginx + + # Restart a daemonset + kubectl rollout restart daemonset/abc`) ) // NewRolloutRestartOptions returns an initialized RestartOptions instance @@ -73,7 +76,7 @@ func NewRolloutRestartOptions(streams genericclioptions.IOStreams) *RestartOptio func NewCmdRolloutRestart(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewRolloutRestartOptions(streams) - validArgs := []string{"deployment"} + validArgs := []string{"deployment", "daemonset", "statefulset"} cmd := &cobra.Command{ Use: "restart RESOURCE", diff --git a/pkg/kubectl/polymorphichelpers/objectrestarter.go b/pkg/kubectl/polymorphichelpers/objectrestarter.go index 1110beb7f36..891762c38dd 100644 --- a/pkg/kubectl/polymorphichelpers/objectrestarter.go +++ b/pkg/kubectl/polymorphichelpers/objectrestarter.go @@ -71,6 +71,48 @@ func defaultObjectRestarter(obj runtime.Object) ([]byte, error) { obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj) + case *extensionsv1beta1.DaemonSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(extensionsv1beta1.SchemeGroupVersion), obj) + + case *appsv1.DaemonSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta2.DaemonSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + + case *appsv1.StatefulSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return 
runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta1.StatefulSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj) + + case *appsv1beta2.StatefulSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + default: return nil, fmt.Errorf("restarting is not supported") } diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh index 1f947d2e59d..4786c087965 100755 --- a/test/cmd/apps.sh +++ b/test/cmd/apps.sh @@ -43,6 +43,10 @@ run_daemonset_tests() { kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4' + # Rollout restart should change generation + kubectl rollout restart daemonset/bind "${kube_flags[@]}" + kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '5' + # Clean up kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}" @@ -488,6 +492,10 @@ run_stateful_set_tests() { # TODO: test robust scaling in an e2e. wait-for-pods-with-label "app=nginx-statefulset" "nginx-0" + # Rollout restart should change generation + kubectl rollout restart statefulset nginx "${kube_flags[@]}" + kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '3' + ### Clean up kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}" # Post-condition: no pods from statefulset controller From 8ae998ceb69ae83afe730795aea3bd44913ad868 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Thu, 16 May 2019 17:33:05 +0000 Subject: [PATCH 194/194] Update CHANGELOG-1.14.md for v1.14.2. 
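Editor's note on PATCH 193 above (rollout restart for DaemonSet and StatefulSet): every new case added to `objectrestarter.go` does the same thing — it stamps a `kubectl.kubernetes.io/restartedAt` annotation onto the workload's pod template, which changes the template and makes the controller perform a rolling update. The sketch below is a minimal, standalone illustration of that shared logic only; the `setRestartedAt` helper and the `main` driver are illustrative and are not part of the patch, which inlines this per typed object.

```go
// Standalone sketch of the annotation stamping used by "kubectl rollout restart".
package main

import (
	"fmt"
	"time"
)

// setRestartedAt mirrors the per-kind branches in objectrestarter.go: lazily
// initialize the pod-template annotation map, then record the restart time.
func setRestartedAt(annotations map[string]string) map[string]string {
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
	return annotations
}

func main() {
	// Simulate a DaemonSet/StatefulSet pod template that has no annotations yet.
	var tmplAnnotations map[string]string
	tmplAnnotations = setRestartedAt(tmplAnnotations)
	fmt.Println(tmplAnnotations)
}
```

Because the annotation value is a fresh timestamp on every invocation, repeating the command changes the template again — which is exactly what the `test/cmd/apps.sh` assertions above verify by checking that the template generation (DaemonSet) and observed generation (StatefulSet) are bumped after `kubectl rollout restart`.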
--- CHANGELOG-1.14.md | 150 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 120 insertions(+), 30 deletions(-) diff --git a/CHANGELOG-1.14.md b/CHANGELOG-1.14.md index d54038e304b..076bf1342b1 100644 --- a/CHANGELOG-1.14.md +++ b/CHANGELOG-1.14.md @@ -1,16 +1,23 @@ -- [v1.14.1](#v1141) - - [Downloads for v1.14.1](#downloads-for-v1141) +- [v1.14.2](#v1142) + - [Downloads for v1.14.2](#downloads-for-v1142) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.14.0](#changelog-since-v1140) + - [Changelog since v1.14.1](#changelog-since-v1141) - [Other notable changes](#other-notable-changes) -- [v1.14.0](#v1140) - - [Downloads for v1.14.0](#downloads-for-v1140) +- [v1.14.1](#v1141) + - [Downloads for v1.14.1](#downloads-for-v1141) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) + - [Changelog since v1.14.0](#changelog-since-v1140) + - [Other notable changes](#other-notable-changes-1) +- [v1.14.0](#v1140) + - [Downloads for v1.14.0](#downloads-for-v1140) + - [Client Binaries](#client-binaries-2) + - [Server Binaries](#server-binaries-2) + - [Node Binaries](#node-binaries-2) - [Kubernetes v1.14 Release Notes](#kubernetes-v114-release-notes) - [1.14 What’s New](#114-whats-new) - [Known Issues](#known-issues) @@ -42,57 +49,140 @@ - [External Dependencies](#external-dependencies) - [v1.14.0-rc.1](#v1140-rc1) - [Downloads for v1.14.0-rc.1](#downloads-for-v1140-rc1) - - [Client Binaries](#client-binaries-2) - - [Server Binaries](#server-binaries-2) - - [Node Binaries](#node-binaries-2) - - [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2) - - [Action Required](#action-required) - - [Other notable changes](#other-notable-changes-1) -- [v1.14.0-beta.2](#v1140-beta2) - - [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1) - - [Action Required](#action-required-1) + - [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-2) -- [v1.14.0-beta.1](#v1140-beta1) - - [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1) +- [v1.14.0-beta.2](#v1140-beta2) + - [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3) - - [Action Required](#action-required-2) + - [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-3) -- [v1.14.0-alpha.3](#v1140-alpha3) - - [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3) +- [v1.14.0-beta.1](#v1140-beta1) + - [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2) - - [Action Required](#action-required-3) + - [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-4) -- [v1.14.0-alpha.2](#v1140-alpha2) - - [Downloads for 
v1.14.0-alpha.2](#downloads-for-v1140-alpha2) +- [v1.14.0-alpha.3](#v1140-alpha3) + - [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1) - - [Action Required](#action-required-4) + - [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-5) -- [v1.14.0-alpha.1](#v1140-alpha1) - - [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1) +- [v1.14.0-alpha.2](#v1140-alpha2) + - [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) + - [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-6) +- [v1.14.0-alpha.1](#v1140-alpha1) + - [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Changelog since v1.13.0](#changelog-since-v1130) - [Action Required](#action-required-5) - - [Other notable changes](#other-notable-changes-6) + - [Other notable changes](#other-notable-changes-7) +# v1.14.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.14.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes.tar.gz) | `ef1228ef7cdc3a53e9a5003acb1616aff48eba53db147af82c5e318c174f14db410bb55c030acd67d7f7694b085185ca5f9ac1d3fb9bb6ec853196571e86ad2e` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-src.tar.gz) | `1721ea726dd19f06bade3e9751379764ffb16289b8902164d78a000eb22da15f11358b208f3996df09cd805f98daa540e49f156c1b7aabee6a06df13de8386ca` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-darwin-386.tar.gz) | `f707f3293173cbb47dc8537b19d7da443e40d9c2b3945e8e0559513d227d98a97058b5ee3762fbf93e79b98bceadb23fc985bfbff33c8f4970966383d5032df1` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-darwin-amd64.tar.gz) | `dcd61588fc0b27d6539f937106a88f8ebb3f19e9a41d37a79804a2594e12860247883374d7594b52a248915820be98b0dd7f756e581f5512cf731f9992bc3950` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-386.tar.gz) | `90ad262988898cc25c2d84fdf1d62d3cdf8f16a9b7598d477a1b516b7e87e19196a4e501388e68fccc30916ac617977f6e22e4ec13fa2046bda47d386b45a0e6` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-amd64.tar.gz) | `a4394293cecdc177db7d3ef29f9d9efb7f922d193b00d83fa17c847e2aa1cd1c38eff1f4233843fededf15d99f7c434bf701d84b93a3cb834a4699cbddf02385` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-arm.tar.gz) | `265599b200f6de8d2c01ac36a33a0fca9faf36fb68e3e3dd5dad9166b9e6605db2aadd4199a05b5b9e20d065a8e59e7d0d130e5038dc01b37ed9705a8550d677` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-arm64.tar.gz) | `31799018b7840cafac0fa4f8cc474396feaab71340eb7f38a122109fdcf759afc6066e67c5a26fe234232ab9a180d7312e81b3911c153f2e949415236a7b1709` 
+[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-ppc64le.tar.gz) | `670bbe7c3142ccfa99a1eebc6d94798a8c3720165301ef615812aea64e512e03db4a9e2d80bfa073083b87c1a123a1a8e0c72fe2be26e2dfe8a499a3237deb32` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-s390x.tar.gz) | `58d161e747ec0924f3a937bd4e6856be9bad9227ca2564f2b59cdc9bfd063d78cb9c6381441aac21d3d809a1edee059697cbef5aabd344bb3fb58d4a56641415` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-windows-386.tar.gz) | `696caeb9611137bce1988c8bf7a1e326f92dbb6f9eb31f82cc2d9cf262888b220c3abed5edb8807c58d37b659a80e46f79ecb9d8ea67627cf6a7e6b9ffa3e5c6` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-windows-amd64.tar.gz) | `156ccc2102a6f92fe1078feaed835913b34eac94bbd0846726eb43fa60f0beb724355e3a3be4de87630f27f67effdd88a5014aa197ba8695bf36da2b70ee1c14` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-amd64.tar.gz) | `f7d9687eb49ea71f0d8b1ccfac33ed05cd341d7cfacb0711fce4a722801769deb05f72f19ade10b6dc29409f0c9136653c489653ca1f20b698c1310f8a43600f` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-arm.tar.gz) | `5c2247e4cab886cbca59ef47ea32d9ab8bb5f47495f844337dadce2362b76ebedc8a912f34131f9ec2e15bcb9023d75efb561ce7e51ce5fc7d0cb6f058a96840` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-arm64.tar.gz) | `a341bb15e659d4000fe29b88371cc1c02df4715786901b870546c04cd943f5cad56bd4f014062c4ef2d601f107038bb4024c029f62b8b37456bbcf4d14cfc5d0` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-ppc64le.tar.gz) | `d179c809da68cc4530910dd1a7c3749598bd40f5b7a773b2b3a9b9d0b0e25c5a0fa8f2caa8f1874b7168d2acb708f0d5014ca4f4721252ce414e36734485e32b` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-s390x.tar.gz) | `fdc8ffccd1c5a2e225f19b52eabceae5e8fac5e599235797b96d37223df10d45f70218dcbf5027a00db0129929fe179cd16b1f42ae2a6e7a4d020a642cd03981` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-amd64.tar.gz) | `12c6139a4b497220f239f6c5e9a9b2e864d6dc298495ef4243b780fcf6c9c3aab53c88fa33d8527ed45d79de707cbce733e0c34c06b10fe2a07b4c3daafc0f50` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-arm.tar.gz) | `53e14c9dd53562747dcfdfff7738bccdd369a2bd6f550e1ce181aa219e48c0fe92f786c4ed8d4f62fada48018917d573e4e63c0168bf205b707309ef78bac9b5` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-arm64.tar.gz) | `5917436bdafab57f6564d6e32819b28f32d373bdb22ae53a46f7c7510283ffa87199d08db31862f8db286d5e96a37e299f8a31f0fd630bfd94698ba58b16e9af` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-ppc64le.tar.gz) | `12a8ca3c87f165ef4eb493adcd3038d5689c592b411ebbbc97741b1de67a40f91fed7c83d0bf97bd59719c8d08e686c49e6d6dd9c6ef24b80010eb0777614187` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-s390x.tar.gz) | `1919f8b370199261803ec856e558ad75100cf6db8f5619be5710f528a46a6c58692d659bb11525e351fd46673765348050ea6f1a7427fd458386f807040b67eb` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-windows-amd64.tar.gz) 
| `86057b6ca519a6b454a4b898c7a12f12a2bb25c8be85e53fd2c9b1e4191e334611ca87e679b5832acdd37c05486972eb9c0b4c4bcbf4b688239d9482a9590745` + +## Changelog since v1.14.1 + +### Other notable changes + +* Update to use go 1.12.4 ([#76576](https://github.com/kubernetes/kubernetes/pull/76576), [@cblecker](https://github.com/cblecker)) +* Update to use go 1.12.5 ([#77528](https://github.com/kubernetes/kubernetes/pull/77528), [@cblecker](https://github.com/cblecker)) +* Check if container memory stats are available before accessing it ([#77656](https://github.com/kubernetes/kubernetes/pull/77656), [@yastij](https://github.com/yastij)) +* Bump addon-manager to v9.0.1 ([#77282](https://github.com/kubernetes/kubernetes/pull/77282), [@MrHohn](https://github.com/MrHohn)) + * - Rebase image on debian-base:v1.0.0 +* If a pod has a running instance, the stats of its previously terminated instances will not show up in the kubelet summary stats any more for CRI runtimes like containerd and cri-o. ([#77426](https://github.com/kubernetes/kubernetes/pull/77426), [@Random-Liu](https://github.com/Random-Liu)) + * This keeps the behavior consistent with Docker integration, and fixes an issue that some container Prometheus metrics don't work when there are summary stats for multiple instances of the same pod. +* Add name validation for dynamic client methods in client-go ([#75072](https://github.com/kubernetes/kubernetes/pull/75072), [@lblackstone](https://github.com/lblackstone)) +* Fix issue in Portworx volume driver causing controller manager to crash ([#76341](https://github.com/kubernetes/kubernetes/pull/76341), [@harsh-px](https://github.com/harsh-px)) +* Fixes segmentation fault issue with Protobuf library when log entries are deeply nested. ([#77224](https://github.com/kubernetes/kubernetes/pull/77224), [@qingling128](https://github.com/qingling128)) +* Update Cluster Autoscaler to 1.14.2 ([#77064](https://github.com/kubernetes/kubernetes/pull/77064), [@losipiuk](https://github.com/losipiuk)) + * - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.2 + * - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.1 +* Fixes an error with stuck informers when an etcd watch receives update or delete events with missing data ([#76675](https://github.com/kubernetes/kubernetes/pull/76675), [@ryanmcnamara](https://github.com/ryanmcnamara)) +* [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.2 to pick up security fixes. ([#76762](https://github.com/kubernetes/kubernetes/pull/76762), [@serathius](https://github.com/serathius)) +* specify azure file share name in azure file plugin ([#76988](https://github.com/kubernetes/kubernetes/pull/76988), [@andyzhangx](https://github.com/andyzhangx)) +* Windows nodes on GCE use a known-working 1809 image rather than the latest 1809 image. ([#76722](https://github.com/kubernetes/kubernetes/pull/76722), [@pjh](https://github.com/pjh)) +* kube-proxy: os exit when CleanupAndExit is set to true ([#76732](https://github.com/kubernetes/kubernetes/pull/76732), [@JieJhih](https://github.com/JieJhih)) +* Clean links handling in cp's tar code ([#76788](https://github.com/kubernetes/kubernetes/pull/76788), [@soltysh](https://github.com/soltysh)) +* Adds a new "storage_operation_status_count" metric for kube-controller-manager and kubelet to count success and error statues. 
([#75750](https://github.com/kubernetes/kubernetes/pull/75750), [@msau42](https://github.com/msau42)) +* kubeadm: Fix a bug where if couple of CRIs are installed a user override of the CRI during join (via kubeadm join --cri-socket ...) is ignored and kubeadm bails out with an error ([#76505](https://github.com/kubernetes/kubernetes/pull/76505), [@rosti](https://github.com/rosti)) +* fix detach azure disk back off issue which has too big lock in failure retry condition ([#76573](https://github.com/kubernetes/kubernetes/pull/76573), [@andyzhangx](https://github.com/andyzhangx)) +* Ensure the backend pools are set correctly for Azure SLB with multiple backend pools (e.g. outbound rules) ([#76691](https://github.com/kubernetes/kubernetes/pull/76691), [@feiskyer](https://github.com/feiskyer)) +* fix azure disk list corruption issue ([#77187](https://github.com/kubernetes/kubernetes/pull/77187), [@andyzhangx](https://github.com/andyzhangx)) +* [IPVS] Introduces flag ipvs-strict-arp to configure stricter ARP sysctls, defaulting to false to preserve existing behaviors. This was enabled by default in 1.13.0, which impacted a few CNI plugins. ([#75295](https://github.com/kubernetes/kubernetes/pull/75295), [@lbernail](https://github.com/lbernail)) +* [metrics-server addon] Restore connecting to nodes via IP addresses ([#76819](https://github.com/kubernetes/kubernetes/pull/76819), [@serathius](https://github.com/serathius)) +* Fixes a NPD bug on GCI, so that it disables glog writing to files for log-counter ([#76211](https://github.com/kubernetes/kubernetes/pull/76211), [@wangzhen127](https://github.com/wangzhen127)) +* Fixes bug in DaemonSetController causing it to stop processing some DaemonSets for 5 minutes after node removal. ([#76060](https://github.com/kubernetes/kubernetes/pull/76060), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) + + + # v1.14.1 [Documentation](https://docs.k8s.io)
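Editor's note, referring back to PATCH 190 and PATCH 191 above (probe fixtures and the tcp probe test): the refactor replaces large inline `v1.Pod` literals with small builders (`testWebServerPodSpec`, `busyBoxPodSpec`, `livenessPodSpec`) and handler helpers (`execHandler`, `httpGetHandler`, `tcpSocketHandler`), which is why the diffstat shows 128 insertions against 215 deletions. The fragment below is a hedged usage sketch of how those fixtures compose into a new probe test; it assumes it sits inside the `Describe` block of `test/e2e/common/container_probe.go` (so `f`, the helpers, and the constants are in scope) and the readiness/liveness pairing chosen here is illustrative, not part of the patches.

```go
// Sketch only: pair a TCP readiness probe with an HTTP liveness probe on the
// standard test-webserver pod, reusing the fixtures introduced by PATCH 190/191.
readinessProbe := &v1.Probe{
	Handler:             tcpSocketHandler(80), // helper added in PATCH 191
	InitialDelaySeconds: probeTestInitialDelaySeconds,
}
livenessProbe := &v1.Probe{
	Handler:          httpGetHandler("/", 80), // helper added in PATCH 190
	TimeoutSeconds:   5,
	FailureThreshold: 5,
}
// Both probes plug into the same pod builder; port 80 matches the
// test-webserver image that testWebServerPodSpec uses.
pod := testWebServerPodSpec(readinessProbe, livenessProbe, "test-webserver", 80)
// The webserver keeps serving on port 80, so no restarts are expected.
runLivenessTest(f, pod, 0, defaultObservationTimeout)
```

The design benefit shown here is that a new probe scenario is now a handful of lines (choose a handler, set thresholds, pick a pod builder) instead of a repeated 30-line pod literal, which is what made the follow-up tcp:8080 test in PATCH 191 a 23-line addition.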