From bef7f39d0c7856de375ec10a5662fedcc109a6fe Mon Sep 17 00:00:00 2001
From: Dan Winship
Date: Wed, 13 Feb 2019 21:20:35 -0500
Subject: [PATCH 001/209] Request network plugin in bug report template

---
 .github/ISSUE_TEMPLATE/bug-report.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
index 999c3a6628b..b6cf0c9eb05 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -22,4 +22,5 @@ labels: kind/bug
 - OS (e.g. from /etc/os-release):
 - Kernel (e.g. `uname -a`):
 - Install tools:
+- Network plugin and version (if this is a network-related bug):
 - Others:

From b2ce446a8811ff65044a30173081df13696d1cce Mon Sep 17 00:00:00 2001
From: WanLinghao
Date: Tue, 5 Mar 2019 15:21:29 +0800
Subject: [PATCH 002/209] Migrate the oom watcher so that it no longer relies
 on cAdvisor; this is part of the plan to remove cAdvisor from k8s. For more
 information about this plan, please refer to:
 https://github.com/kubernetes/kubernetes/issues/68522

---
 pkg/kubelet/BUILD | 5 +-
 pkg/kubelet/kubelet.go | 5 +-
 pkg/kubelet/oom/BUILD | 82 +++++++++++++++++++
 .../oom_watcher_linux.go} | 43 ++++------
 .../oom_watcher_linux_test.go} | 6 +-
 pkg/kubelet/oom/oom_watcher_unsupported.go | 35 ++++++++
 pkg/kubelet/oom/types.go | 25 ++++++
 7 files changed, 167 insertions(+), 34 deletions(-)
 create mode 100644 pkg/kubelet/oom/BUILD
 rename pkg/kubelet/{oom_watcher.go => oom/oom_watcher_linux.go} (52%)
 rename pkg/kubelet/{oom_watcher_test.go => oom/oom_watcher_linux_test.go} (84%)
 create mode 100644 pkg/kubelet/oom/oom_watcher_unsupported.go
 create mode 100644 pkg/kubelet/oom/types.go

diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD
index b824a8dbbb5..571d612a12f 100644
--- a/pkg/kubelet/BUILD
+++ b/pkg/kubelet/BUILD
@@ -21,7 +21,6 @@ go_library(
         "kubelet_pods.go",
         "kubelet_resources.go",
         "kubelet_volumes.go",
-        "oom_watcher.go",
         "pod_container_deletor.go",
         "pod_workers.go",
         "reason_cache.go",
@@ -72,6 +71,7 @@ go_library(
         "//pkg/kubelet/network/dns:go_default_library",
         "//pkg/kubelet/nodelease:go_default_library",
         "//pkg/kubelet/nodestatus:go_default_library",
+        "//pkg/kubelet/oom:go_default_library",
         "//pkg/kubelet/pleg:go_default_library",
         "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/preemption:go_default_library",
@@ -144,7 +144,6 @@ go_library(
         "//staging/src/k8s.io/node-api/pkg/client/clientset/versioned:go_default_library",
         "//third_party/forked/golang/expansion:go_default_library",
         "//vendor/github.com/golang/groupcache/lru:go_default_library",
-        "//vendor/github.com/google/cadvisor/events:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
@@ -168,7 +167,6 @@ go_test(
         "kubelet_test.go",
         "kubelet_volumes_linux_test.go",
         "kubelet_volumes_test.go",
-        "oom_watcher_test.go",
         "pod_container_deletor_test.go",
         "pod_workers_test.go",
         "reason_cache_test.go",
@@ -297,6 +295,7 @@ filegroup(
         "//pkg/kubelet/network:all-srcs",
         "//pkg/kubelet/nodelease:all-srcs",
         "//pkg/kubelet/nodestatus:all-srcs",
+        "//pkg/kubelet/oom:all-srcs",
         "//pkg/kubelet/pleg:all-srcs",
         "//pkg/kubelet/pod:all-srcs",
         "//pkg/kubelet/preemption:all-srcs",
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 16d98618682..b33282bb7b2 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -80,6 +80,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network/dns" "k8s.io/kubernetes/pkg/kubelet/nodelease" + oomwatcher "k8s.io/kubernetes/pkg/kubelet/oom" "k8s.io/kubernetes/pkg/kubelet/pleg" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/preemption" @@ -469,7 +470,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, containerRefManager := kubecontainer.NewRefManager() - oomWatcher := NewOOMWatcher(kubeDeps.CAdvisorInterface, kubeDeps.Recorder) + oomWatcher := oomwatcher.NewOOMWatcher(kubeDeps.Recorder) clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS)) for _, ipEntry := range kubeCfg.ClusterDNS { @@ -1088,7 +1089,7 @@ type Kubelet struct { os kubecontainer.OSInterface // Watcher of out of memory events. - oomWatcher OOMWatcher + oomWatcher oomwatcher.OOMWatcher // Monitor resource usage resourceAnalyzer serverstats.ResourceAnalyzer diff --git a/pkg/kubelet/oom/BUILD b/pkg/kubelet/oom/BUILD new file mode 100644 index 00000000000..0e852f1fce7 --- /dev/null +++ b/pkg/kubelet/oom/BUILD @@ -0,0 +1,82 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "oom_watcher_linux.go", + "oom_watcher_unsupported.go", + "types.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/oom", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/github.com/google/cadvisor/utils/oomparser:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "go_default_test", + srcs = ["oom_watcher_linux_test.go"], + embed = [":go_default_library"], + deps = select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], 
+ visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubelet/oom_watcher.go b/pkg/kubelet/oom/oom_watcher_linux.go similarity index 52% rename from pkg/kubelet/oom_watcher.go rename to pkg/kubelet/oom/oom_watcher_linux.go index 1ca014b4bab..77c75bf65cb 100644 --- a/pkg/kubelet/oom_watcher.go +++ b/pkg/kubelet/oom/oom_watcher_linux.go @@ -1,3 +1,5 @@ +// +build linux + /* Copyright 2015 The Kubernetes Authors. @@ -14,61 +16,52 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubelet +package oom import ( - "github.com/google/cadvisor/events" - cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/record" "k8s.io/klog" - "k8s.io/kubernetes/pkg/kubelet/cadvisor" + + "github.com/google/cadvisor/utils/oomparser" ) -// OOMWatcher defines the interface of OOM watchers. -type OOMWatcher interface { - Start(ref *v1.ObjectReference) error -} - type realOOMWatcher struct { - cadvisor cadvisor.Interface recorder record.EventRecorder } +var _ OOMWatcher = &realOOMWatcher{} + // NewOOMWatcher creates and initializes a OOMWatcher based on parameters. -func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) OOMWatcher { +func NewOOMWatcher(recorder record.EventRecorder) OOMWatcher { return &realOOMWatcher{ - cadvisor: cadvisor, recorder: recorder, } } const systemOOMEvent = "SystemOOM" -// Watches cadvisor for system oom's and records an event for every system oom encountered. +// Watches for system oom's and records an event for every system oom encountered. func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error { - request := events.Request{ - EventType: map[cadvisorapi.EventType]bool{ - cadvisorapi.EventOom: true, - }, - ContainerName: "/", - IncludeSubcontainers: false, - } - eventChannel, err := ow.cadvisor.WatchEvents(&request) + oomLog, err := oomparser.New() if err != nil { return err } + outStream := make(chan *oomparser.OomInstance, 10) + go oomLog.StreamOoms(outStream) go func() { defer runtime.HandleCrash() - for event := range eventChannel.GetChannel() { - klog.V(2).Infof("Got sys oom event from cadvisor: %v", event) - ow.recorder.PastEventf(ref, metav1.Time{Time: event.Timestamp}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered") + for event := range outStream { + if event.ContainerName == "/" { + klog.V(1).Infof("Got sys oom event: %v", event) + ow.recorder.PastEventf(ref, metav1.Time{Time: event.TimeOfDeath}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered") + } } - klog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor") + klog.Errorf("Unexpectedly stopped receiving OOM notifications") }() return nil } diff --git a/pkg/kubelet/oom_watcher_test.go b/pkg/kubelet/oom/oom_watcher_linux_test.go similarity index 84% rename from pkg/kubelet/oom_watcher_test.go rename to pkg/kubelet/oom/oom_watcher_linux_test.go index 6fd0287e571..5aba6e41424 100644 --- a/pkg/kubelet/oom_watcher_test.go +++ b/pkg/kubelet/oom/oom_watcher_linux_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubelet +package oom import ( "testing" @@ -23,14 +23,12 @@ import ( "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" - cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" ) func TestBasic(t *testing.T) { fakeRecorder := &record.FakeRecorder{} - mockCadvisor := &cadvisortest.Fake{} node := &v1.ObjectReference{} - oomWatcher := NewOOMWatcher(mockCadvisor, fakeRecorder) + oomWatcher := NewOOMWatcher(fakeRecorder) assert.NoError(t, oomWatcher.Start(node)) // TODO: Improve this test once cadvisor exports events.EventChannel as an interface diff --git a/pkg/kubelet/oom/oom_watcher_unsupported.go b/pkg/kubelet/oom/oom_watcher_unsupported.go new file mode 100644 index 00000000000..aea98ae71ca --- /dev/null +++ b/pkg/kubelet/oom/oom_watcher_unsupported.go @@ -0,0 +1,35 @@ +// +build !linux + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package oom + +import ( + "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" +) + +type oomWatcherUnsupported struct{} + +var _ OOMWatcher = new(oomWatcherUnsupported) + +func NewOOMWatcher(_ record.EventRecorder) OOMWatcher { + return &oomWatcherUnsupported{} +} + +func (ow *oomWatcherUnsupported) Start(_ *v1.ObjectReference) error { + return nil +} diff --git a/pkg/kubelet/oom/types.go b/pkg/kubelet/oom/types.go new file mode 100644 index 00000000000..222ef8d9830 --- /dev/null +++ b/pkg/kubelet/oom/types.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package oom + +import ( + "k8s.io/api/core/v1" +) + +// OOMWatcher defines the interface of OOM watchers. +type OOMWatcher interface { + Start(ref *v1.ObjectReference) error +} From c16d9193c1c1b182aad312283690e6d178aaadbe Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Tue, 5 Mar 2019 17:22:35 +0800 Subject: [PATCH 003/209] bazel fix --- pkg/kubelet/oom/oom_watcher_unsupported.go | 1 + pkg/kubelet/oom/types.go | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/kubelet/oom/oom_watcher_unsupported.go b/pkg/kubelet/oom/oom_watcher_unsupported.go index aea98ae71ca..3dd83e8a765 100644 --- a/pkg/kubelet/oom/oom_watcher_unsupported.go +++ b/pkg/kubelet/oom/oom_watcher_unsupported.go @@ -15,6 +15,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ + package oom import ( diff --git a/pkg/kubelet/oom/types.go b/pkg/kubelet/oom/types.go index 222ef8d9830..09d50b181de 100644 --- a/pkg/kubelet/oom/types.go +++ b/pkg/kubelet/oom/types.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package oom import ( From 611563c913fd8252404e73444f2efaafc16cbe22 Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Wed, 6 Mar 2019 19:47:54 +0800 Subject: [PATCH 004/209] golint fix --- hack/.golint_failures | 1 + pkg/kubelet/oom/oom_watcher_unsupported.go | 1 + 2 files changed, 2 insertions(+) diff --git a/hack/.golint_failures b/hack/.golint_failures index c028ddbbcc0..204d0b8de9e 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -190,6 +190,7 @@ pkg/kubelet/dockershim/network/testing pkg/kubelet/events pkg/kubelet/lifecycle pkg/kubelet/metrics +pkg/kubelet/oom pkg/kubelet/pod pkg/kubelet/pod/testing pkg/kubelet/preemption diff --git a/pkg/kubelet/oom/oom_watcher_unsupported.go b/pkg/kubelet/oom/oom_watcher_unsupported.go index 3dd83e8a765..b7f1530b17d 100644 --- a/pkg/kubelet/oom/oom_watcher_unsupported.go +++ b/pkg/kubelet/oom/oom_watcher_unsupported.go @@ -27,6 +27,7 @@ type oomWatcherUnsupported struct{} var _ OOMWatcher = new(oomWatcherUnsupported) +// NewOOMWatcher creates a fake one here func NewOOMWatcher(_ record.EventRecorder) OOMWatcher { return &oomWatcherUnsupported{} } From f76de72816f4195df5990fd4716e652466d2ec8e Mon Sep 17 00:00:00 2001 From: "Junyoung, Sung" Date: Fri, 8 Mar 2019 00:54:58 +0900 Subject: [PATCH 005/209] Fix typo in IsLikelyNotMountPoint function comment: --bin -> --bind --- pkg/util/mount/mount_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index 9ffd766a51b..42cb791a4b0 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -226,7 +226,7 @@ func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { // IsLikelyNotMountPoint determines if a directory is not a mountpoint. // It is fast but not necessarily ALWAYS correct. If the path is in fact // a bind mount from one part of a mount to another it will not be detected. -// mkdir /tmp/a /tmp/b; mount --bin /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b") +// mkdir /tmp/a /tmp/b; mount --bind /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b") // will return true. When in fact /tmp/b is a mount point. If this situation // if of interest to you, don't use this function... func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { From 6cc4513562c4a03d1e81656724b24fd803174ec8 Mon Sep 17 00:00:00 2001 From: "Junyoung, Sung" Date: Fri, 8 Mar 2019 00:54:07 +0900 Subject: [PATCH 006/209] Fix old link for issue/pr kind label in pull request template --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c99222977cc..245aa98617c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,7 +2,7 @@ 1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide 2. 
Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
-https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
+https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
 3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
 4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
 5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md

From ae4ccc91b2812460ec8b839c7b51850626cb999a Mon Sep 17 00:00:00 2001
From: aojeagarcia
Date: Sat, 9 Mar 2019 13:24:13 +0100
Subject: [PATCH 007/209] Add IPv6 support to the Downward e2e API test

The current regex used in the Downward e2e API tests matches only IPv4
addresses, so those tests fail with IPv6 clusters. This patch modifies
the regex to match IPv4 and IPv6 addresses.

Ref: https://github.com/kubernetes/kubernetes/issues/70248
---
 test/e2e/common/downward_api.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go
index bd7ac61205c..0055af21423 100644
--- a/test/e2e/common/downward_api.go
+++ b/test/e2e/common/downward_api.go
@@ -78,7 +78,7 @@ var _ = Describe("[sig-node] Downward API", func() {
 		expectations := []string{
 			fmt.Sprintf("POD_NAME=%v", podName),
 			fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
-			"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
+			"POD_IP=(?:\\d+\\.\\d+\\.\\d+\\.\\d+|[a-fA-F0-9:]+)",
 		}

 		testDownwardAPI(f, podName, env, expectations)
@@ -105,7 +105,7 @@ var _ = Describe("[sig-node] Downward API", func() {
 		}

 		expectations := []string{
-			"HOST_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
+			"HOST_IP=(?:\\d+\\.\\d+\\.\\d+\\.\\d+|[a-fA-F0-9:]+)",
 		}

 		testDownwardAPI(f, podName, env, expectations)

From d5d57baa3607125aa3afa7bf5a4cc4f305bf7ce0 Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Sun, 10 Mar 2019 16:47:53 +0100
Subject: [PATCH 008/209] Add IPv6 support to the e2e healthz test

The test [k8s.io] Probing container [It] should not be restarted with a
/healthz http liveness probe [NodeConformance] [Conformance] fails
because it's using an nginx image that spawns a server listening only on
IPv4 by default. Switching to an image like TestWebserver, which listens
on both IPv4 and IPv6 by default, allows the test to run in IPv4 and
IPv6 environments.
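As background, a minimal hedged sketch of the probe shape this test builds, using the same core/v1 types as the diff below; the image reference is an illustrative stand-in for the dual-stack test webserver, not a value taken from this patch:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// The kubelet probes the pod IP directly, so the container image
	// must listen on whichever address family the cluster assigns.
	c := v1.Container{
		Name:  "liveness",
		Image: "example.com/test-webserver:latest", // illustrative image
		Ports: []v1.ContainerPort{{ContainerPort: 80}},
		LivenessProbe: &v1.Probe{
			Handler: v1.Handler{
				HTTPGet: &v1.HTTPGetAction{Path: "/", Port: intstr.FromInt(80)},
			},
			InitialDelaySeconds: 15,
		},
	}
	fmt.Println(c.LivenessProbe.Handler.HTTPGet.Path)
}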
Reference: https://github.com/kubernetes/kubernetes/issues/70248
---
 test/e2e/common/container_probe.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go
index 9988a29919e..dfe3114756d 100644
--- a/test/e2e/common/container_probe.go
+++ b/test/e2e/common/container_probe.go
@@ -252,7 +252,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 			Containers: []v1.Container{
 				{
 					Name:  "liveness",
-					Image: imageutils.GetE2EImage(imageutils.Nginx),
+					Image: imageutils.GetE2EImage(imageutils.TestWebserver),
 					Ports: []v1.ContainerPort{{ContainerPort: 80}},
 					LivenessProbe: &v1.Probe{
 						Handler: v1.Handler{

From 527d2aa2bd75fc4b7add814d124de03df7cb0eef Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Sun, 10 Mar 2019 17:24:09 +0100
Subject: [PATCH 009/209] Add IPv6 support to the Container Lifecycle tests

The current e2e tests for the Container Lifecycle Hooks weren't using
brackets for IPv6 URL addresses per RFC 2732, so those tests were
failing. This patch adds brackets to the target URL if it's an IPv6
address.

Reference: https://github.com/kubernetes/kubernetes/issues/70248
---
 test/e2e/common/lifecycle_hook.go | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/test/e2e/common/lifecycle_hook.go b/test/e2e/common/lifecycle_hook.go
index 1790b236b37..fbd89e0d7d0 100644
--- a/test/e2e/common/lifecycle_hook.go
+++ b/test/e2e/common/lifecycle_hook.go
@@ -17,6 +17,8 @@ limitations under the License.
 package common

 import (
+	"fmt"
+	"strings"
 	"time"

 	"k8s.io/api/core/v1"
@@ -38,7 +40,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 		preStopWaitTimeout = 30 * time.Second
 	)
 	Context("when create a pod with lifecycle hook", func() {
-		var targetIP string
+		var targetIP, targetURL string
 		podHandleHookRequest := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "pod-handle-http-request",
@@ -63,6 +65,10 @@
 			By("create the container to handle the HTTPGet hook request.")
 			newPod := podClient.CreateSync(podHandleHookRequest)
 			targetIP = newPod.Status.PodIP
+			targetURL = targetIP
+			if strings.Contains(targetIP, ":") {
+				targetURL = fmt.Sprintf("[%s]", targetIP)
+			}
 		})
 		testPodWithHook := func(podWithHook *v1.Pod) {
 			By("create the pod with lifecycle hook")
@@ -93,7 +99,7 @@
 			lifecycle := &v1.Lifecycle{
 				PostStart: &v1.Handler{
 					Exec: &v1.ExecAction{
-						Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=poststart"},
+						Command: []string{"sh", "-c", "curl http://" + targetURL + ":8080/echo?msg=poststart"},
 					},
 				},
 			}
@@ -109,7 +115,7 @@
 			lifecycle := &v1.Lifecycle{
 				PreStop: &v1.Handler{
 					Exec: &v1.ExecAction{
-						Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=prestop"},
+						Command: []string{"sh", "-c", "curl http://" + targetURL + ":8080/echo?msg=prestop"},
 					},
 				},
 			}

From 183247ca5cab838ea6479a1a54a5a4555e09f1ea Mon Sep 17 00:00:00 2001
From: Bruce Ma
Date: Mon, 18 Mar 2019 21:22:34 +0800
Subject: [PATCH 010/209] change bandwidth units from Kb to b

Signed-off-by: Bruce Ma
---
 pkg/kubelet/dockershim/network/cni/cni.go | 7 +++++--
 pkg/kubelet/dockershim/network/cni/cni_test.go | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/pkg/kubelet/dockershim/network/cni/cni.go b/pkg/kubelet/dockershim/network/cni/cni.go
index
af5bb74defe..fcf33f4fef0 100644 --- a/pkg/kubelet/dockershim/network/cni/cni.go +++ b/pkg/kubelet/dockershim/network/cni/cni.go @@ -398,11 +398,14 @@ func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string if ingress != nil || egress != nil { bandwidthParam := cniBandwidthEntry{} if ingress != nil { - bandwidthParam.IngressRate = int(ingress.Value() / 1000) + // see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md and + // https://github.com/containernetworking/plugins/blob/master/plugins/meta/bandwidth/README.md + // Rates are in bits per second, burst values are in bits. + bandwidthParam.IngressRate = int(ingress.Value()) bandwidthParam.IngressBurst = math.MaxInt32 // no limit } if egress != nil { - bandwidthParam.EgressRate = int(egress.Value() / 1000) + bandwidthParam.EgressRate = int(egress.Value()) bandwidthParam.EgressBurst = math.MaxInt32 // no limit } rt.CapabilityArgs["bandwidth"] = bandwidthParam diff --git a/pkg/kubelet/dockershim/network/cni/cni_test.go b/pkg/kubelet/dockershim/network/cni/cni_test.go index 241bd1fca5c..997bf973b7f 100644 --- a/pkg/kubelet/dockershim/network/cni/cni_test.go +++ b/pkg/kubelet/dockershim/network/cni/cni_test.go @@ -291,7 +291,7 @@ func TestCNIPlugin(t *testing.T) { t.Errorf("mismatch in expected port mappings. expected %v got %v", expectedMappings, inputConfig.RuntimeConfig.PortMappings) } expectedBandwidth := map[string]interface{}{ - "ingressRate": 1000.0, "egressRate": 1000.0, + "ingressRate": 1000000.0, "egressRate": 1000000.0, "ingressBurst": 2147483647.0, "egressBurst": 2147483647.0, } if !reflect.DeepEqual(inputConfig.RuntimeConfig.Bandwidth, expectedBandwidth) { From a07169bcadf3c35ff456c17bed970ccbca0963cb Mon Sep 17 00:00:00 2001 From: Yongkun Gui Date: Fri, 1 Mar 2019 17:48:48 -0800 Subject: [PATCH 011/209] kube-proxy: Drop packets in INVALID state Fixes: #74839 --- pkg/proxy/iptables/proxier.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index a4f8dc8ef76..a4feb37879f 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -34,7 +34,7 @@ import ( "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" @@ -1298,6 +1298,16 @@ func (proxier *Proxier) syncProxyRules() { } } + // Drop the packets in INVALID state, which would potentially cause + // unexpected connection reset. + // https://github.com/kubernetes/kubernetes/issues/74839 + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "conntrack", + "--ctstate", "INVALID", + "-j", "DROP", + ) + // If the masqueradeMark has been added then we want to forward that same // traffic, this allows NodePort traffic to be forwarded even if the default // FORWARD policy is not accept. From 54c2c2690cb7e33bf20e4f15896aca96618852fc Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Thu, 21 Mar 2019 16:29:47 -0700 Subject: [PATCH 012/209] Update test/e2e/scheduling/nvidia-gpus to also run cuda10 vector add. 
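For reviewers, a standalone sketch of the two-container pod this change produces; the image references and the nvidia.com/gpu resource name here are illustrative assumptions rather than values taken from this diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// cudaContainer builds one vector-add container that asks the device
// plugin for a single GPU via the extended resource name.
func cudaContainer(name, image string, gpu v1.ResourceName) v1.Container {
	return v1.Container{
		Name:  name,
		Image: image,
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				gpu: *resource.NewQuantity(1, resource.DecimalSI),
			},
		},
	}
}

func main() {
	gpu := v1.ResourceName("nvidia.com/gpu") // assumed resource name
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "cuda-vector-add-"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				cudaContainer("vector-addition-cuda8", "example.com/cuda-vector-add:v8", gpu),
				cudaContainer("vector-addition-cuda10", "example.com/cuda-vector-add:v10", gpu),
			},
		},
	}
	fmt.Printf("pod runs %d GPU containers\n", len(pod.Spec.Containers))
}

Note that because both containers sit in one pod, the pod as a whole now
needs two GPUs available on a single node in order to schedule.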
--- test/e2e/scheduling/nvidia-gpus.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 8dc0a3873dd..c3aa9f0ea2c 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -53,7 +53,7 @@ func makeCudaAdditionDevicePluginTestPod() *v1.Pod { RestartPolicy: v1.RestartPolicyNever, Containers: []v1.Container{ { - Name: "vector-addition", + Name: "vector-addition-cuda8", Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd), Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ @@ -61,6 +61,15 @@ func makeCudaAdditionDevicePluginTestPod() *v1.Pod { }, }, }, + { + Name: "vector-addition-cuda10", + Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd2), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + gpuResourceName: *resource.NewQuantity(1, resource.DecimalSI), + }, + }, + }, }, }, } From 62d8081eda2dd76af9e467663a5ca7e5115f8439 Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Fri, 29 Mar 2019 13:27:10 +0800 Subject: [PATCH 013/209] Fix a log info error --- pkg/kubelet/cm/container_manager_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index d62e308fb21..79e647b8c97 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -256,7 +256,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I // the input is provided in that format. // this is important because we do not want any name conversion to occur. if !cgroupManager.Exists(cgroupRoot) { - return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err) + return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist", cgroupRoot) } klog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) // Include the top level cgroup for enforcing node allocatable into cgroup-root. From bc279da872fb764c419e4a96eb4082d4b82770ba Mon Sep 17 00:00:00 2001 From: SataQiu Date: Thu, 4 Apr 2019 23:20:52 +0800 Subject: [PATCH 014/209] fix some shellcheck failures of cluster/*.sh --- cluster/clientbin.sh | 2 +- cluster/common.sh | 2 +- cluster/log-dump/log-dump.sh | 2 +- cluster/pre-existing/util.sh | 2 +- cluster/test-e2e.sh | 2 +- cluster/test-network.sh | 2 +- cluster/test-smoke.sh | 2 +- cluster/validate-cluster.sh | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cluster/clientbin.sh b/cluster/clientbin.sh index 078729eb9d1..dc4f673f17c 100755 --- a/cluster/clientbin.sh +++ b/cluster/clientbin.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=${KUBE_ROOT:-$(dirname "${BASH_SOURCE}")/..} +KUBE_ROOT=${KUBE_ROOT:-$(dirname "${BASH_SOURCE[0]}")/..} # Detect the OS name/arch so that we can find our binary case "$(uname -s)" in diff --git a/cluster/common.sh b/cluster/common.sh index a2613b55800..052f2e1fa02 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -20,7 +20,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd) +KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/.. 
&& pwd) DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config" diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh index d9defea525c..650295d6c21 100755 --- a/cluster/log-dump/log-dump.sh +++ b/cluster/log-dump/log-dump.sh @@ -63,7 +63,7 @@ readonly max_dump_processes=25 # TODO: Get rid of all the sourcing of bash dependencies eventually. function setup() { - KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. + KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. if [[ -z "${use_custom_instance_list}" ]]; then : ${KUBE_CONFIG_FILE:="config-test.sh"} echo "Sourcing kube-util.sh" diff --git a/cluster/pre-existing/util.sh b/cluster/pre-existing/util.sh index f77b15e910e..48d0199d0ef 100644 --- a/cluster/pre-existing/util.sh +++ b/cluster/pre-existing/util.sh @@ -18,7 +18,7 @@ # pre-existing Kubernetes master. See test/kubemark/pre-existing/README.md # for me details on using a pre-existing provider. -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/hack/lib/util.sh" diff --git a/cluster/test-e2e.sh b/cluster/test-e2e.sh index 4ea71b80c75..b42f8278134 100755 --- a/cluster/test-e2e.sh +++ b/cluster/test-e2e.sh @@ -21,7 +21,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/cluster/kube-util.sh" echo "Testing cluster with provider: ${KUBERNETES_PROVIDER}" 1>&2 diff --git a/cluster/test-network.sh b/cluster/test-network.sh index 239bb8aa153..cd0d159e8ef 100755 --- a/cluster/test-network.sh +++ b/cluster/test-network.sh @@ -24,7 +24,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. KUBE_CONFIG_FILE="config-default.sh" ${KUBE_ROOT}/hack/ginkgo-e2e.sh --ginkgo.focus=Networking diff --git a/cluster/test-smoke.sh b/cluster/test-smoke.sh index 3e0561d8989..b5e6d34f92d 100755 --- a/cluster/test-smoke.sh +++ b/cluster/test-smoke.sh @@ -23,7 +23,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. TEST_ARGS="$@" diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index c28dcb0df5a..a2848149e73 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -24,7 +24,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then source "${KUBE_ROOT}/cluster/env.sh" From 5549a330c26460b5be8fde178212bdc26e34fce6 Mon Sep 17 00:00:00 2001 From: yankaiz Date: Mon, 1 Apr 2019 18:05:41 -0700 Subject: [PATCH 015/209] Bump debian-iptables versions to v11.0.2. --- build/common.sh | 4 ++-- build/workspace.bzl | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/build/common.sh b/build/common.sh index 8aa2a62746d..5bfb754671f 100755 --- a/build/common.sh +++ b/build/common.sh @@ -89,8 +89,8 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730 # $1 - server architecture kube::build::get_docker_wrapped_binaries() { local arch=$1 - local debian_base_version=0.4.1 - local debian_iptables_version=v11.0.1 + local debian_base_version=v1.0.0 + local debian_iptables_version=v11.0.2 ### If you change any of these lists, please also update DOCKERIZED_BINARIES ### in build/BUILD. 
And kube::golang::server_image_targets local targets=( diff --git a/build/workspace.bzl b/build/workspace.bzl index cffc0cb71a6..e6924d8f75e 100644 --- a/build/workspace.bzl +++ b/build/workspace.bzl @@ -46,7 +46,7 @@ _ETCD_TARBALL_ARCH_SHA256 = { # list to each of its platform-specific images in # debian_image_dependencies(). _DEBIAN_BASE_DIGEST = "sha256:6966a0aedd7592c18ff2dd803c08bd85780ee19f5e3a2e7cf908a4cd837afcde" # 0.4.1 -_DEBIAN_IPTABLES_DIGEST = "sha256:656e45c00083359107b1d6ae0411ff3894ba23011a8533e229937a71be84e063" # v11.0.1 +_DEBIAN_IPTABLES_DIGEST = "sha256:b522b0035dba3ac2d5c0dbaaf8217bd66248e790332ccfdf653e0f943a280dcf" # v11.0.2 _DEBIAN_HYPERKUBE_BASE_DIGEST = "sha256:8cabe02be6e86685d8860b7ace7c7addc9591a339728703027a4854677f1c772" # 0.12.1 # Dependencies needed for a Kubernetes "release", e.g. building docker images, From f7f51fab2a01d2d529593934f43415fe3a93fd8f Mon Sep 17 00:00:00 2001 From: Yubao Liu Date: Sun, 7 Apr 2019 20:41:25 +0800 Subject: [PATCH 016/209] change default 5s ttl to 30s for coredns to be same with kube-dns/dnsmasq --- cluster/addons/dns/coredns/coredns.yaml.base | 1 + cluster/addons/dns/coredns/coredns.yaml.in | 1 + cluster/addons/dns/coredns/coredns.yaml.sed | 1 + cmd/kubeadm/app/phases/addons/dns/manifests.go | 1 + test/e2e/network/dns_configmap.go | 5 +++++ 5 files changed, 9 insertions(+) diff --git a/cluster/addons/dns/coredns/coredns.yaml.base b/cluster/addons/dns/coredns/coredns.yaml.base index 6936dc79215..efc946b144a 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.base +++ b/cluster/addons/dns/coredns/coredns.yaml.base @@ -68,6 +68,7 @@ data: pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } prometheus :9153 forward . /etc/resolv.conf diff --git a/cluster/addons/dns/coredns/coredns.yaml.in b/cluster/addons/dns/coredns/coredns.yaml.in index 0821820fd2a..17bb9de0320 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.in +++ b/cluster/addons/dns/coredns/coredns.yaml.in @@ -68,6 +68,7 @@ data: pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } prometheus :9153 forward . /etc/resolv.conf diff --git a/cluster/addons/dns/coredns/coredns.yaml.sed b/cluster/addons/dns/coredns/coredns.yaml.sed index 38688ced306..53910bb6507 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.sed +++ b/cluster/addons/dns/coredns/coredns.yaml.sed @@ -68,6 +68,7 @@ data: pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } prometheus :9153 forward . /etc/resolv.conf diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index 3069c12dd16..34f2adc3679 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -318,6 +318,7 @@ data: pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 }{{ .Federation }} prometheus :9153 forward . 
{{ .UpstreamNameserver }} diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index 5426daba443..c979e917dba 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -71,6 +71,7 @@ func (t *dnsFederationsConfigMapTest) run() { pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } federation %v { abc def.com @@ -86,6 +87,7 @@ func (t *dnsFederationsConfigMapTest) run() { pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } federation %v { ghi xyz.com @@ -235,6 +237,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) { pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } forward . %v } @@ -333,6 +336,7 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) { pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } forward . %v }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP), @@ -443,6 +447,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { pods insecure upstream fallthrough in-addr.arpa ip6.arpa + ttl 30 } forward . %v }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP), From 9f2147161ebcb5064408d6b23d6ecbd315797c50 Mon Sep 17 00:00:00 2001 From: Jay Date: Mon, 8 Apr 2019 15:49:29 +0800 Subject: [PATCH 017/209] Fix spell error --- pkg/proxy/iptables/proxier.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 5bdd13c4322..b55209bd492 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -178,7 +178,7 @@ func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint { return &endpointsInfo{BaseEndpointInfo: baseInfo} } -// Equal overrides the Equal() function imlemented by proxy.BaseEndpointInfo. +// Equal overrides the Equal() function implemented by proxy.BaseEndpointInfo. func (e *endpointsInfo) Equal(other proxy.Endpoint) bool { o, ok := other.(*endpointsInfo) if !ok { From 4d72932a9d103e7ded13ce6845d3c9d88b6639f6 Mon Sep 17 00:00:00 2001 From: Rohit Ramkumar Date: Wed, 3 Apr 2019 09:30:10 -0400 Subject: [PATCH 018/209] Remove Ingress-GCE test that verifies backend health check is not reconciled. 
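For anyone carrying this coverage over to the ingress-gce repository, a minimal sketch of the out-of-band health-check mutation the removed test performed; field names follow the vendored google.golang.org/api/compute/v1 types, and the values mirror the removed test below:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// mutateHealthCheck applies the same manual edits the removed test made,
// which the ingress controller was expected not to reconcile away.
func mutateHealthCheck(hc *compute.HealthCheck) {
	hc.TimeoutSec = 25                   // shortened from the 60s default
	hc.HttpHealthCheck.RequestPath = "/" // changed from /healthz
}

func main() {
	hc := &compute.HealthCheck{
		Name:            "example-hc",
		TimeoutSec:      60,
		HttpHealthCheck: &compute.HTTPHealthCheck{RequestPath: "/healthz"},
	}
	mutateHealthCheck(hc)
	fmt.Println(hc.TimeoutSec, hc.HttpHealthCheck.RequestPath)
}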
--- test/e2e/network/BUILD | 1 - test/e2e/network/ingress.go | 69 ------------------------------------- 2 files changed, 70 deletions(-) diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD index fdba7173ec5..4137c71e581 100644 --- a/test/e2e/network/BUILD +++ b/test/e2e/network/BUILD @@ -36,7 +36,6 @@ go_library( "//pkg/controller/endpoint:go_default_library", "//pkg/master/ports:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/networking/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index b102b33aa59..5d08fe87a8b 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -27,7 +27,6 @@ import ( compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -121,74 +120,6 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } }) - It("should not reconcile manually modified health check for ingress", func() { - By("Creating a basic HTTP ingress and wait for it to come up.") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil) - jig.WaitForIngress(true) - - // Get cluster UID. - clusterID, err := gce.GetClusterID(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) - // Get the related nodeports. - nodePorts := jig.GetIngressNodePorts(false) - Expect(len(nodePorts)).ToNot(Equal(0)) - - // Filter health check using cluster UID as the suffix. - By("Retrieving relevant health check resources from GCE.") - gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) - hcs, err := gceCloud.ListHealthChecks() - Expect(err).NotTo(HaveOccurred()) - var hcToChange *compute.HealthCheck - for _, hc := range hcs { - if strings.HasSuffix(hc.Name, clusterID) { - Expect(hc.HttpHealthCheck).NotTo(BeNil()) - if fmt.Sprintf("%d", hc.HttpHealthCheck.Port) == nodePorts[0] { - hcToChange = hc - break - } - } - } - Expect(hcToChange).NotTo(BeNil()) - - By(fmt.Sprintf("Modifying health check %v without involving ingress.", hcToChange.Name)) - // Change timeout from 60s to 25s. - hcToChange.TimeoutSec = 25 - // Change path from /healthz to /. - hcToChange.HttpHealthCheck.RequestPath = "/" - err = gceCloud.UpdateHealthCheck(hcToChange) - Expect(err).NotTo(HaveOccurred()) - - // Add one more path to ingress to trigger resource syncing. - By("Adding a new path to ingress and wait for it to take effect.") - jig.Update(func(ing *extensions.Ingress) { - ing.Spec.Rules = append(ing.Spec.Rules, extensions.IngressRule{ - Host: "ingress.test.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ - { - Path: "/test", - // Copy backend from the first rule. - Backend: ing.Spec.Rules[0].HTTP.Paths[0].Backend, - }, - }, - }, - }, - }) - }) - // Wait for change to take effect before checking the health check resource. - jig.WaitForIngress(false) - - // Validate the modified fields on health check are intact. 
- By("Checking if the modified health check is unchanged.") - hcAfterSync, err := gceCloud.GetHealthCheck(hcToChange.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(hcAfterSync.HttpHealthCheck).ToNot(Equal(nil)) - Expect(hcAfterSync.TimeoutSec).To(Equal(hcToChange.TimeoutSec)) - Expect(hcAfterSync.HttpHealthCheck.RequestPath).To(Equal(hcToChange.HttpHealthCheck.RequestPath)) - }) - It("should create ingress with pre-shared certificate", func() { executePresharedCertTest(f, jig, "") }) From bad985dc3abd7182455062aaa8ed279fc43881f9 Mon Sep 17 00:00:00 2001 From: aaa <1693291525@qq.com> Date: Wed, 10 Apr 2019 17:41:29 -0400 Subject: [PATCH 019/209] Using const() defines constants together --- pkg/kubelet/dockershim/network/plugins.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/dockershim/network/plugins.go b/pkg/kubelet/dockershim/network/plugins.go index bc1173b3ff2..f0e0b9a455d 100644 --- a/pkg/kubelet/dockershim/network/plugins.go +++ b/pkg/kubelet/dockershim/network/plugins.go @@ -36,12 +36,14 @@ import ( utilexec "k8s.io/utils/exec" ) -const DefaultPluginName = "kubernetes.io/no-op" +const ( + DefaultPluginName = "kubernetes.io/no-op" -// Called when the node's Pod CIDR is known when using the -// controller manager's --allocate-node-cidrs=true option -const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" -const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" + // Called when the node's Pod CIDR is known when using the + // controller manager's --allocate-node-cidrs=true option + NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" + NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" +) // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { From c1243ddd05b8ada606a0b1387ec414ba671abec6 Mon Sep 17 00:00:00 2001 From: Pingan2017 Date: Mon, 10 Dec 2018 16:08:32 +0800 Subject: [PATCH 020/209] remove unused func deprecatedAlias --- pkg/kubectl/cmd/BUILD | 1 - pkg/kubectl/cmd/cmd.go | 15 ----------- pkg/kubectl/cmd/cmd_test.go | 54 ------------------------------------- 3 files changed, 70 deletions(-) diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 3672fe88d02..7636237e4cf 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -79,7 +79,6 @@ go_test( deps = [ "//pkg/kubectl/cmd/util:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 2cfe569e93c..35e9e54cc0e 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -555,18 +555,3 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { func runHelp(cmd *cobra.Command, args []string) { cmd.Help() } - -// deprecatedAlias is intended to be used to create a "wrapper" command around -// an existing command. The wrapper works the same but prints a deprecation -// message before running. This command is identical functionality. -func deprecatedAlias(deprecatedVersion string, cmd *cobra.Command) *cobra.Command { - // Have to be careful here because Cobra automatically extracts the name - // of the command from the .Use field. - originalName := cmd.Name() - - cmd.Use = deprecatedVersion - cmd.Deprecated = fmt.Sprintf("use %q instead", originalName) - cmd.Short = fmt.Sprintf("%s. 
This command is deprecated, use %q instead", cmd.Short, originalName) - cmd.Hidden = true - return cmd -} diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index 34da43ebb06..2470b85b139 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -17,16 +17,12 @@ limitations under the License. package cmd import ( - "bytes" "fmt" "io/ioutil" "os" "reflect" - "strings" "testing" - "github.com/spf13/cobra" - "k8s.io/cli-runtime/pkg/genericclioptions" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" ) @@ -57,56 +53,6 @@ func TestNormalizationFuncGlobalExistence(t *testing.T) { } } -func Test_deprecatedAlias(t *testing.T) { - var correctCommandCalled bool - makeCobraCommand := func() *cobra.Command { - cobraCmd := new(cobra.Command) - cobraCmd.Use = "print five lines" - cobraCmd.Run = func(*cobra.Command, []string) { - correctCommandCalled = true - } - return cobraCmd - } - - original := makeCobraCommand() - alias := deprecatedAlias("echo", makeCobraCommand()) - - if len(alias.Deprecated) == 0 { - t.Error("deprecatedAlias should always have a non-empty .Deprecated") - } - if !strings.Contains(alias.Deprecated, "print") { - t.Error("deprecatedAlias should give the name of the new function in its .Deprecated field") - } - if !alias.Hidden { - t.Error("deprecatedAlias should never have .Hidden == false (deprecated aliases should be hidden)") - } - - if alias.Name() != "echo" { - t.Errorf("deprecatedAlias has name %q, expected %q", - alias.Name(), "echo") - } - if original.Name() != "print" { - t.Errorf("original command has name %q, expected %q", - original.Name(), "print") - } - - buffer := new(bytes.Buffer) - alias.SetOutput(buffer) - alias.Execute() - str := buffer.String() - if !strings.Contains(str, "deprecated") || !strings.Contains(str, "print") { - t.Errorf("deprecation warning %q does not include enough information", str) - } - - // It would be nice to test to see that original.Run == alias.Run - // Unfortunately Golang does not allow comparing functions. I could do - // this with reflect, but that's technically invoking undefined - // behavior. Best we can do is make sure that the function is called. 
- if !correctCommandCalled { - t.Errorf("original function doesn't appear to have been called by alias") - } -} - func TestKubectlCommandHandlesPlugins(t *testing.T) { tests := []struct { name string From 119400ad714b97fee7b139ee70c3b2996aa902ab Mon Sep 17 00:00:00 2001 From: aaa <1693291525@qq.com> Date: Thu, 11 Apr 2019 12:16:29 -0400 Subject: [PATCH 021/209] Use constant blocks to define constants in pkg/kubectl --- pkg/kubectl/cmd/apply/apply_test.go | 11 ++++------- pkg/kubectl/cmd/set/set_serviceaccount_test.go | 8 +++++--- pkg/kubectl/explain/model_printer.go | 13 +++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pkg/kubectl/cmd/apply/apply_test.go b/pkg/kubectl/cmd/apply/apply_test.go index adae68a0cc3..18e11835b7d 100644 --- a/pkg/kubectl/cmd/apply/apply_test.go +++ b/pkg/kubectl/cmd/apply/apply_test.go @@ -100,8 +100,10 @@ const ( dirName = "../../../../test/fixtures/pkg/kubectl/cmd/apply/testdir" filenameRCJSON = "../../../../test/fixtures/pkg/kubectl/cmd/apply/rc.json" - filenameWidgetClientside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/widget-clientside.yaml" - filenameWidgetServerside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/widget-serverside.yaml" + filenameWidgetClientside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/widget-clientside.yaml" + filenameWidgetServerside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/widget-serverside.yaml" + filenameDeployObjServerside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/deploy-serverside.yaml" + filenameDeployObjClientside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/deploy-clientside.yaml" ) func readConfigMapList(t *testing.T, filename string) [][]byte { @@ -870,11 +872,6 @@ func testApplyMultipleObjects(t *testing.T, asList bool) { } } -const ( - filenameDeployObjServerside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/deploy-serverside.yaml" - filenameDeployObjClientside = "../../../../test/fixtures/pkg/kubectl/cmd/apply/deploy-clientside.yaml" -) - func readDeploymentFromFile(t *testing.T, file string) []byte { raw := readBytesFromFile(t, file) obj := &appsv1.Deployment{} diff --git a/pkg/kubectl/cmd/set/set_serviceaccount_test.go b/pkg/kubectl/cmd/set/set_serviceaccount_test.go index 2bf7cd42533..a518bcb1e4f 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount_test.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount_test.go @@ -41,14 +41,16 @@ import ( "k8s.io/kubernetes/pkg/kubectl/scheme" ) -const serviceAccount = "serviceaccount1" -const serviceAccountMissingErrString = "serviceaccount is required" -const resourceMissingErrString = `You must provide one or more resources by argument or filename. +const ( + serviceAccount = "serviceaccount1" + serviceAccountMissingErrString = "serviceaccount is required" + resourceMissingErrString = `You must provide one or more resources by argument or filename. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' ' ' ''` +) func TestSetServiceAccountLocal(t *testing.T) { inputs := []struct { diff --git a/pkg/kubectl/explain/model_printer.go b/pkg/kubectl/explain/model_printer.go index f717cf6a6aa..2de90c11f34 100644 --- a/pkg/kubectl/explain/model_printer.go +++ b/pkg/kubectl/explain/model_printer.go @@ -21,12 +21,13 @@ import ( "k8s.io/kube-openapi/pkg/util/proto" ) -// fieldIndentLevel is the level of indentation for fields. -const fieldIndentLevel = 3 - -// descriptionIndentLevel is the level of indentation for the -// description. 
-const descriptionIndentLevel = 5 +const ( + // fieldIndentLevel is the level of indentation for fields. + fieldIndentLevel = 3 + // descriptionIndentLevel is the level of indentation for the + // description. + descriptionIndentLevel = 5 +) // modelPrinter prints a schema in Writer. Its "Builder" will decide if // it's recursive or not. From 7a39c99297dd4e45d10b5033eec8508c54c9ca0e Mon Sep 17 00:00:00 2001 From: aaa <1693291525@qq.com> Date: Thu, 11 Apr 2019 13:47:34 -0400 Subject: [PATCH 022/209] Fix shellcheck failures on test-e2e-node.sh and test-integration.sh --- hack/.shellcheck_failures | 2 -- hack/make-rules/test-e2e-node.sh | 20 ++++++++++---------- hack/make-rules/test-integration.sh | 11 +++++++---- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 4ecd1af012a..43da54485b9 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -34,8 +34,6 @@ ./hack/make-rules/clean.sh ./hack/make-rules/helpers/cache_go_dirs.sh ./hack/make-rules/make-help.sh -./hack/make-rules/test-e2e-node.sh -./hack/make-rules/test-integration.sh ./hack/make-rules/test.sh ./hack/make-rules/update.sh ./hack/make-rules/verify.sh diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh index b656e5e9aa2..0b8efff34ea 100755 --- a/hack/make-rules/test-e2e-node.sh +++ b/hack/make-rules/test-e2e-node.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. source "${KUBE_ROOT}/hack/lib/init.sh" focus=${FOCUS:-""} @@ -26,7 +26,7 @@ skip=${SKIP-"\[Flaky\]|\[Slow\]|\[Serial\]"} # Currently, parallelism only affects when REMOTE=true. For local test, # ginkgo default parallelism (cores - 1) is used. parallelism=${PARALLELISM:-8} -artifacts=${ARTIFACTS:-"/tmp/_artifacts/`date +%y%m%dT%H%M%S`"} +artifacts="${ARTIFACTS:-"/tmp/_artifacts/$(date +%y%m%dT%H%M%S)"}" remote=${REMOTE:-"false"} runtime=${RUNTIME:-"docker"} container_runtime_endpoint=${CONTAINER_RUNTIME_ENDPOINT:-""} @@ -38,7 +38,7 @@ extra_envs=${EXTRA_ENVS:-} # Parse the flags to pass to ginkgo ginkgoflags="" -if [[ ${parallelism} > 1 ]]; then +if [[ ${parallelism} -gt 1 ]]; then ginkgoflags="${ginkgoflags} -nodes=${parallelism} " fi @@ -57,21 +57,21 @@ fi # Setup the directory to copy test artifacts (logs, junit.xml, etc) from remote host to local host if [ ! -d "${artifacts}" ]; then echo "Creating artifacts directory at ${artifacts}" - mkdir -p ${artifacts} + mkdir -p "${artifacts}" fi echo "Test artifacts will be written to ${artifacts}" if [[ ${runtime} == "remote" ]] ; then - if [[ ! -z ${container_runtime_endpoint} ]] ; then + if [[ -n ${container_runtime_endpoint} ]] ; then test_args="--container-runtime-endpoint=${container_runtime_endpoint} ${test_args}" fi - if [[ ! -z ${image_service_endpoint} ]] ; then + if [[ -n ${image_service_endpoint} ]] ; then test_args="--image-service-endpoint=${image_service_endpoint} ${test_args}" fi fi -if [ ${remote} = true ] ; then +if [ "${remote}" = true ] ; then # The following options are only valid in remote run. 
  images=${IMAGES:-""}
  hosts=${HOSTS:-""}
@@ -85,8 +85,8 @@ if [ ${remote} = true ] ; then
   gubernator=${GUBERNATOR:-"false"}
   image_config_file=${IMAGE_CONFIG_FILE:-""}
   if [[ ${hosts} == "" && ${images} == "" && ${image_config_file} == "" ]]; then
-    image_project=${IMAGE_PROJECT:-"cos-cloud"}
-    gci_image=$(gcloud compute images list --project ${image_project} \
+    image_project="${IMAGE_PROJECT:-"cos-cloud"}"
+    gci_image=$(gcloud compute images list --project "${image_project}" \
       --no-standard-images --filter="name ~ 'cos-beta.*'" --format="table[no-heading](name)")
     images=${gci_image}
     metadata="user-data<${KUBE_ROOT}/test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
@@ -116,7 +116,7 @@ if [ ${remote} = true ] ; then
   IFS=',' read -ra IM <<< "${images}"
   images=""
   for i in "${IM[@]}"; do
-    if [[ $(gcloud compute instances list "${instance_prefix}-${i}" | grep ${i}) ]]; then
+    if gcloud compute instances list "${instance_prefix}-${i}" | grep "${i}"; then
       if [[ "${hosts}" != "" ]]; then
         hosts="${hosts},"
       fi
diff --git a/hack/make-rules/test-integration.sh b/hack/make-rules/test-integration.sh
index 47e4dd908a3..fa7b2084e53 100755
--- a/hack/make-rules/test-integration.sh
+++ b/hack/make-rules/test-integration.sh
@@ -18,7 +18,7 @@ set -o errexit
 set -o nounset
 set -o pipefail

-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
 source "${KUBE_ROOT}/hack/lib/init.sh"
 # Lists of API Versions of each groups that should be tested, groups are
 # separated by comma, lists are separated by semicolon. e.g.,
@@ -40,7 +40,7 @@ KUBE_TEST_VMODULE=${KUBE_TEST_VMODULE:-"garbagecollector*=6,graph_builder*=6"}

 kube::test::find_integration_test_dirs() {
   (
-    cd ${KUBE_ROOT}
+    cd "${KUBE_ROOT}"
     find test/integration/ -name '*_test.go' -print0 \
       | xargs -0n1 dirname | sed "s|^|${KUBE_GO_PACKAGE}/|" \
       | LC_ALL=C sort -u
@@ -67,7 +67,10 @@ runTests() {
   kube::etcd::start

   kube::log::status "Running integration test cases"
-  KUBE_RACE="-race"
+  # export KUBE_RACE
+  #
+  # Enable the Go race detector.
+  export KUBE_RACE="-race"
   make -C "${KUBE_ROOT}" test \
     WHAT="${WHAT:-$(kube::test::find_integration_test_dirs | paste -sd' ' -)}" \
     GOFLAGS="${GOFLAGS:-}" \
@@ -94,7 +97,7 @@ checkEtcdOnPath
 trap cleanup EXIT

 # Convert the CSV to an array of API versions to test
-IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
+IFS=';' read -ra apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
 for apiVersion in "${apiVersions[@]}"; do
   runTests "${apiVersion}"
 done

From f38d4938bea48b0d04ff5f43778deaf3ad5a29f6 Mon Sep 17 00:00:00 2001
From: Travis Rhoden
Date: Thu, 11 Apr 2019 12:19:55 -0600
Subject: [PATCH 023/209] Move ExecMount to pkg/volume/util/exec

This patch moves the ExecMounter found in pkg/util/mount to
pkg/volume/util/exec. This is done in preparation for pkg/util/mount
to move out of tree. This specific implementation of mount.Interface
is only used internally to K8s and does not need to move out of tree.
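As context, a small usage sketch of the mounter being moved, assuming the post-move import path; the fakeExec type is illustrative and not part of this patch:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
	execmnt "k8s.io/kubernetes/pkg/volume/util/exec"
)

// fakeExec satisfies mount.Exec by reporting the command it would run
// instead of executing anything.
type fakeExec struct{}

func (fakeExec) Run(cmd string, args ...string) ([]byte, error) {
	return []byte(fmt.Sprintf("would run: %s %v", cmd, args)), nil
}

func main() {
	// NewExecMounter wraps an ordinary mounter: Mount and Unmount shell
	// out through the given Exec, while every other mount.Interface call
	// falls through to the wrapped implementation.
	wrapped := mount.New("") // "" means use the default mount path
	m := execmnt.NewExecMounter(fakeExec{}, wrapped)
	if err := m.Mount("/dev/sdb", "/mnt/data", "ext4", []string{"ro"}); err != nil {
		fmt.Println("mount failed:", err)
	}
}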
--- pkg/kubelet/BUILD | 1 + pkg/kubelet/volume_host.go | 3 +- pkg/util/mount/BUILD | 3 - pkg/volume/util/BUILD | 1 + pkg/volume/util/exec/BUILD | 74 +++++++++++++++++++ .../mount => volume/util/exec}/exec_mount.go | 22 +++--- .../util/exec}/exec_mount_test.go | 18 +++-- .../util/exec}/exec_mount_unsupported.go | 16 ++-- 8 files changed, 109 insertions(+), 29 deletions(-) create mode 100644 pkg/volume/util/exec/BUILD rename pkg/{util/mount => volume/util/exec}/exec_mount.go (88%) rename pkg/{util/mount => volume/util/exec}/exec_mount_test.go (86%) rename pkg/{util/mount => volume/util/exec}/exec_mount_unsupported.go (83%) diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 448aeb9f19e..182fbb40152 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -110,6 +110,7 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/csi:go_default_library", "//pkg/volume/util:go_default_library", + "//pkg/volume/util/exec:go_default_library", "//pkg/volume/util/subpath:go_default_library", "//pkg/volume/util/types:go_default_library", "//pkg/volume/util/volumepathhandler:go_default_library", diff --git a/pkg/kubelet/volume_host.go b/pkg/kubelet/volume_host.go index 4b2e51844cf..6cf1a56e549 100644 --- a/pkg/kubelet/volume_host.go +++ b/pkg/kubelet/volume_host.go @@ -43,6 +43,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" + execmnt "k8s.io/kubernetes/pkg/volume/util/exec" "k8s.io/kubernetes/pkg/volume/util/subpath" ) @@ -230,7 +231,7 @@ func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface { if exec == nil { return kvh.kubelet.mounter } - return mount.NewExecMounter(exec, kvh.kubelet.mounter) + return execmnt.NewExecMounter(exec, kvh.kubelet.mounter) } func (kvh *kubeletVolumeHost) GetHostName() string { diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD index e665748dbe8..f4e40b669f9 100644 --- a/pkg/util/mount/BUILD +++ b/pkg/util/mount/BUILD @@ -5,8 +5,6 @@ go_library( srcs = [ "doc.go", "exec.go", - "exec_mount.go", - "exec_mount_unsupported.go", "fake.go", "mount.go", "mount_helper_common.go", @@ -38,7 +36,6 @@ go_library( go_test( name = "go_default_test", srcs = [ - "exec_mount_test.go", "mount_helper_test.go", "mount_linux_test.go", "mount_test.go", diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index dac92b53ea1..d6200ddc6e3 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -84,6 +84,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/volume/util/exec:all-srcs", "//pkg/volume/util/fs:all-srcs", "//pkg/volume/util/nestedpendingoperations:all-srcs", "//pkg/volume/util/nsenter:all-srcs", diff --git a/pkg/volume/util/exec/BUILD b/pkg/volume/util/exec/BUILD new file mode 100644 index 00000000000..5f4fd655ac6 --- /dev/null +++ b/pkg/volume/util/exec/BUILD @@ -0,0 +1,74 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "exec_mount.go", + "exec_mount_unsupported.go", + ], + importpath = "k8s.io/kubernetes/pkg/volume/util/exec", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/util/mount:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:linux": [ + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/util/mount:go_default_library", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "go_default_test", + srcs = ["exec_mount_test.go"], + embed = [":go_default_library"], + deps = select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/util/mount:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/util/mount/exec_mount.go b/pkg/volume/util/exec/exec_mount.go similarity index 88% rename from pkg/util/mount/exec_mount.go rename to pkg/volume/util/exec/exec_mount.go index cdddb635989..9171085418a 100644 --- a/pkg/util/mount/exec_mount.go +++ b/pkg/volume/util/exec/exec_mount.go @@ -16,23 +16,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mount +package exec import ( "fmt" "os" "k8s.io/klog" + + "k8s.io/kubernetes/pkg/util/mount" ) // ExecMounter is a mounter that uses provided Exec interface to mount and // unmount a filesystem. For all other calls it uses a wrapped mounter. type execMounter struct { - wrappedMounter Interface - exec Exec + wrappedMounter mount.Interface + exec mount.Exec } -func NewExecMounter(exec Exec, wrapped Interface) Interface { +func NewExecMounter(exec mount.Exec, wrapped mount.Interface) mount.Interface { return &execMounter{ wrappedMounter: wrapped, exec: exec, @@ -40,11 +42,11 @@ func NewExecMounter(exec Exec, wrapped Interface) Interface { } // execMounter implements mount.Interface -var _ Interface = &execMounter{} +var _ mount.Interface = &execMounter{} // Mount runs mount(8) using given exec interface. func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindOpts, bindRemountOpts := IsBind(options) + bind, bindOpts, bindRemountOpts := mount.IsBind(options) if bind { err := m.doExecMount(source, target, fstype, bindOpts) @@ -60,7 +62,7 @@ func (m *execMounter) Mount(source string, target string, fstype string, options // doExecMount calls exec(mount ) using given exec interface. func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { klog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) - mountArgs := MakeMountArgs(source, target, fstype, options) + mountArgs := mount.MakeMountArgs(source, target, fstype, options) output, err := m.exec.Run("mount", mountArgs...) klog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) if err != nil { @@ -84,7 +86,7 @@ func (m *execMounter) Unmount(target string) error { } // List returns a list of all mounted filesystems. 
-func (m *execMounter) List() ([]MountPoint, error) { +func (m *execMounter) List() ([]mount.MountPoint, error) { return m.wrappedMounter.List() } @@ -112,7 +114,7 @@ func (m *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (strin return m.wrappedMounter.GetDeviceNameFromMount(mountPath, pluginDir) } -func (m *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { +func (m *execMounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool { return m.wrappedMounter.IsMountPointMatch(mp, dir) } @@ -120,7 +122,7 @@ func (m *execMounter) MakeRShared(path string) error { return m.wrappedMounter.MakeRShared(path) } -func (m *execMounter) GetFileType(pathname string) (FileType, error) { +func (m *execMounter) GetFileType(pathname string) (mount.FileType, error) { return m.wrappedMounter.GetFileType(pathname) } diff --git a/pkg/util/mount/exec_mount_test.go b/pkg/volume/util/exec/exec_mount_test.go similarity index 86% rename from pkg/util/mount/exec_mount_test.go rename to pkg/volume/util/exec/exec_mount_test.go index d6ccd844895..f25485e0f25 100644 --- a/pkg/util/mount/exec_mount_test.go +++ b/pkg/volume/util/exec/exec_mount_test.go @@ -16,13 +16,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mount +package exec import ( "fmt" "reflect" "strings" "testing" + + "k8s.io/kubernetes/pkg/util/mount" ) var ( @@ -33,7 +35,7 @@ var ( ) func TestMount(t *testing.T) { - exec := NewFakeExec(func(cmd string, args ...string) ([]byte, error) { + exec := mount.NewFakeExec(func(cmd string, args ...string) ([]byte, error) { if cmd != "mount" { t.Errorf("expected mount command, got %q", cmd) } @@ -45,7 +47,7 @@ func TestMount(t *testing.T) { return nil, nil }) - wrappedMounter := &fakeMounter{FakeMounter: &FakeMounter{}, t: t} + wrappedMounter := &fakeMounter{FakeMounter: &mount.FakeMounter{}, t: t} mounter := NewExecMounter(exec, wrappedMounter) mounter.Mount(sourcePath, destinationPath, fsType, mountOptions) @@ -53,7 +55,7 @@ func TestMount(t *testing.T) { func TestBindMount(t *testing.T) { cmdCount := 0 - exec := NewFakeExec(func(cmd string, args ...string) ([]byte, error) { + exec := mount.NewFakeExec(func(cmd string, args ...string) ([]byte, error) { cmdCount++ if cmd != "mount" { t.Errorf("expected mount command, got %q", cmd) @@ -73,14 +75,14 @@ func TestBindMount(t *testing.T) { return nil, nil }) - wrappedMounter := &fakeMounter{FakeMounter: &FakeMounter{}, t: t} + wrappedMounter := &fakeMounter{FakeMounter: &mount.FakeMounter{}, t: t} mounter := NewExecMounter(exec, wrappedMounter) bindOptions := append(mountOptions, "bind") mounter.Mount(sourcePath, destinationPath, fsType, bindOptions) } func TestUnmount(t *testing.T) { - exec := NewFakeExec(func(cmd string, args ...string) ([]byte, error) { + exec := mount.NewFakeExec(func(cmd string, args ...string) ([]byte, error) { if cmd != "umount" { t.Errorf("expected unmount command, got %q", cmd) } @@ -92,7 +94,7 @@ func TestUnmount(t *testing.T) { return nil, nil }) - wrappedMounter := &fakeMounter{&FakeMounter{}, t} + wrappedMounter := &fakeMounter{&mount.FakeMounter{}, t} mounter := NewExecMounter(exec, wrappedMounter) mounter.Unmount(destinationPath) @@ -100,7 +102,7 @@ func TestUnmount(t *testing.T) { /* Fake wrapped mounter */ type fakeMounter struct { - *FakeMounter + *mount.FakeMounter t *testing.T } diff --git a/pkg/util/mount/exec_mount_unsupported.go b/pkg/volume/util/exec/exec_mount_unsupported.go similarity index 83% rename from 
pkg/util/mount/exec_mount_unsupported.go rename to pkg/volume/util/exec/exec_mount_unsupported.go index 328b383fd4d..09667c070fc 100644 --- a/pkg/util/mount/exec_mount_unsupported.go +++ b/pkg/volume/util/exec/exec_mount_unsupported.go @@ -16,18 +16,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mount +package exec import ( "errors" "os" + + "k8s.io/kubernetes/pkg/util/mount" ) type execMounter struct{} // ExecMounter is a mounter that uses provided Exec interface to mount and // unmount a filesystem. For all other calls it uses a wrapped mounter. -func NewExecMounter(exec Exec, wrapped Interface) Interface { +func NewExecMounter(exec mount.Exec, wrapped mount.Interface) mount.Interface { return &execMounter{} } @@ -39,11 +41,11 @@ func (mounter *execMounter) Unmount(target string) error { return nil } -func (mounter *execMounter) List() ([]MountPoint, error) { - return []MountPoint{}, nil +func (mounter *execMounter) List() ([]mount.MountPoint, error) { + return []mount.MountPoint{}, nil } -func (mounter *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { +func (mounter *execMounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool { return (mp.Path == dir) } @@ -67,8 +69,8 @@ func (mounter *execMounter) MakeRShared(path string) error { return nil } -func (mounter *execMounter) GetFileType(pathname string) (FileType, error) { - return FileType("fake"), errors.New("not implemented") +func (mounter *execMounter) GetFileType(pathname string) (mount.FileType, error) { + return mount.FileType("fake"), errors.New("not implemented") } func (mounter *execMounter) MakeDir(pathname string) error { From 96476fd054b3b0253c6e3cd8d92637d7bec2e1a7 Mon Sep 17 00:00:00 2001 From: Travis Rhoden Date: Thu, 11 Apr 2019 13:22:14 -0600 Subject: [PATCH 024/209] Fix linting issues for exec mounter --- pkg/volume/util/exec/exec_mount.go | 4 +++- pkg/volume/util/exec/exec_mount_unsupported.go | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/volume/util/exec/exec_mount.go b/pkg/volume/util/exec/exec_mount.go index 9171085418a..a5ab848a85d 100644 --- a/pkg/volume/util/exec/exec_mount.go +++ b/pkg/volume/util/exec/exec_mount.go @@ -34,6 +34,8 @@ type execMounter struct { exec mount.Exec } +// NewExecMounter returns a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. func NewExecMounter(exec mount.Exec, wrapped mount.Interface) mount.Interface { return &execMounter{ wrappedMounter: wrapped, @@ -66,7 +68,7 @@ func (m *execMounter) doExecMount(source, target, fstype string, options []strin output, err := m.exec.Run("mount", mountArgs...) 
klog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) if err != nil { - return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", + return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s", err, "mount", source, target, fstype, options, string(output)) } diff --git a/pkg/volume/util/exec/exec_mount_unsupported.go b/pkg/volume/util/exec/exec_mount_unsupported.go index 09667c070fc..fa02c1fa257 100644 --- a/pkg/volume/util/exec/exec_mount_unsupported.go +++ b/pkg/volume/util/exec/exec_mount_unsupported.go @@ -27,7 +27,7 @@ import ( type execMounter struct{} -// ExecMounter is a mounter that uses provided Exec interface to mount and +// NewExecMounter returns a mounter that uses provided Exec interface to mount and // unmount a filesystem. For all other calls it uses a wrapped mounter. func NewExecMounter(exec mount.Exec, wrapped mount.Interface) mount.Interface { return &execMounter{} @@ -85,7 +85,7 @@ func (mounter *execMounter) ExistsPath(pathname string) (bool, error) { return true, errors.New("not implemented") } -func (m *execMounter) EvalHostSymlinks(pathname string) (string, error) { +func (mounter *execMounter) EvalHostSymlinks(pathname string) (string, error) { return "", errors.New("not implemented") } From 538cd87864ee18fa0ae31b20b39728ada6f2f9ba Mon Sep 17 00:00:00 2001 From: Haiyan Meng Date: Fri, 8 Mar 2019 15:29:23 -0800 Subject: [PATCH 025/209] Add metrics to monitor the kubelet http server Signed-off-by: Haiyan Meng --- pkg/kubelet/server/BUILD | 2 + pkg/kubelet/server/metrics/BUILD | 28 ++++++++++ pkg/kubelet/server/metrics/metrics.go | 79 +++++++++++++++++++++++++++ pkg/kubelet/server/server.go | 49 +++++++++++++++++ pkg/kubelet/server/server_test.go | 21 +++++++ 5 files changed, 179 insertions(+) create mode 100644 pkg/kubelet/server/metrics/BUILD create mode 100644 pkg/kubelet/server/metrics/metrics.go diff --git a/pkg/kubelet/server/BUILD b/pkg/kubelet/server/BUILD index edfb943102f..fd735c0c4dd 100644 --- a/pkg/kubelet/server/BUILD +++ b/pkg/kubelet/server/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/kubelet/apis/resourcemetrics/v1alpha1:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/prober:go_default_library", + "//pkg/kubelet/server/metrics:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", "//pkg/kubelet/server/remotecommand:go_default_library", "//pkg/kubelet/server/stats:go_default_library", @@ -105,6 +106,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/kubelet/server/metrics:all-srcs", "//pkg/kubelet/server/portforward:all-srcs", "//pkg/kubelet/server/remotecommand:all-srcs", "//pkg/kubelet/server/stats:all-srcs", diff --git a/pkg/kubelet/server/metrics/BUILD b/pkg/kubelet/server/metrics/BUILD new file mode 100644 index 00000000000..6d22a4d7fc6 --- /dev/null +++ b/pkg/kubelet/server/metrics/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/metrics", + deps = [ + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = 
["automanaged"], +) diff --git a/pkg/kubelet/server/metrics/metrics.go b/pkg/kubelet/server/metrics/metrics.go new file mode 100644 index 00000000000..48ad2d145d0 --- /dev/null +++ b/pkg/kubelet/server/metrics/metrics.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + kubeletSubsystem = "kubelet" +) + +var ( + // HTTPRequests tracks the number of the http requests received since the server started. + HTTPRequests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: kubeletSubsystem, + Name: "http_requests_total", + Help: "Number of the http requests received since the server started", + }, + // server_type aims to differentiate the readonly server and the readwrite server. + // long_running marks whether the request is long-running or not. + // Currently, long-running requests include exec/attach/portforward/debug. + []string{"method", "path", "host", "server_type", "long_running"}, + ) + // HTTPRequestsDuration tracks the duration in seconds to serve http requests. + HTTPRequestsDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Subsystem: kubeletSubsystem, + Name: "http_requests_duration_seconds", + Help: "Duration in seconds to serve http requests", + // Use DefBuckets for now, will customize the buckets if necessary. + Buckets: prometheus.DefBuckets, + }, + []string{"method", "path", "host", "server_type", "long_running"}, + ) + // HTTPInflightRequests tracks the number of the inflight http requests. + HTTPInflightRequests = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: kubeletSubsystem, + Name: "http_inflight_requests", + Help: "Number of the inflight http requests", + }, + []string{"method", "path", "host", "server_type", "long_running"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + registerMetrics.Do(func() { + prometheus.MustRegister(HTTPRequests) + prometheus.MustRegister(HTTPRequestsDuration) + prometheus.MustRegister(HTTPInflightRequests) + }) +} + +// SinceInSeconds gets the time since the specified start in seconds. 
+func SinceInSeconds(start time.Time) float64 { + return time.Since(start).Seconds() +} diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index a4f75116558..4a9d9080d4c 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -63,6 +63,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober" + servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics" "k8s.io/kubernetes/pkg/kubelet/server/portforward" remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" "k8s.io/kubernetes/pkg/kubelet/server/stats" @@ -807,6 +808,33 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp proxyStream(response.ResponseWriter, request.Request, url) } +// trimURLPath trims a URL path. +// For paths in the format of "/metrics/xxx", "metrics/xxx" is returned; +// For all other paths, the first part of the path is returned. +func trimURLPath(path string) string { + parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 3) + if len(parts) == 0 { + return path + } + + if parts[0] == "metrics" && len(parts) > 1 { + return fmt.Sprintf("%s/%s", parts[0], parts[1]) + + } + return parts[0] +} + +// isLongRunningRequest determines whether the request is long-running or not. +func isLongRunningRequest(path string) bool { + longRunningRequestPaths := []string{"exec", "attach", "portforward", "debug"} + for _, p := range longRunningRequestPaths { + if p == path { + return true + } + } + return false +} + // ServeHTTP responds to HTTP requests on the Kubelet. func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { defer httplog.NewLogged(req, &w).StacktraceWhen( @@ -820,6 +848,27 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { http.StatusSwitchingProtocols, ), ).Log() + + // monitor http requests + var serverType string + if s.auth == nil { + serverType = "readonly" + } else { + serverType = "readwrite" + } + + method, path, host := req.Method, trimURLPath(req.URL.Path), req.URL.Host + + longRunning := strconv.FormatBool(isLongRunningRequest(path)) + + servermetrics.HTTPRequests.WithLabelValues(method, path, host, serverType, longRunning).Inc() + + servermetrics.HTTPInflightRequests.WithLabelValues(method, path, host, serverType, longRunning).Inc() + defer servermetrics.HTTPInflightRequests.WithLabelValues(method, path, host, serverType, longRunning).Dec() + + startTime := time.Now() + defer servermetrics.HTTPRequestsDuration.WithLabelValues(method, path, host, serverType, longRunning).Observe(servermetrics.SinceInSeconds(startTime)) + s.restfulCont.ServeHTTP(w, req) } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 1359eec86aa..7627b0bcc10 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -1666,3 +1666,24 @@ func TestDebuggingDisabledHandlers(t *testing.T) { assert.Equal(t, http.StatusOK, resp.StatusCode) } + +func TestTrimURLPath(t *testing.T) { + tests := []struct { + path, expected string + }{ + {"", ""}, + {"//", ""}, + {"/pods", "pods"}, + {"pods", "pods"}, + {"pods/", "pods"}, + {"good/", "good"}, + {"pods/probes", "pods"}, + {"metrics", "metrics"}, + {"metrics/resource", "metrics/resource"}, + {"metrics/hello", "metrics/hello"}, + } + + for _, test := range tests { + assert.Equal(t, test.expected, trimURLPath(test.path), fmt.Sprintf("path is: %s", test.path)) + } +} From 
7c9e567008d25bb84da570f0b0b98a7238448d06 Mon Sep 17 00:00:00 2001 From: caiweidong Date: Sun, 31 Mar 2019 23:35:15 +0800 Subject: [PATCH 026/209] add operation name for other volume operations --- .../util/operationexecutor/operation_generator.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 05da765fc04..3d331797c03 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -211,6 +211,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( } return volumetypes.GeneratedOperations{ + OperationName: "verify_volumes_are_attached_per_node", OperationFunc: volumesAreAttachedFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume("", nil), "verify_volumes_are_attached_per_node"), EventRecorderFunc: nil, // nil because we do not want to generate event on error @@ -282,6 +283,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( } return volumetypes.GeneratedOperations{ + OperationName: "verify_volumes_are_attached", OperationFunc: bulkVolumeVerifyFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, nil), "verify_volumes_are_attached"), EventRecorderFunc: nil, // nil because we do not want to generate event on error @@ -388,6 +390,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( } return volumetypes.GeneratedOperations{ + OperationName: "volume_attach", OperationFunc: attachVolumeFunc, EventRecorderFunc: eventRecorderFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(attachableVolumePlugin.GetPluginName(), volumeToAttach.VolumeSpec), "volume_attach"), @@ -507,6 +510,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( } return volumetypes.GeneratedOperations{ + OperationName: "volume_detach", OperationFunc: getVolumePluginMgrFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, volumeToDetach.VolumeSpec), "volume_detach"), EventRecorderFunc: nil, // nil because we do not want to generate event on error @@ -1093,6 +1097,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( } return volumetypes.GeneratedOperations{ + OperationName: "map_volume", OperationFunc: mapVolumeFunc, EventRecorderFunc: eventRecorderFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "map_volume"), @@ -1163,6 +1168,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( } return volumetypes.GeneratedOperations{ + OperationName: "unmap_volume", OperationFunc: unmapVolumeFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "unmap_volume"), EventRecorderFunc: nil, // nil because we do not want to generate event on error @@ -1278,6 +1284,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( } return volumetypes.GeneratedOperations{ + OperationName: "unmap_device", OperationFunc: unmapDeviceFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmap_device"), EventRecorderFunc: nil, // nil because we do not want to generate event on error @@ -1352,6 +1359,7 @@ func (og *operationGenerator) 
GenerateVerifyControllerAttachedVolumeFunc( } return volumetypes.GeneratedOperations{ + OperationName: "verify_controller_attached_volume", OperationFunc: verifyControllerAttachedVolumeFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "verify_controller_attached_volume"), EventRecorderFunc: nil, // nil because we do not want to generate event on error @@ -1468,6 +1476,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( } return volumetypes.GeneratedOperations{ + OperationName: "expand_volume", OperationFunc: expandVolumeFunc, EventRecorderFunc: eventRecorderFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeSpec), "expand_volume"), @@ -1542,6 +1551,7 @@ func (og *operationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc( } return volumetypes.GeneratedOperations{ + OperationName: "volume_fs_resize", OperationFunc: fsResizeFunc, EventRecorderFunc: eventRecorderFunc, CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_fs_resize"), From 9768ba9eb3277fadfb544241e0f83cf6054d0481 Mon Sep 17 00:00:00 2001 From: aaa <1693291525@qq.com> Date: Mon, 8 Apr 2019 21:11:00 -0400 Subject: [PATCH 027/209] Fix shellcheck failures in stage-upload.sh update pull request update pull request update pull request update pull request --- cluster/gce/gci/mounter/stage-upload.sh | 21 +++++++++++---------- hack/.shellcheck_failures | 1 - 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cluster/gce/gci/mounter/stage-upload.sh b/cluster/gce/gci/mounter/stage-upload.sh index f3359447c39..ae1657e283a 100755 --- a/cluster/gce/gci/mounter/stage-upload.sh +++ b/cluster/gce/gci/mounter/stage-upload.sh @@ -39,29 +39,30 @@ ACI_DIR=${STAGING_DIR}/gci-mounter CWD=${PWD} # Cleanup the temporary directories -function cleanup { - rm -rf ${DOWNLOAD_DIR} - rm -rf ${STAGING_DIR} - cd ${CWD} +cleanup() { + rm -rf "${DOWNLOAD_DIR}" + rm -rf "${STAGING_DIR}" + cd "${CWD}" } # Delete temporary directories on exit trap cleanup EXIT -mkdir ${ACI_DIR} +mkdir "${ACI_DIR}" # Convert docker image to aci and stage it echo "Downloading docker2aci ${DOCKER2ACI_VERSION}" -wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" &> /dev/null +wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" >/dev/null 2>&1 echo "Extracting docker2aci ${DOCKER2ACI_VERSION}" tar xzf docker2aci-${DOCKER2ACI_VERSION}.tar.gz -ACI_IMAGE=$(${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci ${DOCKER_IMAGE} 2>/dev/null | tail -n 1) -cp ${ACI_IMAGE} ${ACI_DIR}/${MOUNTER_ACI_IMAGE} +ACI_IMAGE=$("${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci" "${DOCKER_IMAGE}" 2>/dev/null | tail -n 1) +cp "${ACI_IMAGE}" "${ACI_DIR}/${MOUNTER_ACI_IMAGE}" # Upload the contents to gcs echo "Uploading gci mounter ACI in ${ACI_DIR} to ${MOUNTER_GCS_DIR}" -gsutil cp ${ACI_DIR}/${MOUNTER_ACI_IMAGE} ${MOUNTER_GCS_DIR} +gsutil cp "${ACI_DIR}/${MOUNTER_ACI_IMAGE}" "${MOUNTER_GCS_DIR}" echo "Upload completed" echo "Updated gci-mounter ACI version and SHA1 in cluster/gce/gci/configure.sh" -echo "${MOUNTER_ACI_IMAGE} hash: $(sha1sum ${ACI_DIR}/${MOUNTER_ACI_IMAGE})" +ACI_HASH=$(sha1sum "${ACI_DIR}/${MOUNTER_ACI_IMAGE}") +echo "${MOUNTER_ACI_IMAGE} hash: 
${ACI_HASH}" diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 3ed7122401e..5afa8aa0674 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -11,7 +11,6 @@ ./cluster/gce/gci/flexvolume_node_setup.sh ./cluster/gce/gci/health-monitor.sh ./cluster/gce/gci/master-helper.sh -./cluster/gce/gci/mounter/stage-upload.sh ./cluster/gce/gci/shutdown.sh ./cluster/gce/list-resources.sh ./cluster/gce/upgrade-aliases.sh From 39c239c308b6ae79e014c755df9d1ddfbc3a0499 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sat, 13 Apr 2019 12:45:32 +0000 Subject: [PATCH 028/209] refactor detach azure disk retry operation --- .../azure/azure_controller_common.go | 24 +++++++++++++-- .../azure/azure_controller_standard.go | 30 +++++-------------- .../providers/azure/azure_controller_vmss.go | 30 +++++-------------- .../providers/azure/azure_fakes.go | 4 +-- .../providers/azure/azure_vmsets.go | 6 ++-- pkg/volume/azure_dd/attacher.go | 2 +- pkg/volume/azure_dd/azure_dd.go | 2 +- 7 files changed, 45 insertions(+), 53 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common.go b/pkg/cloudprovider/providers/azure/azure_controller_common.go index 7109ea73bd4..1b5148d3d51 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -95,14 +95,32 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode) } -// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI. -func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { +// DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI. +func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error { vmset, err := c.getNodeVMSet(nodeName) if err != nil { return err } - return vmset.DetachDiskByName(diskName, diskURI, nodeName) + resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) + if c.cloud.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { + klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err) + retryErr := kwait.ExponentialBackoff(c.cloud.requestBackoff(), func() (bool, error) { + resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) + return c.cloud.processHTTPRetryResponse(nil, "", resp, err) + }) + if retryErr != nil { + err = retryErr + klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err) + } + } + if err != nil { + klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) + } else { + klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) + } + + return err } // getNodeDataDisks invokes vmSet interfaces to get data disks for the node. 
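The hunk above is the heart of this refactor: the per-VM-type implementations now return the raw *http.Response, and the common controller alone decides whether to re-drive the call. Stripped of the Azure plumbing, the retry shape is roughly the sketch below; the helper name detachWithBackoff, the backoff values, and the status-code check are made up for illustration and are not the provider's configured defaults.

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // detachWithBackoff runs detach once per attempt; the condition func
    // reports done=true to stop, or done=false to back off and try again.
    func detachWithBackoff(detach func() (*http.Response, error)) error {
        backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 5}
        return wait.ExponentialBackoff(backoff, func() (bool, error) {
            resp, err := detach()
            if err == nil && (resp == nil || resp.StatusCode < http.StatusInternalServerError) {
                return true, nil // success or non-retryable status: stop
            }
            return false, nil // retryable: wait for the next backoff step
        })
    }

    func main() {
        err := detachWithBackoff(func() (*http.Response, error) { return nil, nil })
        fmt.Println("detach result:", err)
    }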
diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index b380996f4e8..e19bccc31ed 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -18,6 +18,7 @@ package azure import ( "fmt" + "net/http" "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" @@ -88,7 +89,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error klog.V(2).Infof("azureDisk - err %v, try detach disk(%s, %s)", err, diskName, diskURI) - as.DetachDiskByName(diskName, diskURI, nodeName) + as.DetachDisk(diskName, diskURI, nodeName) } } else { klog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI) @@ -96,20 +97,20 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri return err } -// DetachDiskByName detaches a vhd from host +// DetachDisk detaches a disk from host // the vhd can be identified by diskName or diskURI -func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { +func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { vm, err := as.getVirtualMachine(nodeName) if err != nil { // if host doesn't exist, no need to detach klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI) - return nil + return nil, nil } vmName := mapNodeNameToVMName(nodeName) nodeResourceGroup, err := as.GetNodeResourceGroup(vmName) if err != nil { - return err + return nil, err } disks := *vm.StorageProfile.DataDisks @@ -127,7 +128,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t } if !bFoundDisk { - return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) + return nil, fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) } newVM := compute.VirtualMachine{ @@ -146,22 +147,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t // Invalidate the cache right after updating defer as.cloud.vmCache.Delete(vmName) - resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) - if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - klog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, vmName, diskName, diskURI, err) - retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM) - if retryErr != nil { - err = retryErr - klog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, vmName, diskName, diskURI, err) - } - } - if err != nil { - klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) - } else { - klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) - } - - return err + return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) } // GetDataDisks gets a list of data disks attached to the node. 
diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index 442dd8fedc0..a76c2d575f2 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -18,6 +18,7 @@ package azure import ( "fmt" + "net/http" "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" @@ -93,7 +94,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error klog.Infof("azureDisk - err %s, try detach disk(%s, %s)", detail, diskName, diskURI) - ss.DetachDiskByName(diskName, diskURI, nodeName) + ss.DetachDisk(diskName, diskURI, nodeName) } } else { klog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI) @@ -101,18 +102,18 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod return err } -// DetachDiskByName detaches a vhd from host +// DetachDisk detaches a disk from host // the vhd can be identified by diskName or diskURI -func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { +func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { vmName := mapNodeNameToVMName(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(vmName) if err != nil { - return err + return nil, err } nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName) if err != nil { - return err + return nil, err } disks := []compute.DataDisk{} @@ -133,7 +134,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No } if !bFoundDisk { - return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) + return nil, fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) } newVM := compute.VirtualMachineScaleSetVM{ @@ -156,22 +157,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No defer ss.vmssVMCache.Delete(key) klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) - resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - klog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, nodeName, diskName, diskURI, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) - if retryErr != nil { - err = retryErr - klog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, nodeName, diskName, diskURI, err) - } - } - if err != nil { - klog.Errorf("azureDisk - detach disk(%s, %s) from %s failed, err: %v", diskName, diskURI, nodeName, err) - } else { - klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) - } - - return err + return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) } // GetDataDisks gets a list of data disks attached to the node. 
diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 77cc02cb64a..af8e47abbba 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -910,8 +910,8 @@ func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { - return fmt.Errorf("unimplemented") +func (f *fakeVMSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { + return nil, fmt.Errorf("unimplemented") } func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go index 0d37a91c070..6dbdc1dd653 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -17,6 +17,8 @@ limitations under the License. package azure import ( + "net/http" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" @@ -60,8 +62,8 @@ type VMSet interface { // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error - // DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI. - DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error + // DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI. + DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) // GetDataDisks gets a list of data disks attached to the node. GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 90a0bf414d6..b9453f70154 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -301,7 +301,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) - err = diskController.DetachDiskByName("", diskURI, nodeName) + err = diskController.DetachDisk("", diskURI, nodeName) if err != nil { klog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index d0607423c2f..04e31a661ad 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -46,7 +46,7 @@ type DiskController interface { // Attaches the disk to the host machine. AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine. 
- DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error + DetachDisk(diskName, diskUri string, nodeName types.NodeName) error // Check if a list of volumes are attached to the node with the specified NodeName DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) From 98c976ae4f24c9f6a4512a7a04f1a38bd3759317 Mon Sep 17 00:00:00 2001 From: ialidzhikov Date: Sat, 13 Apr 2019 16:23:07 +0300 Subject: [PATCH 029/209] Clean ineffectual assignment Signed-off-by: ialidzhikov --- pkg/volume/glusterfs/glusterfs.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 6b0b5ab3421..1529a63cfbd 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -328,7 +328,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { if b.readOnly { options = append(options, "ro") - } // Check for log-file,log-level options existence in user supplied mount options, if provided, use those. @@ -348,7 +347,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { // If logfile has not been provided, create driver specific log file. if !hasLogFile { - log = "" p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName) if err := os.MkdirAll(p, 0750); err != nil { return fmt.Errorf("failed to create directory %v: %v", p, err) @@ -361,7 +359,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { // Use derived log file in gluster fuse mount options = append(options, "log-file="+log) - } if !hasLogLevel { From db8bc689463b379444507abd86738e7633e958c3 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Mon, 15 Apr 2019 01:34:02 +0800 Subject: [PATCH 030/209] fix shellcheck failures of hack/update-generated-kms-dockerized.sh hack/update-generated-protobuf-dockerized.sh --- hack/.shellcheck_failures | 2 -- hack/update-generated-kms-dockerized.sh | 14 ++++++++------ hack/update-generated-protobuf-dockerized.sh | 10 ++++++---- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index b5b1c14911f..24b19e8d51a 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -42,8 +42,6 @@ ./hack/make-rules/vet.sh ./hack/test-integration.sh ./hack/test-update-storage-objects.sh -./hack/update-generated-kms-dockerized.sh -./hack/update-generated-protobuf-dockerized.sh ./hack/update-generated-runtime-dockerized.sh ./hack/update-openapi-spec.sh ./hack/update-vendor.sh diff --git a/hack/update-generated-kms-dockerized.sh b/hack/update-generated-kms-dockerized.sh index 7d61e67e222..0ccae983d15 100755 --- a/hack/update-generated-kms-dockerized.sh +++ b/hack/update-generated-kms-dockerized.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
KUBE_KMS_GRPC_ROOT="${KUBE_ROOT}/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/" source "${KUBE_ROOT}/hack/lib/init.sh" @@ -40,22 +40,24 @@ if [[ -z "$(which protoc)" || "$(protoc --version)" != "libprotoc 3."* ]]; then fi function cleanup { - rm -f ${KUBE_KMS_GRPC_ROOT}/service.pb.go.bak + rm -f "${KUBE_KMS_GRPC_ROOT}/service.pb.go.bak" + rm -f "${KUBE_KMS_GRPC_ROOT}/service.pb.go.tmp" } trap cleanup EXIT -gogopath=$(dirname $(kube::util::find-binary "protoc-gen-gogo")) +gogopath=$(dirname "$(kube::util::find-binary "protoc-gen-gogo")") PATH="${gogopath}:${PATH}" \ protoc \ --proto_path="${KUBE_KMS_GRPC_ROOT}" \ --proto_path="${KUBE_ROOT}/vendor" \ - --gogo_out=plugins=grpc:${KUBE_KMS_GRPC_ROOT} ${KUBE_KMS_GRPC_ROOT}/service.proto + --gogo_out=plugins=grpc:"${KUBE_KMS_GRPC_ROOT}" "${KUBE_KMS_GRPC_ROOT}/service.proto" # Update boilerplate for the generated file. -echo "$(cat hack/boilerplate/boilerplate.generatego.txt ${KUBE_KMS_GRPC_ROOT}/service.pb.go)" > ${KUBE_KMS_GRPC_ROOT}/service.pb.go +cat hack/boilerplate/boilerplate.generatego.txt "${KUBE_KMS_GRPC_ROOT}/service.pb.go" > "${KUBE_KMS_GRPC_ROOT}/service.pb.go.tmp" && \ +mv "${KUBE_KMS_GRPC_ROOT}/service.pb.go.tmp" "${KUBE_KMS_GRPC_ROOT}/service.pb.go" # Run gofmt to clean up the generated code. kube::golang::verify_go_version -gofmt -l -s -w ${KUBE_KMS_GRPC_ROOT}/service.pb.go +gofmt -l -s -w "${KUBE_KMS_GRPC_ROOT}/service.pb.go" diff --git a/hack/update-generated-protobuf-dockerized.sh b/hack/update-generated-protobuf-dockerized.sh index 5104967d99f..b7bc5a66376 100755 --- a/hack/update-generated-protobuf-dockerized.sh +++ b/hack/update-generated-protobuf-dockerized.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env @@ -41,7 +41,9 @@ fi gotoprotobuf=$(kube::util::find-binary "go-to-protobuf") -APIROOTS=( ${1} ) +while IFS=$'\n' read -r line; do + APIROOTS+=( "$line" ); +done <<< "${1}" shift # requires the 'proto' tag to build (will remove when ready) @@ -52,6 +54,6 @@ PATH="${KUBE_ROOT}/_output/bin:${PATH}" \ "${gotoprotobuf}" \ --proto-import="${KUBE_ROOT}/vendor" \ --proto-import="${KUBE_ROOT}/third_party/protobuf" \ - --packages=$(IFS=, ; echo "${APIROOTS[*]}") \ - --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.generatego.txt \ + --packages="$(IFS=, ; echo "${APIROOTS[*]}")" \ + --go-header-file "${KUBE_ROOT}/hack/boilerplate/boilerplate.generatego.txt" \ "$@" From 759121309f1be442b1ae8083885c5bfc3aa216e9 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Sun, 14 Apr 2019 21:01:00 -0700 Subject: [PATCH 031/209] Update to go 1.12.4 --- build/build-image/cross/Dockerfile | 2 +- build/build-image/cross/VERSION | 2 +- build/root/WORKSPACE | 6 +++--- test/images/Makefile | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/build/build-image/cross/Dockerfile b/build/build-image/cross/Dockerfile index b0cd8ed3fe9..f0d0cd1f482 100644 --- a/build/build-image/cross/Dockerfile +++ b/build/build-image/cross/Dockerfile @@ -15,7 +15,7 @@ # This file creates a standard build environment for building cross # platform go binary for the architecture kubernetes cares about. 
-FROM golang:1.12.1 +FROM golang:1.12.4 ENV GOARM 7 ENV KUBE_DYNAMIC_CROSSPLATFORMS \ diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index c167785a48d..3ff2cc2b66f 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.12.1-2 +v1.12.4-1 diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index b0a1501f6b7..5c7932817e8 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -21,8 +21,8 @@ http_archive( http_archive( name = "io_bazel_rules_go", - sha256 = "6433336b4c5feb54e2f45df4c1c84ea4385b2dc0b6f274ec2cd5d745045eae1f", - urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.2/rules_go-0.17.2.tar.gz"), + sha256 = "91b79f4758fd16f2c6426279ce00c1d2d8577d61c519db39675ed84657e1a95e", + urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.4/rules_go-0.17.4.tar.gz"), ) load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") @@ -30,7 +30,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe go_rules_dependencies() go_register_toolchains( - go_version = "1.12.1", + go_version = "1.12.4", ) http_archive( diff --git a/test/images/Makefile b/test/images/Makefile index 679352771ff..c5a1797e01f 100644 --- a/test/images/Makefile +++ b/test/images/Makefile @@ -17,7 +17,7 @@ include ../../hack/make-rules/Makefile.manifest REGISTRY ?= gcr.io/kubernetes-e2e-test-images GOARM=7 QEMUVERSION=v2.9.1 -GOLANG_VERSION=1.12.1 +GOLANG_VERSION=1.12.4 export ifndef WHAT From 7f8fc5d18931e36cb1d3dbaf479bb1d02bb01d0b Mon Sep 17 00:00:00 2001 From: Dmitry Rozhkov Date: Thu, 11 Apr 2019 16:32:50 +0300 Subject: [PATCH 032/209] kubeadm: check all available CA certs against pinned certs Currently kubeadm produces an error upon parsing multiple certificates stored in the cluster-info configmap. Yet it should check all available certificates in a scenario like CA key rotation. Check all available CA certs against pinned certificate hashes. 
Fixes https://github.com/kubernetes/kubeadm/issues/1399 --- cmd/kubeadm/app/discovery/token/token.go | 35 +++++++++++++------ cmd/kubeadm/app/discovery/token/token_test.go | 11 +++--- cmd/kubeadm/app/util/pubkeypin/pubkeypin.go | 16 ++++++--- .../app/util/pubkeypin/pubkeypin_test.go | 6 ++-- 4 files changed, 44 insertions(+), 24 deletions(-) diff --git a/cmd/kubeadm/app/discovery/token/token.go b/cmd/kubeadm/app/discovery/token/token.go index bd6d7237d56..89ba5569486 100644 --- a/cmd/kubeadm/app/discovery/token/token.go +++ b/cmd/kubeadm/app/discovery/token/token.go @@ -119,14 +119,14 @@ func RetrieveValidatedConfigInfo(cfg *kubeadmapi.JoinConfiguration) (*clientcmda for _, cluster := range insecureConfig.Clusters { clusterCABytes = cluster.CertificateAuthorityData } - clusterCA, err := parsePEMCert(clusterCABytes) + clusterCAs, err := parsePEMCerts(clusterCABytes) if err != nil { return nil, errors.Wrapf(err, "failed to parse cluster CA from the %s configmap", bootstrapapi.ConfigMapClusterInfo) } // Validate the cluster CA public key against the pinned set - err = pubKeyPins.Check(clusterCA) + err = pubKeyPins.CheckAny(clusterCAs) if err != nil { return nil, errors.Wrapf(err, "cluster CA found in %s configmap is invalid", bootstrapapi.ConfigMapClusterInfo) } @@ -226,14 +226,27 @@ func fetchKubeConfigWithTimeout(apiEndpoint string, discoveryTimeout time.Durati } } -// parsePEMCert decodes a PEM-formatted certificate and returns it as an x509.Certificate -func parsePEMCert(certData []byte) (*x509.Certificate, error) { - pemBlock, trailingData := pem.Decode(certData) - if pemBlock == nil { - return nil, errors.New("invalid PEM data") +// parsePEMCerts decodes PEM-formatted certificates into a slice of x509.Certificates +func parsePEMCerts(certData []byte) ([]*x509.Certificate, error) { + var certificates []*x509.Certificate + var pemBlock *pem.Block + + for { + pemBlock, certData = pem.Decode(certData) + if pemBlock == nil { + return nil, errors.New("invalid PEM data") + } + + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errors.Wrap(err, "unable to parse certificate") + } + certificates = append(certificates, cert) + + if len(certData) == 0 { + break + } } - if len(trailingData) != 0 { - return nil, errors.New("trailing data after first PEM block") - } - return x509.ParseCertificate(pemBlock.Bytes) + + return certificates, nil } diff --git a/cmd/kubeadm/app/discovery/token/token_test.go b/cmd/kubeadm/app/discovery/token/token_test.go index 8be1147e0e1..9572feabc41 100644 --- a/cmd/kubeadm/app/discovery/token/token_test.go +++ b/cmd/kubeadm/app/discovery/token/token_test.go @@ -102,23 +102,24 @@ func TestParsePEMCert(t *testing.T) { }{ {"invalid certificate data", []byte{0}, false}, {"certificate with junk appended", []byte(testCertPEM + "\nABC"), false}, - {"multiple certificates", []byte(testCertPEM + "\n" + testCertPEM), false}, + {"multiple certificates", []byte(testCertPEM + "\n" + testCertPEM), true}, {"valid", []byte(testCertPEM), true}, + {"empty input", []byte{}, false}, } { - cert, err := parsePEMCert(testCase.input) + certs, err := parsePEMCerts(testCase.input) if testCase.expectValid { if err != nil { t.Errorf("failed TestParsePEMCert(%s): unexpected error %v", testCase.name, err) } - if cert == nil { + if certs == nil { t.Errorf("failed TestParsePEMCert(%s): returned nil", testCase.name) } } else { if err == nil { t.Errorf("failed TestParsePEMCert(%s): expected an error", testCase.name) } - if cert != nil { - t.Errorf("failed 
TestParsePEMCert(%s): expected not to get a certificate back, but got one", testCase.name) + if certs != nil { + t.Errorf("failed TestParsePEMCert(%s): expected not to get a certificate back, but got some", testCase.name) } } } diff --git a/cmd/kubeadm/app/util/pubkeypin/pubkeypin.go b/cmd/kubeadm/app/util/pubkeypin/pubkeypin.go index 16f74dee329..fb157160d6a 100644 --- a/cmd/kubeadm/app/util/pubkeypin/pubkeypin.go +++ b/cmd/kubeadm/app/util/pubkeypin/pubkeypin.go @@ -61,12 +61,18 @@ func (s *Set) Allow(pubKeyHashes ...string) error { return nil } -// Check if a certificate matches one of the public keys in the set -func (s *Set) Check(certificate *x509.Certificate) error { - if s.checkSHA256(certificate) { - return nil +// CheckAny checks if at least one certificate matches one of the public keys in the set +func (s *Set) CheckAny(certificates []*x509.Certificate) error { + var hashes []string + + for _, certificate := range certificates { + if s.checkSHA256(certificate) { + return nil + } + + hashes = append(hashes, Hash(certificate)) } - return errors.Errorf("public key %s not pinned", Hash(certificate)) + return errors.Errorf("none of the public keys %q are pinned", strings.Join(hashes, ":")) } // Empty returns true if the Set contains no pinned public keys. diff --git a/cmd/kubeadm/app/util/pubkeypin/pubkeypin_test.go b/cmd/kubeadm/app/util/pubkeypin/pubkeypin_test.go index 4e578a4bdb9..a5a0d57a45f 100644 --- a/cmd/kubeadm/app/util/pubkeypin/pubkeypin_test.go +++ b/cmd/kubeadm/app/util/pubkeypin/pubkeypin_test.go @@ -121,7 +121,7 @@ func TestSet(t *testing.T) { return } - err = s.Check(testCert(t, testCertPEM)) + err = s.CheckAny([]*x509.Certificate{testCert(t, testCertPEM)}) if err == nil { t.Error("expected test cert to not be allowed (yet)") return @@ -133,13 +133,13 @@ func TestSet(t *testing.T) { return } - err = s.Check(testCert(t, testCertPEM)) + err = s.CheckAny([]*x509.Certificate{testCert(t, testCertPEM)}) if err != nil { t.Errorf("expected test cert to be allowed, but got back: %v", err) return } - err = s.Check(testCert(t, testCert2PEM)) + err = s.CheckAny([]*x509.Certificate{testCert(t, testCert2PEM)}) if err == nil { t.Error("expected the second test cert to be disallowed") return From 62b4bcd1561a818a5b528a37fdb31ec66bbc5a76 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Sun, 14 Apr 2019 15:10:16 -0400 Subject: [PATCH 033/209] Update client-go module install instructions --- staging/src/k8s.io/client-go/INSTALL.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/client-go/INSTALL.md b/staging/src/k8s.io/client-go/INSTALL.md index db97efc515b..aba63c873c3 100644 --- a/staging/src/k8s.io/client-go/INSTALL.md +++ b/staging/src/k8s.io/client-go/INSTALL.md @@ -65,7 +65,7 @@ for each follows. ### Go modules Dependency management tools are built into go 1.11+ in the form of [go modules](https://github.com/golang/go/wiki/Modules). -These are used by the main Kubernetes repo (>= 1.15) and `client-go` (>= v12.0) to manage dependencies. +These are used by the main Kubernetes repo (>= 1.15) and `client-go` (on master, and v12.0.0+ once released) to manage dependencies. When using `client-go` v12.0.0+ and go 1.11.4+, go modules are the recommended dependency management tool. If you are using go 1.11 or 1.12 and are working with a project located within `$GOPATH`, @@ -83,10 +83,10 @@ go mod init ``` Indicate which version of `client-go` your project requires. 
-For `client-go` 12.0.0+, this is a single step: +For `client-go` on master (and once version v12.0.0 is released), this is a single step: ```sh -go get k8s.io/client-go@v12.0.0 # replace v12.0.0 with the required version +go get k8s.io/client-go@master # or v12.0.0+ once released ``` For `client-go` prior to v12.0.0, you also need to indicate the required versions of `k8s.io/api` and `k8s.io/apimachinery`: From 6c70ca61bee5ffc2e817c186ea3df9bd2205d768 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 14 Apr 2019 03:09:06 +0000 Subject: [PATCH 034/209] move disk lock process to azure cloud provider fix comments fix import keymux check error add unit test for attach/detach disk funcs --- .../providers/.import-restrictions | 3 +- pkg/cloudprovider/providers/azure/BUILD | 2 + .../azure/azure_controller_common.go | 39 ++++++++++- .../azure/azure_controller_common_test.go | 66 +++++++++++++++++++ pkg/volume/azure_dd/BUILD | 1 - pkg/volume/azure_dd/attacher.go | 40 ++--------- pkg/volume/azure_dd/azure_dd.go | 2 +- 7 files changed, 113 insertions(+), 40 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_controller_common_test.go diff --git a/pkg/cloudprovider/providers/.import-restrictions b/pkg/cloudprovider/providers/.import-restrictions index 80b05fdd205..8371496c69d 100644 --- a/pkg/cloudprovider/providers/.import-restrictions +++ b/pkg/cloudprovider/providers/.import-restrictions @@ -8,7 +8,8 @@ "k8s.io/utils/io", "k8s.io/utils/strings", "k8s.io/utils/exec", - "k8s.io/utils/path" + "k8s.io/utils/path", + "k8s.io/utils/keymutex" ] }, { diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index bd97289989d..bbe2482526c 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -69,6 +69,7 @@ go_library( "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/keymutex:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) @@ -78,6 +79,7 @@ go_test( srcs = [ "azure_backoff_test.go", "azure_cache_test.go", + "azure_controller_common_test.go", "azure_instances_test.go", "azure_loadbalancer_test.go", "azure_metrics_test.go", diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common.go b/pkg/cloudprovider/providers/azure/azure_controller_common.go index 1b5148d3d51..b4b2e532894 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -17,6 +17,7 @@ limitations under the License. package azure import ( + "context" "fmt" "time" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" kwait "k8s.io/apimachinery/pkg/util/wait" cloudprovider "k8s.io/cloud-provider" + "k8s.io/utils/keymutex" ) const ( @@ -50,6 +52,9 @@ var defaultBackOff = kwait.Backoff{ Jitter: 0.0, } +// acquire lock to attach/detach disk in one node +var diskOpMutex = keymutex.NewHashed(0) + type controllerCommon struct { subscriptionID string location string @@ -85,13 +90,29 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) return ss, nil } -// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. 
-func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
+// AttachDisk attaches a vhd to vm. The vhd must exist and can be identified by diskName and diskURI.
+func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) error {
 	vmset, err := c.getNodeVMSet(nodeName)
 	if err != nil {
 		return err
 	}
 
+	instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
+	if err != nil {
+		klog.Warningf("failed to get azure instance id (%v)", err)
+		return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
+	}
+
+	diskOpMutex.LockKey(instanceid)
+	defer diskOpMutex.UnlockKey(instanceid)
+
+	lun, err := c.GetNextDiskLun(nodeName)
+	if err != nil {
+		klog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
+		return fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err)
+	}
+
+	klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName)
 	return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
 }
 
@@ -102,11 +123,25 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
 		return err
 	}
 
+	instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
+	if err != nil {
+		klog.Warningf("failed to get azure instance id (%v)", err)
+		return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
+	}
+
+	klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)
+
+	// keep the critical section around the detach call as small as possible
+	diskOpMutex.LockKey(instanceid)
 	resp, err := vmset.DetachDisk(diskName, diskURI, nodeName)
+	diskOpMutex.UnlockKey(instanceid)
+
 	if c.cloud.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
 		klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
 		retryErr := kwait.ExponentialBackoff(c.cloud.requestBackoff(), func() (bool, error) {
+			diskOpMutex.LockKey(instanceid)
 			resp, err := vmset.DetachDisk(diskName, diskURI, nodeName)
+			diskOpMutex.UnlockKey(instanceid)
 			return c.cloud.processHTTPRetryResponse(nil, "", resp, err)
 		})
 		if retryErr != nil {
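The locking above keys on the VM instance ID: concurrent attach/detach calls against the same node serialize, while calls against different nodes proceed in parallel. A minimal, self-contained sketch of that pattern, assuming only `k8s.io/utils/keymutex` (the node and disk names here are hypothetical):

```go
package main

import (
	"fmt"
	"sync"

	"k8s.io/utils/keymutex"
)

// NewHashed(0) sizes the internal mutex table from the CPU count; keys are
// hashed onto that fixed table, so memory stays bounded no matter how many
// distinct keys get locked over time.
var diskOpMutex = keymutex.NewHashed(0)

func attachDisk(instanceID, diskName string) {
	diskOpMutex.LockKey(instanceID)
	defer diskOpMutex.UnlockKey(instanceID)
	fmt.Printf("attaching %s to %s\n", diskName, instanceID)
}

func main() {
	var wg sync.WaitGroup
	for _, disk := range []string{"disk-a", "disk-b"} {
		wg.Add(1)
		go func(d string) {
			defer wg.Done()
			attachDisk("node-1", d) // same key, so these two attaches serialize
		}(disk)
	}
	wg.Wait()
}
```

The trade-off of a hashed key mutex is that two unrelated keys can occasionally hash to the same underlying mutex and contend; in exchange, the lock table never grows with the number of nodes.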
diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common_test.go b/pkg/cloudprovider/providers/azure/azure_controller_common_test.go
new file mode 100644
index 00000000000..e4b30d41cfa
--- /dev/null
+++ b/pkg/cloudprovider/providers/azure/azure_controller_common_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azure
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
+)
+
+func TestAttachDisk(t *testing.T) {
+	c := getTestCloud()
+
+	common := &controllerCommon{
+		location:              c.Location,
+		storageEndpointSuffix: c.Environment.StorageEndpointSuffix,
+		resourceGroup:         c.ResourceGroup,
+		subscriptionID:        c.SubscriptionID,
+		cloud:                 c,
+	}
+
+	diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup)
+
+	err := common.AttachDisk(true, "", diskURI, "node1", compute.CachingTypesReadOnly)
+	if err != nil {
+		t.Logf("TestAttachDisk returned expected error: %v", err)
+	} else {
+		t.Errorf("TestAttachDisk: unexpected nil error")
+	}
+}
+
+func TestDetachDisk(t *testing.T) {
+	c := getTestCloud()
+
+	common := &controllerCommon{
+		location:              c.Location,
+		storageEndpointSuffix: c.Environment.StorageEndpointSuffix,
+		resourceGroup:         c.ResourceGroup,
+		subscriptionID:        c.SubscriptionID,
+		cloud:                 c,
+	}
+
+	diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup)
+
+	err := common.DetachDisk("", diskURI, "node1")
+	if err != nil {
+		t.Logf("TestDetachDisk returned expected error: %v", err)
+	} else {
+		t.Errorf("TestDetachDisk: unexpected nil error")
+	}
+}
diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD
index d45498cd240..a894b6ee53a 100644
--- a/pkg/volume/azure_dd/BUILD
+++ b/pkg/volume/azure_dd/BUILD
@@ -40,7 +40,6 @@ go_library(
         "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
         "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
-        "//vendor/k8s.io/utils/keymutex:go_default_library",
         "//vendor/k8s.io/utils/strings:go_default_library",
     ],
 )
diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go
index b9453f70154..f7486e1e1d1 100644
--- a/pkg/volume/azure_dd/attacher.go
+++ b/pkg/volume/azure_dd/attacher.go
@@ -17,7 +17,6 @@ limitations under the License.
package azure_dd import ( - "context" "fmt" "os" "path/filepath" @@ -36,7 +35,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/utils/keymutex" ) type azureDiskDetacher struct { @@ -55,9 +53,6 @@ var _ volume.Detacher = &azureDiskDetacher{} var _ volume.DeviceMounter = &azureDiskAttacher{} var _ volume.DeviceUnmounter = &azureDiskDetacher{} -// acquire lock to get an lun number -var getLunMutex = keymutex.NewHashed(0) - // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, _, err := getVolumeSource(spec) @@ -66,12 +61,6 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( return "", err } - instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName) - if err != nil { - klog.Warningf("failed to get azure instance id (%v)", err) - return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) - } - diskController, err := getDiskController(a.plugin.host) if err != nil { return "", err @@ -82,30 +71,22 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( // Log error and continue with attach klog.Warningf( "Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v", - instanceid, err) + nodeName, err) } if err == nil { // Volume is already attached to node. - klog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun) + klog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, nodeName, lun) } else { klog.V(2).Infof("GetDiskLun returned: %v. 
Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) - getLunMutex.LockKey(instanceid) - defer getLunMutex.UnlockKey(instanceid) - lun, err = diskController.GetNextDiskLun(nodeName) - if err != nil { - klog.Warningf("no LUN available for instance %q (%v)", nodeName, err) - return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err) - } - klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) - err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) + err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { klog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { - klog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) - return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err) + klog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, nodeName, err) + return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, nodeName, err) } } @@ -285,22 +266,11 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro return fmt.Errorf("invalid disk to detach: %q", diskURI) } - instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName) - if err != nil { - klog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) - return nil - } - - klog.V(2).Infof("detach %v from node %q", diskURI, nodeName) - diskController, err := getDiskController(d.plugin.host) if err != nil { return err } - getLunMutex.LockKey(instanceid) - defer getLunMutex.UnlockKey(instanceid) - err = diskController.DetachDisk("", diskURI, nodeName) if err != nil { klog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 04e31a661ad..811e04106aa 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -44,7 +44,7 @@ type DiskController interface { DeleteManagedDisk(diskURI string) error // Attaches the disk to the host machine. - AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine. 
DetachDisk(diskName, diskUri string, nodeName types.NodeName) error From f40f767d94cb3ddc841075ec8f72f154ffe8ca45 Mon Sep 17 00:00:00 2001 From: George Angel Date: Tue, 16 Apr 2019 09:43:53 +0100 Subject: [PATCH 035/209] update k8s.gcr.io/k8s-dns-node-cache image version v1.15.0 is affected by https://github.com/kubernetes/dns/issues/282 --- cluster/addons/dns/nodelocaldns/nodelocaldns.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml b/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml index 1ddd9b4e03f..9ed27376559 100644 --- a/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml +++ b/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml @@ -111,7 +111,7 @@ spec: operator: "Exists" containers: - name: node-cache - image: k8s.gcr.io/k8s-dns-node-cache:1.15.0 + image: k8s.gcr.io/k8s-dns-node-cache:1.15.1 resources: limits: memory: 30Mi From 56b19637052d58b57844f5dd579de3923f1c7d6a Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 16 Apr 2019 10:28:44 -0400 Subject: [PATCH 036/209] github.com/onsi/ginkgo v1.6.0 --- Godeps/LICENSES | 102 ++++ go.mod | 2 +- go.sum | 7 +- staging/src/k8s.io/api/go.mod | 1 - staging/src/k8s.io/api/go.sum | 2 +- .../src/k8s.io/apiextensions-apiserver/go.mod | 1 - .../src/k8s.io/apiextensions-apiserver/go.sum | 8 +- staging/src/k8s.io/apimachinery/go.mod | 1 - staging/src/k8s.io/apimachinery/go.sum | 8 +- staging/src/k8s.io/apiserver/go.mod | 1 - staging/src/k8s.io/apiserver/go.sum | 8 +- staging/src/k8s.io/cli-runtime/go.mod | 1 - staging/src/k8s.io/cli-runtime/go.sum | 8 +- staging/src/k8s.io/client-go/go.mod | 1 - staging/src/k8s.io/client-go/go.sum | 8 +- staging/src/k8s.io/cloud-provider/go.mod | 1 - staging/src/k8s.io/cloud-provider/go.sum | 8 +- staging/src/k8s.io/cluster-bootstrap/go.mod | 1 - staging/src/k8s.io/cluster-bootstrap/go.sum | 2 +- staging/src/k8s.io/component-base/go.mod | 1 - staging/src/k8s.io/component-base/go.sum | 2 +- staging/src/k8s.io/csi-translation-lib/go.mod | 1 - staging/src/k8s.io/csi-translation-lib/go.sum | 2 +- staging/src/k8s.io/kube-aggregator/go.mod | 1 - staging/src/k8s.io/kube-aggregator/go.sum | 8 +- .../src/k8s.io/kube-controller-manager/go.mod | 1 - .../src/k8s.io/kube-controller-manager/go.sum | 2 +- staging/src/k8s.io/kube-proxy/go.mod | 1 - staging/src/k8s.io/kube-proxy/go.sum | 2 +- staging/src/k8s.io/kube-scheduler/go.mod | 1 - staging/src/k8s.io/kube-scheduler/go.sum | 2 +- staging/src/k8s.io/kubelet/go.mod | 1 - staging/src/k8s.io/kubelet/go.sum | 2 +- staging/src/k8s.io/metrics/go.mod | 1 - staging/src/k8s.io/metrics/go.sum | 8 +- staging/src/k8s.io/node-api/go.mod | 1 - staging/src/k8s.io/node-api/go.sum | 8 +- staging/src/k8s.io/sample-apiserver/go.mod | 1 - staging/src/k8s.io/sample-apiserver/go.sum | 8 +- staging/src/k8s.io/sample-cli-plugin/go.mod | 1 - staging/src/k8s.io/sample-cli-plugin/go.sum | 8 +- staging/src/k8s.io/sample-controller/go.mod | 1 - staging/src/k8s.io/sample-controller/go.sum | 8 +- vendor/BUILD | 3 + vendor/github.com/hpcloud/tail/.gitignore | 3 + vendor/github.com/hpcloud/tail/.travis.yml | 18 + vendor/github.com/hpcloud/tail/BUILD | 44 ++ vendor/github.com/hpcloud/tail/CHANGES.md | 63 ++ vendor/github.com/hpcloud/tail/Dockerfile | 19 + vendor/github.com/hpcloud/tail/LICENSE.txt | 21 + vendor/github.com/hpcloud/tail/Makefile | 11 + vendor/github.com/hpcloud/tail/README.md | 28 + vendor/github.com/hpcloud/tail/appveyor.yml | 11 + .../github.com/hpcloud/tail/ratelimiter/BUILD | 27 + 
.../hpcloud/tail/ratelimiter/Licence | 7 + .../hpcloud/tail/ratelimiter/leakybucket.go | 97 +++ .../hpcloud/tail/ratelimiter/memory.go | 58 ++ .../hpcloud/tail/ratelimiter/storage.go | 6 + vendor/github.com/hpcloud/tail/tail.go | 438 ++++++++++++++ vendor/github.com/hpcloud/tail/tail_posix.go | 11 + .../github.com/hpcloud/tail/tail_windows.go | 12 + vendor/github.com/hpcloud/tail/util/BUILD | 23 + vendor/github.com/hpcloud/tail/util/util.go | 48 ++ vendor/github.com/hpcloud/tail/watch/BUILD | 34 ++ .../hpcloud/tail/watch/filechanges.go | 36 ++ .../github.com/hpcloud/tail/watch/inotify.go | 128 ++++ .../hpcloud/tail/watch/inotify_tracker.go | 260 ++++++++ .../github.com/hpcloud/tail/watch/polling.go | 118 ++++ vendor/github.com/hpcloud/tail/watch/watch.go | 20 + vendor/github.com/hpcloud/tail/winfile/BUILD | 23 + .../hpcloud/tail/winfile/winfile.go | 92 +++ vendor/github.com/onsi/ginkgo/.gitignore | 5 +- vendor/github.com/onsi/ginkgo/.travis.yml | 10 +- vendor/github.com/onsi/ginkgo/BUILD | 1 + vendor/github.com/onsi/ginkgo/CHANGELOG.md | 63 +- vendor/github.com/onsi/ginkgo/CONTRIBUTING.md | 33 ++ vendor/github.com/onsi/ginkgo/README.md | 36 +- vendor/github.com/onsi/ginkgo/RELEASING.md | 14 + .../github.com/onsi/ginkgo/config/config.go | 17 +- .../onsi/ginkgo/ginkgo/bootstrap_command.go | 8 +- .../onsi/ginkgo/ginkgo/build_command.go | 2 +- .../onsi/ginkgo/ginkgo/convert_command.go | 3 +- .../onsi/ginkgo/ginkgo/generate_command.go | 12 +- vendor/github.com/onsi/ginkgo/ginkgo/main.go | 4 + .../onsi/ginkgo/ginkgo/nodot_command.go | 3 +- .../onsi/ginkgo/ginkgo/run_command.go | 89 ++- .../run_watch_and_build_command_flags.go | 9 +- .../onsi/ginkgo/ginkgo/testrunner/BUILD | 1 + .../ginkgo/ginkgo/testrunner/test_runner.go | 156 +++-- .../ginkgo/ginkgo/testsuite/test_suite.go | 8 +- .../onsi/ginkgo/ginkgo/unfocus_command.go | 31 +- .../onsi/ginkgo/ginkgo/version_command.go | 1 + .../onsi/ginkgo/ginkgo/watch/delta_tracker.go | 8 +- .../onsi/ginkgo/ginkgo/watch/package_hash.go | 15 +- .../ginkgo/ginkgo/watch/package_hashes.go | 7 +- .../onsi/ginkgo/ginkgo/watch_command.go | 5 +- vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 68 ++- .../ginkgo/internal/leafnodes/benchmarker.go | 4 +- .../onsi/ginkgo/internal/leafnodes/it_node.go | 3 +- .../ginkgo/internal/leafnodes/measure_node.go | 3 +- .../onsi/ginkgo/internal/leafnodes/runner.go | 8 +- .../ginkgo/internal/leafnodes/setup_nodes.go | 3 +- .../ginkgo/internal/leafnodes/suite_nodes.go | 3 +- .../synchronized_after_suite_node.go | 5 +- .../synchronized_before_suite_node.go | 7 +- .../onsi/ginkgo/internal/remote/BUILD | 20 + .../onsi/ginkgo/internal/remote/aggregator.go | 2 +- .../internal/remote/forwarding_reporter.go | 61 +- .../internal/remote/output_interceptor.go | 3 + .../remote/output_interceptor_unix.go | 28 + .../internal/remote/output_interceptor_win.go | 3 + .../onsi/ginkgo/internal/remote/server.go | 2 +- .../remote/syscall_dup_linux_arm64.go | 2 +- .../internal/remote/syscall_dup_solaris.go | 2 +- .../internal/remote/syscall_dup_unix.go | 2 +- .../onsi/ginkgo/internal/spec/spec.go | 71 ++- .../spec_iterator/parallel_spec_iterator.go | 3 +- .../ginkgo/internal/specrunner/spec_runner.go | 7 +- .../onsi/ginkgo/internal/suite/suite.go | 10 +- .../onsi/ginkgo/internal/writer/writer.go | 16 +- .../onsi/ginkgo/reporters/default_reporter.go | 2 +- .../onsi/ginkgo/reporters/junit_reporter.go | 7 +- .../onsi/ginkgo/reporters/stenographer/BUILD | 5 +- .../reporters/stenographer/stenographer.go | 7 +- .../ginkgo/reporters/teamcity_reporter.go | 5 +- 
vendor/gopkg.in/fsnotify.v1/.editorconfig | 5 + vendor/gopkg.in/fsnotify.v1/.gitignore | 6 + vendor/gopkg.in/fsnotify.v1/.travis.yml | 30 + vendor/gopkg.in/fsnotify.v1/AUTHORS | 52 ++ vendor/gopkg.in/fsnotify.v1/BUILD | 53 ++ vendor/gopkg.in/fsnotify.v1/CHANGELOG.md | 317 ++++++++++ vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 +++ vendor/gopkg.in/fsnotify.v1/LICENSE | 28 + vendor/gopkg.in/fsnotify.v1/README.md | 79 +++ vendor/gopkg.in/fsnotify.v1/fen.go | 37 ++ vendor/gopkg.in/fsnotify.v1/fsnotify.go | 66 +++ vendor/gopkg.in/fsnotify.v1/inotify.go | 337 +++++++++++ vendor/gopkg.in/fsnotify.v1/inotify_poller.go | 187 ++++++ vendor/gopkg.in/fsnotify.v1/kqueue.go | 521 ++++++++++++++++ vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 + .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 + vendor/gopkg.in/fsnotify.v1/windows.go | 561 ++++++++++++++++++ vendor/gopkg.in/tomb.v1/BUILD | 23 + vendor/gopkg.in/tomb.v1/LICENSE | 29 + vendor/gopkg.in/tomb.v1/README.md | 4 + vendor/gopkg.in/tomb.v1/tomb.go | 176 ++++++ vendor/modules.txt | 12 +- 147 files changed, 5178 insertions(+), 250 deletions(-) create mode 100644 vendor/github.com/hpcloud/tail/.gitignore create mode 100644 vendor/github.com/hpcloud/tail/.travis.yml create mode 100644 vendor/github.com/hpcloud/tail/BUILD create mode 100644 vendor/github.com/hpcloud/tail/CHANGES.md create mode 100644 vendor/github.com/hpcloud/tail/Dockerfile create mode 100644 vendor/github.com/hpcloud/tail/LICENSE.txt create mode 100644 vendor/github.com/hpcloud/tail/Makefile create mode 100644 vendor/github.com/hpcloud/tail/README.md create mode 100644 vendor/github.com/hpcloud/tail/appveyor.yml create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/BUILD create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/Licence create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/memory.go create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/storage.go create mode 100644 vendor/github.com/hpcloud/tail/tail.go create mode 100644 vendor/github.com/hpcloud/tail/tail_posix.go create mode 100644 vendor/github.com/hpcloud/tail/tail_windows.go create mode 100644 vendor/github.com/hpcloud/tail/util/BUILD create mode 100644 vendor/github.com/hpcloud/tail/util/util.go create mode 100644 vendor/github.com/hpcloud/tail/watch/BUILD create mode 100644 vendor/github.com/hpcloud/tail/watch/filechanges.go create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify.go create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify_tracker.go create mode 100644 vendor/github.com/hpcloud/tail/watch/polling.go create mode 100644 vendor/github.com/hpcloud/tail/watch/watch.go create mode 100644 vendor/github.com/hpcloud/tail/winfile/BUILD create mode 100644 vendor/github.com/hpcloud/tail/winfile/winfile.go create mode 100644 vendor/github.com/onsi/ginkgo/CONTRIBUTING.md create mode 100644 vendor/github.com/onsi/ginkgo/RELEASING.md create mode 100644 vendor/gopkg.in/fsnotify.v1/.editorconfig create mode 100644 vendor/gopkg.in/fsnotify.v1/.gitignore create mode 100644 vendor/gopkg.in/fsnotify.v1/.travis.yml create mode 100644 vendor/gopkg.in/fsnotify.v1/AUTHORS create mode 100644 vendor/gopkg.in/fsnotify.v1/BUILD create mode 100644 vendor/gopkg.in/fsnotify.v1/CHANGELOG.md create mode 100644 vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/fsnotify.v1/LICENSE create mode 100644 vendor/gopkg.in/fsnotify.v1/README.md create mode 100644 
vendor/gopkg.in/fsnotify.v1/fen.go create mode 100644 vendor/gopkg.in/fsnotify.v1/fsnotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify_poller.go create mode 100644 vendor/gopkg.in/fsnotify.v1/kqueue.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go create mode 100644 vendor/gopkg.in/fsnotify.v1/windows.go create mode 100644 vendor/gopkg.in/tomb.v1/BUILD create mode 100644 vendor/gopkg.in/tomb.v1/LICENSE create mode 100644 vendor/gopkg.in/tomb.v1/README.md create mode 100644 vendor/gopkg.in/tomb.v1/tomb.go diff --git a/Godeps/LICENSES b/Godeps/LICENSES index ba5c5553d6c..83283b10f9a 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -11507,6 +11507,35 @@ under a dual license of LGPLv3+ or GPLv2. ================================================================================ +================================================================================ += vendor/github.com/hpcloud/tail licensed under: = + +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/hpcloud/tail/LICENSE.txt 0bdce43b16cd5c587124d6f274632c87 +================================================================================ + + ================================================================================ = vendor/github.com/imdario/mergo licensed under: = @@ -19918,6 +19947,42 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/gopkg.in/fsnotify.v1 licensed under: = + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/gopkg.in/fsnotify.v1/LICENSE c38914c9a7ab03bb2b96d4baaee10769 +================================================================================ + + ================================================================================ = vendor/gopkg.in/gcfg.v1 licensed under: = @@ -20228,6 +20293,43 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/gopkg.in/tomb.v1 licensed under: = + +tomb - support for clean goroutine termination in Go. + +Copyright (c) 2010-2011 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/gopkg.in/tomb.v1/LICENSE 95d4102f39f26da9b66fee5d05ac597b +================================================================================ + + ================================================================================ = vendor/gopkg.in/warnings.v0 licensed under: = diff --git a/go.mod b/go.mod index 24f4f30ca54..dd20ed7f9bb 100644 --- a/go.mod +++ b/go.mod @@ -350,7 +350,7 @@ replace ( github.com/mvdan/xurls => github.com/mvdan/xurls v0.0.0-20160110113200-1b768d7c393a github.com/mxk/go-flowrate => github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/natefinch/lumberjack => github.com/natefinch/lumberjack v2.0.0+incompatible - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe + github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.6.0 github.com/onsi/gomega => github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v0.0.0-20170604055404-372ad780f634 diff --git a/go.sum b/go.sum index f434ceb7eef..ba3a1420764 100644 --- a/go.sum +++ b/go.sum @@ -215,6 +215,7 @@ github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 h1:oJ/NLadJn5HoxvonA6 github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64 h1:dk3GEa55HcRVIyCeNQmwwwH3kIXnqJPNseKOkDD+7uQ= github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64/go.mod h1:RYlF4ghFZPPmk2TC5REt5OFwvfb6lzxFWrTWB+qs28s= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -294,8 +295,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 h1:Yu3681ykYHDfLoI6XVjL4JWmkE+3TX9yfIWwRCh1kFM= @@ -445,6 +446,7 @@ google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 
h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -454,6 +456,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7 h1:986b60BAz gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84 h1:ELQJ5WuT+ydETLCpWvAuw8iGBQRGoJq+A3RAbbAcZUY= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod index c65fab83a8f..4a9732f5df7 100644 --- a/staging/src/k8s.io/api/go.mod +++ b/staging/src/k8s.io/api/go.mod @@ -11,7 +11,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index 14039ca2767..d27d9409e3e 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -22,7 +22,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index 65a4ee25a7d..22711f8a4f1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -38,7 +38,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 12ce371c471..57b2d5773b9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -37,6 +37,7 @@ github.com/emicklei/go-restful 
v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -91,6 +92,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.3.0 h1:HJtP6RRwj2EpPCD/mhAWzSvLL/dFTdP github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -117,8 +119,8 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= @@ -199,12 +201,14 @@ google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7 h1:986b60BAz5vO2Vaf48yQaq+wb2bU4JsXxKu1+itW6x8= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index 59324225494..bc774897197 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -36,7 +36,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index daa74554a1f..0c791855a14 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -6,6 +6,7 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -21,6 +22,7 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -30,8 +32,8 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo 
v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -51,9 +53,11 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5f golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index 41ea6f61991..bfeb6245432 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -74,7 +74,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 0defad7ac8e..5f6c802e820 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -36,6 +36,7 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -77,6 +78,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.3.0 h1:HJtP6RRwj2EpPCD/mhAWzSvLL/dFTdP github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -99,8 +101,8 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= @@ -170,6 +172,7 @@ google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -177,6 +180,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7 h1:986b60BAz gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84 h1:ELQJ5WuT+ydETLCpWvAuw8iGBQRGoJq+A3RAbbAcZUY= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod index b7135168966..445d1d2ffae 100644 --- a/staging/src/k8s.io/cli-runtime/go.mod +++ b/staging/src/k8s.io/cli-runtime/go.mod @@ -27,7 +27,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 
k8s.io/api => ../api diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index 4ab1f3cd42b..3f5bb0ea74e 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -13,6 +13,7 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -40,6 +41,7 @@ github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1: github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -54,8 +56,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -89,9 +91,11 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 
h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod index de79431c9d1..252d62d8bbb 100644 --- a/staging/src/k8s.io/client-go/go.mod +++ b/staging/src/k8s.io/client-go/go.mod @@ -35,7 +35,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum index 5c370e1d757..9c8b749ca8b 100644 --- a/staging/src/k8s.io/client-go/go.sum +++ b/staging/src/k8s.io/client-go/go.sum @@ -12,6 +12,7 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -32,6 +33,7 @@ github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHpe github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -42,8 +44,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -73,9 +75,11 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod index 9d1cc85fac7..5be82242375 100644 --- a/staging/src/k8s.io/cloud-provider/go.mod +++ b/staging/src/k8s.io/cloud-provider/go.mod @@ -14,7 +14,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum index 09b1afc5b94..33336a1244d 100644 --- a/staging/src/k8s.io/cloud-provider/go.sum +++ b/staging/src/k8s.io/cloud-provider/go.sum @@ -20,6 +20,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= @@ -48,6 +49,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/ github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -63,8 +65,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -114,11 +116,13 @@ google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoA google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod index 6386920a660..103b2d2fc3f 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.mod +++ b/staging/src/k8s.io/cluster-bootstrap/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index 6a6a436650f..c5c617c5e23 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -21,7 +21,7 @@ github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index 9f198cf3fc4..a8c11392756 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -12,7 +12,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index f31cd720ef7..2b27de2a7a1 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -21,7 +21,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod index 2f1a504dcb2..2ad5aeecc70 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.mod +++ b/staging/src/k8s.io/csi-translation-lib/go.mod @@ -11,7 +11,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index ca521ae6911..5999f63d724 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -59,7 +59,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index 02dacea5f75..af721de73ce 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -27,7 +27,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index bf62b76ff83..e2f00b2af0b 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -36,6 +36,7 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -76,6 +77,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.3.0 h1:HJtP6RRwj2EpPCD/mhAWzSvLL/dFTdP github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -101,8 +103,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe 
h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= @@ -183,12 +185,14 @@ google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7 h1:986b60BAz5vO2Vaf48yQaq+wb2bU4JsXxKu1+itW6x8= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index 6730677a509..2944d438fd2 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index f31cd720ef7..2b27de2a7a1 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -21,7 +21,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index e44e1f20ec5..21a3eb63dcd 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index f31cd720ef7..2b27de2a7a1 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -21,7 +21,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index 86853a88b88..3b6147dc474 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index f31cd720ef7..2b27de2a7a1 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -21,7 +21,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod index d9a5c936bfb..5dcca92a64b 100644 --- a/staging/src/k8s.io/kubelet/go.mod +++ b/staging/src/k8s.io/kubelet/go.mod @@ -10,7 +10,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 6a6a436650f..c5c617c5e23 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -21,7 +21,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod index 3f12ff4cf86..10af589e042 100644 --- a/staging/src/k8s.io/metrics/go.mod +++ b/staging/src/k8s.io/metrics/go.mod @@ -14,7 +14,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index f549cfbb46b..c8a03c7f23a 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -8,6 +8,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -23,6 +24,7 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru 
v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE= @@ -32,8 +34,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -72,9 +74,11 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/staging/src/k8s.io/node-api/go.mod b/staging/src/k8s.io/node-api/go.mod index 7e8457e571a..d25eafc3d30 100644 --- a/staging/src/k8s.io/node-api/go.mod +++ b/staging/src/k8s.io/node-api/go.mod @@ -11,7 +11,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/node-api/go.sum b/staging/src/k8s.io/node-api/go.sum index f5bcbb9b0b7..9a0445d4918 100644 --- a/staging/src/k8s.io/node-api/go.sum +++ b/staging/src/k8s.io/node-api/go.sum @@ -8,6 +8,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/evanphx/json-patch 
v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -25,6 +26,7 @@ github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1: github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE= @@ -34,8 +36,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -74,9 +76,11 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git 
a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index b6e853b6f58..e1bce6bde67 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -17,7 +17,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index e7df898e7b0..ac75fa431a7 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -34,6 +34,7 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -74,6 +75,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.3.0 h1:HJtP6RRwj2EpPCD/mhAWzSvLL/dFTdP github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -98,8 +100,8 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= @@ -180,12 +182,14 @@ google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= google.golang.org/grpc 
v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7 h1:986b60BAz5vO2Vaf48yQaq+wb2bU4JsXxKu1+itW6x8= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod index 12f4fc41568..9062b580ff1 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.mod +++ b/staging/src/k8s.io/sample-cli-plugin/go.mod @@ -12,7 +12,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index 4ab1f3cd42b..3f5bb0ea74e 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -13,6 +13,7 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -40,6 +41,7 @@ github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1: github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -54,8 +56,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -89,9 +91,11 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod index ecd8f00d557..2a09dbc1ec3 100644 --- a/staging/src/k8s.io/sample-controller/go.mod +++ b/staging/src/k8s.io/sample-controller/go.mod @@ -13,7 +13,6 @@ require ( ) replace ( - github.com/onsi/ginkgo => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 k8s.io/api => ../api diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index 38b27fab3ea..d2ebb18ecd2 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -8,6 +8,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -25,6 +26,7 @@ github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1: github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -35,8 +37,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe h1:QAfc0vzfRhbwvVHr25DYiE53mBfvVWgE2gxyzhqor08= -github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -75,9 +77,11 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/BUILD b/vendor/BUILD index 8336f49451c..2c12f902860 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -267,6 +267,7 @@ filegroup( 
"//vendor/github.com/heketi/heketi/client/api/go-client:all-srcs", "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:all-srcs", "//vendor/github.com/heketi/heketi/pkg/utils:all-srcs", + "//vendor/github.com/hpcloud/tail:all-srcs", "//vendor/github.com/imdario/mergo:all-srcs", "//vendor/github.com/inconshreveable/mousetrap:all-srcs", "//vendor/github.com/jmespath/go-jmespath:all-srcs", @@ -437,10 +438,12 @@ filegroup( "//vendor/google.golang.org/appengine:all-srcs", "//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs", "//vendor/google.golang.org/grpc:all-srcs", + "//vendor/gopkg.in/fsnotify.v1:all-srcs", "//vendor/gopkg.in/gcfg.v1:all-srcs", "//vendor/gopkg.in/inf.v0:all-srcs", "//vendor/gopkg.in/natefinch/lumberjack.v2:all-srcs", "//vendor/gopkg.in/square/go-jose.v2:all-srcs", + "//vendor/gopkg.in/tomb.v1:all-srcs", "//vendor/gopkg.in/warnings.v0:all-srcs", "//vendor/gopkg.in/yaml.v2:all-srcs", "//vendor/k8s.io/gengo/args:all-srcs", diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore new file mode 100644 index 00000000000..6d9953c3c6a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.gitignore @@ -0,0 +1,3 @@ +.test +.go + diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml new file mode 100644 index 00000000000..9cf8bb7fc5f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.travis.yml @@ -0,0 +1,18 @@ +language: go + +script: + - go test -race -v ./... + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/fsnotify.v1 + - go get gopkg.in/tomb.v1 diff --git a/vendor/github.com/hpcloud/tail/BUILD b/vendor/github.com/hpcloud/tail/BUILD new file mode 100644 index 00000000000..b6bddba86b6 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/BUILD @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "tail.go", + "tail_posix.go", + "tail_windows.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail", + importpath = "github.com/hpcloud/tail", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/hpcloud/tail/ratelimiter:go_default_library", + "//vendor/github.com/hpcloud/tail/util:go_default_library", + "//vendor/github.com/hpcloud/tail/watch:go_default_library", + "//vendor/gopkg.in/tomb.v1:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/hpcloud/tail/winfile:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/hpcloud/tail/ratelimiter:all-srcs", + "//vendor/github.com/hpcloud/tail/util:all-srcs", + "//vendor/github.com/hpcloud/tail/watch:all-srcs", + "//vendor/github.com/hpcloud/tail/winfile:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md new file mode 100644 index 00000000000..422790c073d --- /dev/null +++ b/vendor/github.com/hpcloud/tail/CHANGES.md @@ -0,0 +1,63 @@ +# API v1 (gopkg.in/hpcloud/tail.v1) + +## April, 2016 + +* Migrated to godep, as depman is not longer supported +* Introduced golang vendoring feature +* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to 
reopen deleted file + +## July, 2015 + +* Fix inotify watcher leak; remove `Cleanup` (#51) + +# API v0 (gopkg.in/hpcloud/tail.v0) + +## June, 2015 + +* Don't return partial lines (PR #40) +* Use stable version of fsnotify (#46) + +## July, 2014 + +* Fix tail for Windows (PR #36) + +## May, 2014 + +* Improved rate limiting using leaky bucket (PR #29) +* Fix odd line splitting (PR #30) + +## Apr, 2014 + +* LimitRate now discards read buffer (PR #28) +* allow reading of longer lines if MaxLineSize is unset (PR #24) +* updated deps.json to latest fsnotify (441bbc86b1) + +## Feb, 2014 + +* added `Config.Logger` to suppress library logging + +## Nov, 2013 + +* add Cleanup to remove leaky inotify watches (PR #20) + +## Aug, 2013 + +* redesigned Location field (PR #12) +* add tail.Tell (PR #14) + +## July, 2013 + +* Rate limiting (PR #10) + +## May, 2013 + +* Detect file deletions/renames in polling file watcher (PR #1) +* Detect file truncation +* Fix potential race condition when reopening the file (issue 5) +* Fix potential blocking of `tail.Stop` (issue 4) +* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop +* Support Follow=false + +## Feb, 2013 + +* Initial open source release diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile new file mode 100644 index 00000000000..cd297b940a9 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/ +ADD . $GOPATH/src/github.com/hpcloud/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/hpcloud/tail + +# expecting to run the test successfully. +RUN go test -v github.com/hpcloud/tail + +# expecting to install successfully +RUN go install -v github.com/hpcloud/tail +RUN go install -v github.com/hpcloud/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt new file mode 100644 index 00000000000..818d802a59a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/LICENSE.txt @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile new file mode 100644 index 00000000000..6591b24fc11 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Makefile @@ -0,0 +1,11 @@ +default: test + +test: *.go + go test -v -race ./... + +fmt: + gofmt -w . + +# Run the test in an isolated environment. +fulltest: + docker build -t hpcloud/tail . diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md new file mode 100644 index 00000000000..fb7fbc26c67 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/README.md @@ -0,0 +1,28 @@ +[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail) +[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](http://godoc.org/github.com/hpcloud/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. + +## Installing + + go get github.com/hpcloud/tail/... + +## Windows support + +This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml new file mode 100644 index 00000000000..d370055b6f0 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/appveyor.yml @@ -0,0 +1,11 @@ +version: 0.{build} +skip_tags: true +cache: C:\Users\appveyor\AppData\Local\NuGet\Cache +build_script: +- SET GOPATH=c:\workspace +- go test -v -race ./... 
+test: off +clone_folder: c:\workspace\src\github.com\hpcloud\tail +branches: + only: + - master diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/BUILD b/vendor/github.com/hpcloud/tail/ratelimiter/BUILD new file mode 100644 index 00000000000..b563182cbab --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "leakybucket.go", + "memory.go", + "storage.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/ratelimiter", + importpath = "github.com/hpcloud/tail/ratelimiter", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/hpcloud/tail/ratelimiter/Licence new file mode 100644 index 00000000000..434aab19f1a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/Licence @@ -0,0 +1,7 @@ +Copyright (C) 2013 99designs + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go new file mode 100644 index 00000000000..358b69e7f5c --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go @@ -0,0 +1,97 @@ +// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. 
+package ratelimiter + +import ( + "time" +) + +type LeakyBucket struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time + Now func() time.Time +} + +func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { + bucket := LeakyBucket{ + Size: size, + Fill: 0, + LeakInterval: leakInterval, + Now: time.Now, + Lastupdate: time.Now(), + } + + return &bucket +} + +func (b *LeakyBucket) updateFill() { + now := b.Now() + if b.Fill > 0 { + elapsed := now.Sub(b.Lastupdate) + + b.Fill -= float64(elapsed) / float64(b.LeakInterval) + if b.Fill < 0 { + b.Fill = 0 + } + } + b.Lastupdate = now +} + +func (b *LeakyBucket) Pour(amount uint16) bool { + b.updateFill() + + var newfill float64 = b.Fill + float64(amount) + + if newfill > float64(b.Size) { + return false + } + + b.Fill = newfill + + return true +} + +// The time at which this bucket will be completely drained +func (b *LeakyBucket) DrainedAt() time.Time { + return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) +} + +// The duration until this bucket is completely drained +func (b *LeakyBucket) TimeToDrain() time.Duration { + return b.DrainedAt().Sub(b.Now()) +} + +func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { + return b.Now().Sub(b.Lastupdate) +} + +type LeakyBucketSer struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time +} + +func (b *LeakyBucket) Serialise() *LeakyBucketSer { + bucket := LeakyBucketSer{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + } + + return &bucket +} + +func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { + bucket := LeakyBucket{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + Now: time.Now, + } + + return &bucket +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go new file mode 100644 index 00000000000..8f6a5784a9a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go @@ -0,0 +1,58 @@ +package ratelimiter + +import ( + "errors" + "time" +) + +const GC_SIZE int = 100 + +type Memory struct { + store map[string]LeakyBucket + lastGCCollected time.Time +} + +func NewMemory() *Memory { + m := new(Memory) + m.store = make(map[string]LeakyBucket) + m.lastGCCollected = time.Now() + return m +} + +func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { + + bucket, ok := m.store[key] + if !ok { + return nil, errors.New("miss") + } + + return &bucket, nil +} + +func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error { + + if len(m.store) > GC_SIZE { + m.GarbageCollect() + } + + m.store[key] = bucket + + return nil +} + +func (m *Memory) GarbageCollect() { + now := time.Now() + + // rate limit GC to once per minute + if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { + + for key, bucket := range m.store { + // if the bucket is drained, then GC + if bucket.DrainedAt().Unix() > now.Unix() { + delete(m.store, key) + } + } + + m.lastGCCollected = now + } +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go new file mode 100644 index 00000000000..89b2fe8826e --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go @@ -0,0 +1,6 @@ +package ratelimiter + +type Storage interface { + GetBucketFor(string) (*LeakyBucket, error) + 
SetBucketFor(string, LeakyBucket) error
+}
diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go
new file mode 100644
index 00000000000..2d252d60572
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail.go
@@ -0,0 +1,438 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package tail
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hpcloud/tail/ratelimiter"
+	"github.com/hpcloud/tail/util"
+	"github.com/hpcloud/tail/watch"
+	"gopkg.in/tomb.v1"
+)
+
+var (
+	ErrStop = fmt.Errorf("tail should now stop")
+)
+
+type Line struct {
+	Text string
+	Time time.Time
+	Err  error // Error from tail
+}
+
+// NewLine returns a Line with present time.
+func NewLine(text string) *Line {
+	return &Line{text, time.Now(), nil}
+}
+
+// SeekInfo represents arguments to `os.Seek`
+type SeekInfo struct {
+	Offset int64
+	Whence int // os.SEEK_*
+}
+
+type logger interface {
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+	Fatalln(v ...interface{})
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+	Panicln(v ...interface{})
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+// Config is used to specify how a file must be tailed.
+type Config struct {
+	// File-specific
+	Location    *SeekInfo // Seek to this location before tailing
+	ReOpen      bool      // Reopen recreated files (tail -F)
+	MustExist   bool      // Fail early if the file does not exist
+	Poll        bool      // Poll for file changes instead of using inotify
+	Pipe        bool      // Is a named pipe (mkfifo)
+	RateLimiter *ratelimiter.LeakyBucket
+
+	// Generic IO
+	Follow      bool // Continue looking for new lines (tail -f)
+	MaxLineSize int  // If non-zero, split longer lines into multiple lines
+
+	// Logger, when nil, is set to tail.DefaultLogger
+	// To disable logging: set field to tail.DiscardingLogger
+	Logger logger
+}
+
+type Tail struct {
+	Filename string
+	Lines    chan *Line
+	Config
+
+	file   *os.File
+	reader *bufio.Reader
+
+	watcher watch.FileWatcher
+	changes *watch.FileChanges
+
+	tomb.Tomb // provides: Done, Kill, Dying
+
+	lk sync.Mutex
+}
+
+var (
+	// DefaultLogger is used when Config.Logger == nil
+	DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+	// DiscardingLogger can be used to disable logging output
+	DiscardingLogger = log.New(ioutil.Discard, "", 0)
+)
+
+// TailFile begins tailing the file. Output stream is made available
+// via the `Tail.Lines` channel. To handle errors during tailing,
+// invoke the `Wait` or `Err` method after finishing reading from the
+// `Lines` channel.
+func TailFile(filename string, config Config) (*Tail, error) {
+	if config.ReOpen && !config.Follow {
+		util.Fatal("cannot set ReOpen without Follow.")
+	}
+
+	t := &Tail{
+		Filename: filename,
+		Lines:    make(chan *Line),
+		Config:   config,
+	}
+
+	// when Logger was not specified in config, use default logger
+	if t.Logger == nil {
+		t.Logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if t.Poll {
+		t.watcher = watch.NewPollingFileWatcher(filename)
+	} else {
+		t.watcher = watch.NewInotifyFileWatcher(filename)
+	}
+
+	if t.MustExist {
+		var err error
+		t.file, err = OpenFile(t.Filename)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	go t.tailFileSync()
+
+	return t, nil
+}
+
+// Return the file's current position, like stdio's ftell().
+// But this value is not very accurate;
+// it may have already read one line into the chan (tail.Lines),
+// so the reported offset may be off by one line.
+func (tail *Tail) Tell() (offset int64, err error) {
+	if tail.file == nil {
+		return
+	}
+	offset, err = tail.file.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return
+	}
+
+	tail.lk.Lock()
+	defer tail.lk.Unlock()
+	if tail.reader == nil {
+		return
+	}
+
+	offset -= int64(tail.reader.Buffered())
+	return
+}
+
+// Stop stops the tailing activity.
+func (tail *Tail) Stop() error {
+	tail.Kill(nil)
+	return tail.Wait()
+}
+
+// StopAtEOF stops tailing as soon as the end of the file is reached.
+func (tail *Tail) StopAtEOF() error {
+	tail.Kill(errStopAtEOF)
+	return tail.Wait()
+}
+
+var errStopAtEOF = errors.New("tail: stop at eof")
+
+func (tail *Tail) close() {
+	close(tail.Lines)
+	tail.closeFile()
+}
+
+func (tail *Tail) closeFile() {
+	if tail.file != nil {
+		tail.file.Close()
+		tail.file = nil
+	}
+}
+
+func (tail *Tail) reopen() error {
+	tail.closeFile()
+	for {
+		var err error
+		tail.file, err = OpenFile(tail.Filename)
+		if err != nil {
+			if os.IsNotExist(err) {
+				tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
+				if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
+					if err == tomb.ErrDying {
+						return err
+					}
+					return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
+				}
+				continue
+			}
+			return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
+		}
+		break
+	}
+	return nil
+}
+
+func (tail *Tail) readLine() (string, error) {
+	tail.lk.Lock()
+	line, err := tail.reader.ReadString('\n')
+	tail.lk.Unlock()
+	if err != nil {
+		// Note ReadString "returns the data read before the error" in
+		// case of an error, including EOF, so we return it as is. The
+		// caller is expected to process it if err is EOF.
+		return line, err
+	}
+
+	line = strings.TrimRight(line, "\n")
+
+	return line, err
+}
+
+func (tail *Tail) tailFileSync() {
+	defer tail.Done()
+	defer tail.close()
+
+	if !tail.MustExist {
+		// deferred first open.
+		err := tail.reopen()
+		if err != nil {
+			if err != tomb.ErrDying {
+				tail.Kill(err)
+			}
+			return
+		}
+	}
+
+	// Seek to requested location on first open of the file.
+	if tail.Location != nil {
+		_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
+		tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
+		if err != nil {
+			tail.Killf("Seek error on %s: %s", tail.Filename, err)
+			return
+		}
+	}
+
+	tail.openReader()
+
+	var offset int64 = 0
+	var err error
+
+	// Read line by line.
+	for {
+		// do not seek in named pipes
+		if !tail.Pipe {
+			// grab the position in case we need to back up in the event of a half-line
+			offset, err = tail.Tell()
+			if err != nil {
+				tail.Kill(err)
+				return
+			}
+		}
+
+		line, err := tail.readLine()
+
+		// Process `line` even if err is EOF.
+		if err == nil {
+			cooloff := !tail.sendLine(line)
+			if cooloff {
+				// Wait a second before seeking till the end of
+				// file when rate limit is reached.
+ msg := fmt.Sprintf( + "Too much log activity; waiting a second " + + "before resuming tailing") + tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)} + select { + case <-time.After(time.Second): + case <-tail.Dying(): + return + } + if err := tail.seekEnd(); err != nil { + tail.Kill(err) + return + } + } + } else if err == io.EOF { + if !tail.Follow { + if line != "" { + tail.sendLine(line) + } + return + } + + if tail.Follow && line != "" { + // this has the potential to never return the last line if + // it's not followed by a newline; seems a fair trade here + err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0}) + if err != nil { + tail.Kill(err) + return + } + } + + // When EOF is reached, wait for more data to become + // available. Wait strategy is based on the `tail.watcher` + // implementation (inotify or polling). + err := tail.waitForChanges() + if err != nil { + if err != ErrStop { + tail.Kill(err) + } + return + } + } else { + // non-EOF error + tail.Killf("Error reading %s: %s", tail.Filename, err) + return + } + + select { + case <-tail.Dying(): + if tail.Err() == errStopAtEOF { + continue + } + return + default: + } + } +} + +// waitForChanges waits until the file has been appended, deleted, +// moved or truncated. When moved or deleted - the file will be +// reopened if ReOpen is true. Truncated files are always reopened. +func (tail *Tail) waitForChanges() error { + if tail.changes == nil { + pos, err := tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos) + if err != nil { + return err + } + } + + select { + case <-tail.changes.Modified: + return nil + case <-tail.changes.Deleted: + tail.changes = nil + if tail.ReOpen { + // XXX: we must not log from a library. + tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened %s", tail.Filename) + tail.openReader() + return nil + } else { + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop + } + case <-tail.changes.Truncated: + // Always reopen truncated files (Follow is true) + tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename) + tail.openReader() + return nil + case <-tail.Dying(): + return ErrStop + } + panic("unreachable") +} + +func (tail *Tail) openReader() { + if tail.MaxLineSize > 0 { + // add 2 to account for newline characters + tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) + } else { + tail.reader = bufio.NewReader(tail.file) + } +} + +func (tail *Tail) seekEnd() error { + return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END}) +} + +func (tail *Tail) seekTo(pos SeekInfo) error { + _, err := tail.file.Seek(pos.Offset, pos.Whence) + if err != nil { + return fmt.Errorf("Seek error on %s: %s", tail.Filename, err) + } + // Reset the read buffer whenever the file is re-seek'ed + tail.reader.Reset(tail.file) + return nil +} + +// sendLine sends the line(s) to Lines channel, splitting longer lines +// if necessary. Return false if rate limit is reached. 
+func (tail *Tail) sendLine(line string) bool {
+	now := time.Now()
+	lines := []string{line}
+
+	// Split longer lines
+	if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
+		lines = util.PartitionString(line, tail.MaxLineSize)
+	}
+
+	for _, line := range lines {
+		tail.Lines <- &Line{line, now, nil}
+	}
+
+	if tail.Config.RateLimiter != nil {
+		ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
+		if !ok {
+			tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
+				tail.Filename)
+			return false
+		}
+	}
+
+	return true
+}
+
+// Cleanup removes inotify watches added by the tail package. This function is
+// meant to be invoked from a process's exit handler. The Linux kernel may not
+// automatically remove inotify watches after the process exits.
+func (tail *Tail) Cleanup() {
+	watch.Cleanup(tail.Filename)
+}
diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go
new file mode 100644
index 00000000000..bc4dc3357ab
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail_posix.go
@@ -0,0 +1,11 @@
+// +build linux darwin freebsd netbsd openbsd
+
+package tail
+
+import (
+	"os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+	return os.Open(name)
+}
diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go
new file mode 100644
index 00000000000..ef2cfca1b74
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package tail
+
+import (
+	"github.com/hpcloud/tail/winfile"
+	"os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+	return winfile.OpenFile(name, os.O_RDONLY, 0)
+}
diff --git a/vendor/github.com/hpcloud/tail/util/BUILD b/vendor/github.com/hpcloud/tail/util/BUILD
new file mode 100644
index 00000000000..8d6ed42c75b
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/util/BUILD
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["util.go"],
+    importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/util",
+    importpath = "github.com/hpcloud/tail/util",
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go
new file mode 100644
index 00000000000..54151fe39f1
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/util/util.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package util
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"runtime/debug"
+)
+
+type Logger struct {
+	*log.Logger
+}
+
+var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
+
+// Fatal is like panic except it displays only the current goroutine's stack.
+func Fatal(format string, v ...interface{}) {
+	// https://github.com/hpcloud/log/blob/master/log.go#L45
+	LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
+	os.Exit(1)
+}
+
+// PartitionString partitions the string into chunks of given size,
+// with the last chunk of variable size.
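+// For example, PartitionString("abcde", 2) returns ["ab", "cd", "e"].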
+func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/hpcloud/tail/watch/BUILD b/vendor/github.com/hpcloud/tail/watch/BUILD new file mode 100644 index 00000000000..922a5b60440 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "filechanges.go", + "inotify.go", + "inotify_tracker.go", + "polling.go", + "watch.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/watch", + importpath = "github.com/hpcloud/tail/watch", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/hpcloud/tail/util:go_default_library", + "//vendor/gopkg.in/fsnotify.v1:go_default_library", + "//vendor/gopkg.in/tomb.v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go new file mode 100644 index 00000000000..3ce5dcecbb2 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool), make(chan bool), make(chan bool)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one). +func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go new file mode 100644 index 00000000000..4478f1e1a01 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify.go @@ -0,0 +1,128 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. 
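+// It receives events from the shared, package-level fsnotify watcher (see
+// inotify_tracker.go) and tracks the file's size so that a shrinking file
+// can be reported as a truncation.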
+type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. + return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + defer RemoveWatch(fw.Filename) + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + return + } + case <-t.Dying(): + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + changes.NotifyDeleted() + return + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go new file mode 100644 index 00000000000..03be4275ca2 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go @@ -0,0 +1,260 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
+
+package watch
+
+import (
+	"log"
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+
+	"github.com/hpcloud/tail/util"
+
+	"gopkg.in/fsnotify.v1"
+)
+
+type InotifyTracker struct {
+	mux       sync.Mutex
+	watcher   *fsnotify.Watcher
+	chans     map[string]chan fsnotify.Event
+	done      map[string]chan bool
+	watchNums map[string]int
+	watch     chan *watchInfo
+	remove    chan *watchInfo
+	error     chan error
+}
+
+type watchInfo struct {
+	op    fsnotify.Op
+	fname string
+}
+
+func (this *watchInfo) isCreate() bool {
+	return this.op == fsnotify.Create
+}
+
+var (
+	// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
+	shared *InotifyTracker
+
+	// these are used to ensure the shared InotifyTracker is run exactly once
+	once  = sync.Once{}
+	goRun = func() {
+		shared = &InotifyTracker{
+			mux:       sync.Mutex{},
+			chans:     make(map[string]chan fsnotify.Event),
+			done:      make(map[string]chan bool),
+			watchNums: make(map[string]int),
+			watch:     make(chan *watchInfo),
+			remove:    make(chan *watchInfo),
+			error:     make(chan error),
+		}
+		go shared.run()
+	}
+
+	logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+// Watch signals the run goroutine to begin watching the input filename
+func Watch(fname string) error {
+	return watch(&watchInfo{
+		fname: fname,
+	})
+}
+
+// WatchCreate signals the run goroutine to begin watching for creation of
+// the input filename. If you call WatchCreate, pair it with RemoveWatchCreate
+// rather than Cleanup.
+func WatchCreate(fname string) error {
+	return watch(&watchInfo{
+		op:    fsnotify.Create,
+		fname: fname,
+	})
+}
+
+func watch(winfo *watchInfo) error {
+	// start running the shared InotifyTracker if not already running
+	once.Do(goRun)
+
+	winfo.fname = filepath.Clean(winfo.fname)
+	shared.watch <- winfo
+	return <-shared.error
+}
+
+// RemoveWatch signals the run goroutine to remove the watch for the input filename
+func RemoveWatch(fname string) {
+	remove(&watchInfo{
+		fname: fname,
+	})
+}
+
+// RemoveWatchCreate signals the run goroutine to remove the creation watch for
+// the input filename
+func RemoveWatchCreate(fname string) {
+	remove(&watchInfo{
+		op:    fsnotify.Create,
+		fname: fname,
+	})
+}
+
+func remove(winfo *watchInfo) {
+	// start running the shared InotifyTracker if not already running
+	once.Do(goRun)
+
+	winfo.fname = filepath.Clean(winfo.fname)
+	shared.mux.Lock()
+	done := shared.done[winfo.fname]
+	if done != nil {
+		delete(shared.done, winfo.fname)
+		close(done)
+	}
+
+	fname := winfo.fname
+	if winfo.isCreate() {
+		// Watch for new files to be created in the parent directory.
+		fname = filepath.Dir(fname)
+	}
+	shared.watchNums[fname]--
+	watchNum := shared.watchNums[fname]
+	if watchNum == 0 {
+		delete(shared.watchNums, fname)
+	}
+	shared.mux.Unlock()
+
+	// If we were the last ones to watch this file, unsubscribe from inotify.
+	// This needs to happen after releasing the lock because fsnotify waits
+	// synchronously for the kernel to acknowledge the removal of the watch
+	// for this file, which causes us to deadlock if we still held the lock.
+	if watchNum == 0 {
+		shared.watcher.Remove(fname)
+	}
+	shared.remove <- winfo
+}
+
+// Events returns a channel to which fsnotify events corresponding to the input
+// filename will be sent. This channel will be closed when removeWatch is called
+// on this filename.
+func Events(fname string) <-chan fsnotify.Event {
+	shared.mux.Lock()
+	defer shared.mux.Unlock()
+
+	return shared.chans[fname]
+}
+
+// Cleanup removes the watch for the input filename if necessary.
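+// It simply delegates to RemoveWatch.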
+func Cleanup(fname string) {
+	RemoveWatch(fname)
+}
+
+// addWatch adds an fsnotify watch for the input filename (or its parent
+// directory, for create watches), reference-counting watchers so that each
+// path is subscribed to inotify at most once.
+func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
+	shared.mux.Lock()
+	defer shared.mux.Unlock()
+
+	if shared.chans[winfo.fname] == nil {
+		shared.chans[winfo.fname] = make(chan fsnotify.Event)
+		shared.done[winfo.fname] = make(chan bool)
+	}
+
+	fname := winfo.fname
+	if winfo.isCreate() {
+		// Watch for new files to be created in the parent directory.
+		fname = filepath.Dir(fname)
+	}
+
+	// already in inotify watch
+	if shared.watchNums[fname] > 0 {
+		shared.watchNums[fname]++
+		if winfo.isCreate() {
+			shared.watchNums[winfo.fname]++
+		}
+		return nil
+	}
+
+	err := shared.watcher.Add(fname)
+	if err == nil {
+		shared.watchNums[fname]++
+		if winfo.isCreate() {
+			shared.watchNums[winfo.fname]++
+		}
+	}
+	return err
+}
+
+// removeWatch closes the events channel for the input filename and drops the
+// remaining create-watch bookkeeping; the fsnotify watch itself is removed
+// by remove().
+func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
+	shared.mux.Lock()
+	defer shared.mux.Unlock()
+
+	ch := shared.chans[winfo.fname]
+	if ch == nil {
+		return
+	}
+
+	delete(shared.chans, winfo.fname)
+	close(ch)
+
+	if !winfo.isCreate() {
+		return
+	}
+
+	shared.watchNums[winfo.fname]--
+	if shared.watchNums[winfo.fname] == 0 {
+		delete(shared.watchNums, winfo.fname)
+	}
+}
+
+// sendEvent sends the input event to the appropriate Tail.
+func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
+	name := filepath.Clean(event.Name)
+
+	shared.mux.Lock()
+	ch := shared.chans[name]
+	done := shared.done[name]
+	shared.mux.Unlock()
+
+	if ch != nil && done != nil {
+		select {
+		case ch <- event:
+		case <-done:
+		}
+	}
+}
+
+// run starts the goroutine in which the shared struct reads events from its
+// Watcher's Event channel and sends the events to the appropriate Tail.
+func (shared *InotifyTracker) run() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		util.Fatal("failed to create Watcher")
+	}
+	shared.watcher = watcher
+
+	for {
+		select {
+		case winfo := <-shared.watch:
+			shared.error <- shared.addWatch(winfo)
+
+		case winfo := <-shared.remove:
+			shared.removeWatch(winfo)
+
+		case event, open := <-shared.watcher.Events:
+			if !open {
+				return
+			}
+			shared.sendEvent(event)
+
+		case err, open := <-shared.watcher.Errors:
+			if !open {
+				return
+			} else if err != nil {
+				sysErr, ok := err.(*os.SyscallError)
+				if !ok || sysErr.Err != syscall.EINTR {
+					logger.Printf("Error in Watcher Error channel: %s", err)
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go
new file mode 100644
index 00000000000..49491f21dbf
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/polling.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+	"os"
+	"runtime"
+	"time"
+
+	"github.com/hpcloud/tail/util"
+	"gopkg.in/tomb.v1"
+)
+
+// PollingFileWatcher polls the file for changes.
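+// It stats the file every POLL_DURATION (250ms by default) and compares
+// identity, size, and modification time to detect deletion, truncation,
+// and appends.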
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. replace + // the fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go new file mode 100644 index 00000000000..2e1783ef0aa --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it modification, + // deletion, renames or truncations. Returned FileChanges group of + // channels will be closed, thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file. 
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/hpcloud/tail/winfile/BUILD b/vendor/github.com/hpcloud/tail/winfile/BUILD new file mode 100644 index 00000000000..b7aedb1c7e0 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/winfile/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["winfile.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/winfile", + importpath = "github.com/hpcloud/tail/winfile", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go new file mode 100644 index 00000000000..aa7e7bc5df5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/winfile/winfile.go @@ -0,0 +1,92 @@ +// +build windows + +package winfile + +import ( + "os" + "syscall" + "unsafe" +) + +// issue also described here +//https://codereview.appspot.com/8203043/ + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 +func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 +func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { + r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +// 
https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
+func syscallMode(i os.FileMode) (o uint32) {
+	o |= uint32(i.Perm())
+	if i&os.ModeSetuid != 0 {
+		o |= syscall.S_ISUID
+	}
+	if i&os.ModeSetgid != 0 {
+		o |= syscall.S_ISGID
+	}
+	if i&os.ModeSticky != 0 {
+		o |= syscall.S_ISVTX
+	}
+	// No mapping for Go's ModeTemporary (plan9 only).
+	return
+}
diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore
index 922b4f7f919..18793c248aa 100644
--- a/vendor/github.com/onsi/ginkgo/.gitignore
+++ b/vendor/github.com/onsi/ginkgo/.gitignore
@@ -1,4 +1,7 @@
 .DS_Store
 TODO
 tmp/**/*
-*.coverprofile
\ No newline at end of file
+*.coverprofile
+.vscode
+.idea/
+*.log
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index 19ca78b65fa..7ad39b78f69 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -1,8 +1,10 @@
 language: go
 go:
-  - 1.5
-  - 1.6
-  - 1.7
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
 
 install:
   - go get -v -t ./...
@@ -11,4 +13,4 @@ install:
   - go install github.com/onsi/ginkgo/ginkgo
   - export PATH=$PATH:$HOME/gopath/bin
 
-script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
diff --git a/vendor/github.com/onsi/ginkgo/BUILD b/vendor/github.com/onsi/ginkgo/BUILD
index 8d814067d3b..d9269eac495 100644
--- a/vendor/github.com/onsi/ginkgo/BUILD
+++ b/vendor/github.com/onsi/ginkgo/BUILD
@@ -16,6 +16,7 @@ go_library(
         "//vendor/github.com/onsi/ginkgo/internal/writer:go_default_library",
         "//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
         "//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library",
+        "//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable:go_default_library",
        "//vendor/github.com/onsi/ginkgo/types:go_default_library",
     ],
 )
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index 5bc46f919e0..32370206bfc 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,10 +1,71 @@
-## HEAD
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behaviour [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters.
(#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- added name field to junit reported testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
 
 Improvements:
 
+- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
 - `Skip(message)` can be used to skip the current test.
 - Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
 - Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify`, a synonym for `It`
+- Support colorise on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
 
 Bug Fixes:
 
diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
new file mode 100644
index 00000000000..908b95c2c12
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
@@ -0,0 +1,33 @@
+# Contributing to Ginkgo
+
+Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+    - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
+    - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+- Update the documentation. Ginkgo uses `godoc` comments and documentation on the `gh-pages` branch.
+  If relevant, please submit a docs PR to that branch alongside your code PR.
+
+Thanks for supporting Ginkgo!
+
+## Setup
+
+Fork the repo, then:
+
+```
+go get github.com/onsi/ginkgo
+go get github.com/onsi/gomega/...
+cd $GOPATH/src/github.com/onsi/ginkgo
+git remote add fork git@github.com:<your-username>/ginkgo.git
+
+ginkgo -r -p # ensure tests are green
+go vet ./... # ensure linter is happy
+```
+
+## Making the PR
+  - go to a new branch `git checkout -b my-feature`
+  - make your changes
+  - run tests and linter again (see above)
+  - `git push fork`
+  - open PR 🎉
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index 50d32ba1c59..cdf8d054a16 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,19 +1,19 @@
-![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
 
-[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo)
+[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
 
 Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
 
-To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
+If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
 
 ## Feature List
 
 - Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests.
It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite) - Structure your BDD-style tests expressively: - - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) + - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown - - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions + - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. @@ -25,7 +25,7 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google - `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime - - `ginkgo -cover` runs your tests using Golang's code coverage tool + - `ginkgo -cover` runs your tests using Go's code coverage tool - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression - `ginkgo -r` runs all tests suites under the current directory @@ -43,6 +43,8 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google - [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. +- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. + - Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details. - A modular architecture that lets you easily: @@ -53,18 +55,18 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/) -## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework +## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org) ## Set Me Up! -You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!) +You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. 
Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed. ```bash -go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI -go get github.com/onsi/gomega # fetches the matcher library +go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI +go get -u github.com/onsi/gomega/... # fetches the matcher library cd path/to/package/you/want/to/test @@ -83,11 +85,11 @@ Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Go With that said, it's great to know what your options are :) -### What Golang gives you out of the box +### What Go gives you out of the box -Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. +Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. -### Matcher libraries for Golang's XUnit style tests +### Matcher libraries for Go's XUnit style tests A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: @@ -98,7 +100,7 @@ You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomeg ### BDD style testing frameworks -There are a handful of BDD-style testing frameworks written for Golang. Here are a few: +There are a handful of BDD-style testing frameworks written for Go. Here are a few: - [Ginkgo](https://github.com/onsi/ginkgo) ;) - [GoConvey](https://github.com/smartystreets/goconvey) @@ -106,10 +108,14 @@ There are a handful of BDD-style testing frameworks written for Golang. Here ar - [Mao](https://github.com/azer/mao) - [Zen](https://github.com/pranavraja/zen) -Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries. +Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries. Go explore! ## License Ginkgo is MIT-Licensed + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/RELEASING.md new file mode 100644 index 00000000000..1e298c2da71 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/RELEASING.md @@ -0,0 +1,14 @@ +A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: + +1. Ensure CHANGELOG.md is up to date. + - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Categorize the changes into + - Breaking Changes (requires a major version) + - New Features (minor version) + - Fixes (fix version) + - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) +1. Update `VERSION` in `config/config.go` +1. Create a commit with the version number as the commit message (e.g. `v1.3.0`) +1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`) +1. Push the commit and tag to GitHub +1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. 
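+
+As a concrete sketch (using the `v1.3.0` placeholder from the examples above), steps 3-5 amount to:
+
+```
+git commit -am v1.3.0
+git tag v1.3.0
+git push origin master v1.3.0
+```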
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index 4b5485b4157..d4ed1fa5741 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.2.0" +const VERSION = "1.6.0" type GinkgoConfigType struct { RandomSeed int64 @@ -34,6 +34,7 @@ type GinkgoConfigType struct { FlakeAttempts int EmitSpecProgress bool DryRun bool + DebugParallel bool ParallelNode int ParallelTotal int @@ -47,6 +48,7 @@ type DefaultReporterConfigType struct { NoColor bool SlowSpecThreshold float64 NoisyPendings bool + NoisySkippings bool Succinct bool Verbose bool FullTrace bool @@ -64,7 +66,7 @@ func processPrefix(prefix string) string { func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { prefix = processPrefix(prefix) flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") - flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.") + flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.") flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") @@ -80,6 +82,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") + flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.") + if includeParallelFlags { flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.") @@ -90,6 +94,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") + flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.") flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") @@ -139,6 +144,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%sprogress", prefix)) } + if ginkgo.DebugParallel { + result = append(result, fmt.Sprintf("--%sdebug", prefix)) + } + if ginkgo.ParallelNode != 0 { result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) } @@ -171,6 +180,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) } + if !reporter.NoisySkippings { + result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix)) + } + if reporter.Verbose { result = append(result, fmt.Sprintf("--%sv", prefix)) } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go index 1e209e4f525..fea4d4d4e6c 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go @@ -43,10 +43,10 @@ func BuildBootstrapCommand() *Command { var bootstrapText = `package {{.Package}} import ( + "testing" + {{.GinkgoImport}} {{.GomegaImport}} - - "testing" ) func Test{{.FormattedName}}(t *testing.T) { @@ -58,11 +58,11 @@ func Test{{.FormattedName}}(t *testing.T) { var agoutiBootstrapText = `package {{.Package}} import ( + "testing" + {{.GinkgoImport}} {{.GomegaImport}} "github.com/sclevine/agouti" - - "testing" ) func Test{{.FormattedName}}(t *testing.T) { diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go index 41bd1acf670..f0eb375c3b9 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go @@ -46,7 +46,7 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) { passed := true for _, suite := range suites { - runner := testrunner.New(suite, 1, false, r.commandFlags.GoOpts, nil) + runner := testrunner.New(suite, 1, false, 0, r.commandFlags.GoOpts, nil) fmt.Printf("Compiling %s...\n", suite.PackageName) path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName))) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go 
index 89e60d39302..5944ed85ccd 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go @@ -3,8 +3,9 @@ package main import ( "flag" "fmt" - "github.com/onsi/ginkgo/ginkgo/convert" "os" + + "github.com/onsi/ginkgo/ginkgo/convert" ) func BuildConvertCommand() *Command { diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go index 3b9405aa07f..019fd233732 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go @@ -34,10 +34,10 @@ func BuildGenerateCommand() *Command { var specText = `package {{.Package}} import ( - {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}} - {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}} {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}} + + {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}} ) var _ = Describe("{{.Subject}}", func() { @@ -45,15 +45,15 @@ var _ = Describe("{{.Subject}}", func() { }) ` -var agoutiSpecText = `package {{.Package}}_test +var agoutiSpecText = `package {{.Package}} import ( - {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}} - {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}} {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}} - . "github.com/sclevine/agouti/matchers" "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}} ) var _ = Describe("{{.Subject}}", func() { diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/ginkgo/main.go index 201e203e172..4a1aeef4f57 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/main.go @@ -50,6 +50,10 @@ By default, when running multiple tests (with -r or a list of packages) Ginkgo w ginkgo -keepGoing +To fail if there are ginkgo tests in a directory but no test suite (missing `RunSpecs`) + + ginkgo -requireSuite + To monitor packages and rerun tests when changes occur: ginkgo watch <-r> diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go index 212235bae0a..39b88b5d1bf 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go @@ -3,11 +3,12 @@ package main import ( "bufio" "flag" - "github.com/onsi/ginkgo/ginkgo/nodot" "io/ioutil" "os" "path/filepath" "regexp" + + "github.com/onsi/ginkgo/ginkgo/nodot" ) func BuildNodotCommand() *Command { diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go index 9f19192ac2d..569b6a29ca7 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go @@ -5,8 +5,12 @@ import ( "fmt" "math/rand" "os" + "strings" "time" + "io/ioutil" + "path/filepath" + "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/ginkgo/interrupthandler" "github.com/onsi/ginkgo/ginkgo/testrunner" @@ -71,7 +75,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { runners := []*testrunner.TestRunner{} for _, suite := range suites { - runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.GoOpts, additionalArgs)) + runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, 
r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs)) } numSuites := 0 @@ -104,10 +108,25 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { runner.CleanUp() } + if r.isInCoverageMode() { + if r.getOutputDir() != "" { + // If coverprofile is set, combine coverages + if r.getCoverprofile() != "" { + if err := r.combineCoverprofiles(runners); err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + } else { + // Just move them + r.moveCoverprofiles(runners) + } + } + } + fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t)) if runResult.Passed { - if runResult.HasProgrammaticFocus { + if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { fmt.Printf("Test Suite Passed\n") fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) os.Exit(types.GINKGO_FOCUS_EXIT_CODE) @@ -121,6 +140,70 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { } } +// Moves all generated profiles to specified directory +func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) { + for _, runner := range runners { + _, filename := filepath.Split(runner.CoverageFile) + err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename)) + + if err != nil { + fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err) + return + } + } +} + +// Combines all generated profiles in the specified directory +func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error { + + path, _ := filepath.Abs(r.getOutputDir()) + if !fileExists(path) { + return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir()) + } + + fmt.Println("path is " + path) + + combined, err := os.OpenFile(filepath.Join(path, r.getCoverprofile()), + os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666) + + if err != nil { + fmt.Printf("Unable to create combined profile, %v\n", err) + return nil // non-fatal error + } + + for _, runner := range runners { + contents, err := ioutil.ReadFile(runner.CoverageFile) + + if err != nil { + fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err) + return nil // non-fatal error + } + + _, err = combined.Write(contents) + + if err != nil { + fmt.Printf("Unable to append to coverprofile, %v\n", err) + return nil // non-fatal error + } + } + + fmt.Println("All profiles combined") + return nil +} + +func (r *SpecRunner) isInCoverageMode() bool { + opts := r.commandFlags.GoOpts + return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != "" +} + +func (r *SpecRunner) getCoverprofile() string { + return *r.commandFlags.GoOpts["coverprofile"].(*string) +} + +func (r *SpecRunner) getOutputDir() string { + return *r.commandFlags.GoOpts["outputdir"].(*string) +} + func (r *SpecRunner) ComputeSuccinctMode(numSuites int) { if config.DefaultReporterConfig.Verbose { config.DefaultReporterConfig.Succinct = false @@ -171,7 +254,7 @@ func orcMessage(iteration int) string { "Still good...", "I think your tests are fine....", "Yep, still passing", - "Here we go again...", + "Oh boy, here I go testin' again!", "Even the gophers are getting bored", "Did you try -race?", "Maybe you should stop now?", diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go index 
20ba7924f0e..b7cb7f5661e 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go @@ -4,6 +4,8 @@ import ( "flag" "runtime" + "time" + "github.com/onsi/ginkgo/config" ) @@ -19,6 +21,7 @@ type RunWatchAndBuildCommandFlags struct { Notify bool AfterSuiteHook string AutoNodes bool + Timeout time.Duration //only for run command KeepGoing bool @@ -26,7 +29,8 @@ type RunWatchAndBuildCommandFlags struct { RandomizeSuites bool //only for watch command - Depth int + Depth int + WatchRegExp string FlagSet *flag.FlagSet } @@ -135,6 +139,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) { c.FlagSet.StringVar(c.stringSlot("memprofile"), "memprofile", "", "Write a memory profile to the specified file after all tests have passed.") c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.") c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.") + c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)") if mode == runMode || mode == watchMode { config.Flags(c.FlagSet, "", false) @@ -146,6 +151,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) { c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes") } c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes") + c.FlagSet.DurationVar(&(c.Timeout), "timeout", 24*time.Hour, "Suite fails if it does not complete within the specified timeout") } if mode == runMode { @@ -156,5 +162,6 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) { if mode == watchMode { c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree") + c.FlagSet.StringVar(&(c.WatchRegExp), "watchRegExp", `\.go$`, "Files matching this regular expression will be watched for changes") } } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/BUILD b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/BUILD index 3d29bfcf59a..1acf0e72042 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/BUILD +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/BUILD @@ -15,6 +15,7 @@ go_library( "//vendor/github.com/onsi/ginkgo/ginkgo/testsuite:go_default_library", "//vendor/github.com/onsi/ginkgo/internal/remote:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library", + "//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable:go_default_library", "//vendor/github.com/onsi/ginkgo/types:go_default_library", ], ) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go index 3645f1f4672..97a83145fa2 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go @@ -8,7 +8,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strconv" "strings" "syscall" @@ -18,6 +17,7 @@ import ( "github.com/onsi/ginkgo/ginkgo/testsuite" "github.com/onsi/ginkgo/internal/remote" "github.com/onsi/ginkgo/reporters/stenographer" + colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" "github.com/onsi/ginkgo/types" ) @@ 
-29,17 +29,23 @@ type TestRunner struct { numCPU int parallelStream bool + timeout time.Duration goOpts map[string]interface{} additionalArgs []string + stderr *bytes.Buffer + + CoverageFile string } -func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, goOpts map[string]interface{}, additionalArgs []string) *TestRunner { +func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner { runner := &TestRunner{ Suite: suite, numCPU: numCPU, parallelStream: parallelStream, goOpts: goOpts, additionalArgs: additionalArgs, + timeout: timeout, + stderr: new(bytes.Buffer), } if !suite.Precompiled { @@ -60,10 +66,10 @@ func (t *TestRunner) Compile() error { func (t *TestRunner) BuildArgs(path string) []string { args := []string{"test", "-c", "-i", "-o", path, t.Suite.Path} - if *t.goOpts["covermode"].(*string) != "" { - args = append(args, "-cover", fmt.Sprintf("-covermode=%s", *t.goOpts["covermode"].(*string))) + if t.getCoverMode() != "" { + args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode())) } else { - if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" { + if t.shouldCover() || t.getCoverPackage() != "" { args = append(args, "-cover", "-covermode=atomic") } } @@ -136,13 +142,16 @@ func (t *TestRunner) CompileTo(path string) error { output, err := cmd.CombinedOutput() if err != nil { - fixedOutput := fixCompilationOutput(string(output), t.Suite.Path) if len(output) > 0 { - return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput) + return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output) } return fmt.Errorf("Failed to compile %s", t.Suite.PackageName) } + if len(output) > 0 { + fmt.Println(string(output)) + } + if fileExists(path) == false { compiledFile := t.Suite.PackageName + ".test" if fileExists(compiledFile) { @@ -215,38 +224,6 @@ func copyFile(src, dst string) error { return out.Chmod(mode) } -/* -go test -c -i spits package.test out into the cwd. there's no way to change this. - -to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package. - -unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd. - -this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working. - -fixCompilationOutput..... rewrites the output to fix the paths. - -yeah...... 
-*/ -func fixCompilationOutput(output string, relToPath string) string { - relToPath = filepath.Join(relToPath) - re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`) - lines := strings.Split(output, "\n") - for i, line := range lines { - indices := re.FindStringSubmatchIndex(line) - if len(indices) == 0 { - continue - } - - path := line[indices[2]:indices[3]] - if filepath.Dir(path) != relToPath { - path = filepath.Join(relToPath, path) - lines[i] = path + line[indices[3]:] - } - } - return strings.Join(lines, "\n") -} - func (t *TestRunner) Run() RunResult { if t.Suite.IsGinkgo { if t.numCPU > 1 { @@ -324,7 +301,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult { os.Stdout.Sync() - if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" { + if t.shouldCombineCoverprofiles() { t.combineCoverprofiles() } @@ -337,7 +314,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult { writers := make([]*logWriter, t.numCPU) reports := make([]*bytes.Buffer, t.numCPU) - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1) + stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer) server, err := remote.NewServer(t.numCPU) @@ -390,9 +367,8 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult { | | | Ginkgo timed out waiting for all parallel nodes to report back! | | | - ------------------------------------------------------------------- -`) - + -------------------------------------------------------------------`) + fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path) os.Stdout.Sync() for _, writer := range writers { @@ -406,21 +382,40 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult { os.Stdout.Sync() } - if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" { + if t.shouldCombineCoverprofiles() { t.combineCoverprofiles() } return res } +const CoverProfileSuffix = ".coverprofile" + func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd { - args := []string{"--test.timeout=24h"} - if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" { - coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile" - if t.numCPU > 1 { - coverprofile = fmt.Sprintf("%s.%d", coverprofile, node) + args := []string{"--test.timeout=" + t.timeout.String()} + + coverProfile := t.getCoverProfile() + + if t.shouldCombineCoverprofiles() { + + testCoverProfile := "--test.coverprofile=" + + coverageFile := "" + // Set default name for coverage results + if coverProfile == "" { + coverageFile = t.Suite.PackageName + CoverProfileSuffix + } else { + coverageFile = coverProfile } - args = append(args, coverprofile) + + testCoverProfile += coverageFile + + t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile) + + if t.numCPU > 1 { + testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node) + } + args = append(args, testCoverProfile) } args = append(args, ginkgoArgs...) @@ -434,12 +429,36 @@ func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec. cmd := exec.Command(path, args...) 
cmd.Dir = t.Suite.Path - cmd.Stderr = stream + cmd.Stderr = io.MultiWriter(stream, t.stderr) cmd.Stdout = stream return cmd } +func (t *TestRunner) shouldCover() bool { + return *t.goOpts["cover"].(*bool) +} + +func (t *TestRunner) shouldRequireSuite() bool { + return *t.goOpts["requireSuite"].(*bool) +} + +func (t *TestRunner) getCoverProfile() string { + return *t.goOpts["coverprofile"].(*string) +} + +func (t *TestRunner) getCoverPackage() string { + return *t.goOpts["coverpkg"].(*string) +} + +func (t *TestRunner) getCoverMode() string { + return *t.goOpts["covermode"].(*string) +} + +func (t *TestRunner) shouldCombineCoverprofiles() bool { + return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != "" +} + func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult { var res RunResult @@ -456,17 +475,34 @@ func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult { } cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + if strings.Contains(t.stderr.String(), "warning: no tests to run") { + if t.shouldRequireSuite() { + res.Passed = false + } + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + } + return res } func (t *TestRunner) combineCoverprofiles() { profiles := []string{} + + coverProfile := t.getCoverProfile() + for cpu := 1; cpu <= t.numCPU; cpu++ { - coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu) + var coverFile string + if coverProfile == "" { + coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu) + } else { + coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu) + } + coverFile = filepath.Join(t.Suite.Path, coverFile) coverProfile, err := ioutil.ReadFile(coverFile) os.Remove(coverFile) @@ -502,5 +538,17 @@ func (t *TestRunner) combineCoverprofiles() { output = append(output, fmt.Sprintf("%s %d", line, lines[line])) } finalOutput := strings.Join(output, "\n") - ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666) + + finalFilename := "" + + if coverProfile != "" { + finalFilename = coverProfile + } else { + finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix) + } + + coverageFilepath := filepath.Join(t.Suite.Path, finalFilename) + ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666) + + t.CoverageFile = coverageFilepath } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go index 3046ce519f6..9de8c2bb4e6 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go @@ -53,7 +53,7 @@ func SuitesInDir(dir string, recurse bool) []TestSuite { } files, _ := ioutil.ReadDir(dir) - re := regexp.MustCompile(`_test\.go$`) + re := regexp.MustCompile(`^[^._].*_test\.go$`) for _, file := range files { if !file.IsDir() && re.Match([]byte(file.Name())) { suites = append(suites, New(dir, files)) @@ -77,7 +77,11 @@ func relPath(dir string) string { dir, _ = filepath.Abs(dir) cwd, _ := os.Getwd() dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) - dir = "." + string(filepath.Separator) + dir + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + return dir } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go index 683c3a9982d..cedc2b59c28 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go @@ -3,7 +3,9 @@ package main import ( "flag" "fmt" + "io/ioutil" "os/exec" + "strings" ) func BuildUnfocusCommand() *Command { @@ -26,13 +28,34 @@ func unfocusSpecs([]string, []string) { unfocus("Measure") unfocus("DescribeTable") unfocus("Entry") + unfocus("Specify") + unfocus("When") } func unfocus(component string) { fmt.Printf("Removing F%s...\n", component) - cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".") - out, _ := cmd.CombinedOutput() - if string(out) != "" { - println(string(out)) + files, err := ioutil.ReadDir(".") + if err != nil { + fmt.Println(err.Error()) + return + } + for _, f := range files { + // Exclude "vendor" directory + if f.IsDir() && f.Name() == "vendor" { + continue + } + // Exclude non-go files in the current directory + if !f.IsDir() && !strings.HasSuffix(f.Name(), ".go") { + continue + } + // Recursively run `gofmt` otherwise + cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", f.Name()) + out, err := cmd.CombinedOutput() + if err != nil { + fmt.Println(err.Error()) + } + if string(out) != "" { + fmt.Println(string(out)) + } } } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go index cdca3a348b6..f586908e87f 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + "github.com/onsi/ginkgo/config" ) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go index 452c07e4d69..a628303d729 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go @@ -3,6 +3,8 @@ package watch import ( "fmt" + "regexp" + "github.com/onsi/ginkgo/ginkgo/testsuite" ) @@ -10,14 +12,16 @@ type SuiteErrors map[testsuite.TestSuite]error type DeltaTracker struct { maxDepth int + watchRegExp *regexp.Regexp suites map[string]*Suite packageHashes *PackageHashes } -func NewDeltaTracker(maxDepth int) *DeltaTracker { +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { return &DeltaTracker{ maxDepth: maxDepth, - packageHashes: NewPackageHashes(), + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), suites: map[string]*Suite{}, } } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go index eaf357c249c..7e1e4192dda 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go @@ -8,7 +8,6 @@ import ( "time" ) -var goRegExp = regexp.MustCompile(`\.go$`) var goTestRegExp = regexp.MustCompile(`_test\.go$`) type PackageHash struct { @@ -16,14 +15,16 @@ type PackageHash struct { TestModifiedTime time.Time Deleted bool - path string - codeHash string - testHash string + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp } -func NewPackageHash(path string) *PackageHash { +func NewPackageHash(path string, 
watchRegExp *regexp.Regexp) *PackageHash { p := &PackageHash{ - path: path, + path: path, + watchRegExp: watchRegExp, } p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() @@ -82,7 +83,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } - if goRegExp.Match([]byte(info.Name())) { + if p.watchRegExp.Match([]byte(info.Name())) { codeHash += p.hashForFileInfo(info) if info.ModTime().After(codeModifiedTime) { codeModifiedTime = info.ModTime() diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go index 262eaa847ea..b4892bebf26 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go @@ -2,19 +2,22 @@ package watch import ( "path/filepath" + "regexp" "sync" ) type PackageHashes struct { PackageHashes map[string]*PackageHash usedPaths map[string]bool + watchRegExp *regexp.Regexp lock *sync.Mutex } -func NewPackageHashes() *PackageHashes { +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { return &PackageHashes{ PackageHashes: map[string]*PackageHash{}, usedPaths: nil, + watchRegExp: watchRegExp, lock: &sync.Mutex{}, } } @@ -41,7 +44,7 @@ func (p *PackageHashes) Add(path string) *PackageHash { path, _ = filepath.Abs(path) _, ok := p.PackageHashes[path] if !ok { - p.PackageHashes[path] = NewPackageHash(path) + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) } if p.usedPaths != nil { diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go index 0dc2c5cc91e..a6ef053c851 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + "regexp" "time" "github.com/onsi/ginkgo/config" @@ -58,7 +59,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA runners := []*testrunner.TestRunner{} for _, suite := range suites { - runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.GoOpts, additionalArgs)) + runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs)) } return runners @@ -72,7 +73,7 @@ func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) { } fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth) - deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth) + deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth, regexp.MustCompile(w.commandFlags.WatchRegExp)) delta, errors := deltaTracker.Delta(suites) fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites))) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 8fe0b70a62f..158acdd5e73 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -29,6 +29,7 @@ import ( "github.com/onsi/ginkgo/internal/writer" "github.com/onsi/ginkgo/reporters" "github.com/onsi/ginkgo/reporters/stenographer" + colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" "github.com/onsi/ginkgo/types" ) @@ -149,7 +150,8 @@ type GinkgoTestDescription struct { FileName string LineNumber int - Failed bool + Failed bool + Duration time.Duration } //CurrentGinkgoTestDescripton returns information about the current running test. @@ -169,6 +171,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription { FileName: subjectCodeLocation.FileName, LineNumber: subjectCodeLocation.LineNumber, Failed: summary.HasFailureState(), + Duration: summary.RunTime, } } @@ -202,7 +205,7 @@ func RunSpecs(t GinkgoTestingT, description string) bool { //To run your tests with Ginkgo's default reporter and your custom reporter(s), replace //RunSpecs() with this method. func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...) + specReporters = append(specReporters, buildDefaultReporter()) return RunSpecsWithCustomReporters(t, description, specReporters) } @@ -216,7 +219,7 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor reporters[i] = reporter } passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) - if passed && hasFocusedTests { + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { fmt.Println("PASS | FOCUSED") os.Exit(types.GINKGO_FOCUS_EXIT_CODE) } @@ -226,14 +229,18 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor func buildDefaultReporter() Reporter { remoteReportingServer := config.GinkgoConfig.StreamHost if remoteReportingServer == "" { - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1) + stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) } else { - return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor()) + debugFile := "" + if config.GinkgoConfig.DebugParallel { + debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode) + } + return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile) } } -//Skip notifies Ginkgo that the current spec should be skipped. +//Skip notifies Ginkgo that the current spec was skipped. 
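// A minimal sketch of how the Skip call below is used from inside a
// running spec; the package, spec text, and environment variable are
// hypothetical. Skip signals the framework via panic/recover, so it
// must be invoked on the goroutine that runs the spec body.
package books_test

import (
	"os"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("integration specs", func() {
	It("talks to the real backend", func() {
		if os.Getenv("BACKEND_URL") == "" {
			Skip("BACKEND_URL not set; skipping integration spec")
		}
		// ... exercise the backend here ...
	})
})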
func Skip(message string, callerSkip ...int) { skip := 0 if len(callerSkip) > 0 { @@ -275,9 +282,9 @@ func GinkgoRecover() { //Describe blocks allow you to organize your specs. A Describe block can contain any number of //BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. // -//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally +//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally //equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts. +//or method and, within that Describe, outline a number of Contexts and Whens. func Describe(text string, body func()) bool { globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) return true @@ -304,9 +311,9 @@ func XDescribe(text string, body func()) bool { //Context blocks allow you to organize your specs. A Context block can contain any number of //BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. // -//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally +//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally //equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts. +//or method and, within that Describe, outline a number of Contexts and Whens. func Context(text string, body func()) bool { globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) return true @@ -330,6 +337,35 @@ func XContext(text string, body func()) bool { return true } +//When blocks allow you to organize your specs. A When block can contain any number of +//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. +// +//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//or method and, within that Describe, outline a number of Contexts and Whens. +func When(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1)) + return true +} + +//You can focus the tests within a describe block using FWhen +func FWhen(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using PWhen +func PWhen(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using XWhen +func XWhen(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + //It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks //within an It block. // @@ -362,22 +398,26 @@ func XIt(text string, _ ...interface{}) bool { //which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks //which apply to It blocks. func Specify(text string, body interface{}, timeout ...float64) bool { - return It(text, body, timeout...) 
+ globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true } //You can focus individual Specifys using FSpecify func FSpecify(text string, body interface{}, timeout ...float64) bool { - return FIt(text, body, timeout...) + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true } //You can mark Specifys as pending using PSpecify func PSpecify(text string, is ...interface{}) bool { - return PIt(text, is...) + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true } //You can mark Specifys as pending using XSpecify func XSpecify(text string, is ...interface{}) bool { - return XIt(text, is...) + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true } //By allows you to better document large Its. diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go index 9c3eed2b6fe..d6d54234c2f 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -35,15 +35,15 @@ func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elaps } func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) defer b.mu.Unlock() measurement.Results = append(measurement.Results, value) } func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) 
defer b.mu.Unlock() measurement.Results = append(measurement.Results, value) } diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go index c76fe3a4512..6eded7b763e 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go @@ -1,9 +1,10 @@ package leafnodes import ( + "time" + "github.com/onsi/ginkgo/internal/failer" "github.com/onsi/ginkgo/types" - "time" ) type ItNode struct { diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go index efc3348c1b6..3ab9a6d5524 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go @@ -1,9 +1,10 @@ package leafnodes import ( + "reflect" + "github.com/onsi/ginkgo/internal/failer" "github.com/onsi/ginkgo/types" - "reflect" ) type MeasureNode struct { diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go index 870ad826da0..16cb66c3e49 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -2,11 +2,12 @@ package leafnodes import ( "fmt" + "reflect" + "time" + "github.com/onsi/ginkgo/internal/codelocation" "github.com/onsi/ginkgo/internal/failer" "github.com/onsi/ginkgo/types" - "reflect" - "time" ) type runner struct { @@ -86,6 +87,9 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) finished = true }() + // If this goroutine gets no CPU time before the select block, + // the <-done case may complete even if the test took longer than the timeoutThreshold. + // This can cause flaky behaviour, but we haven't seen it in the wild. 
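// A standalone sketch of the scheduling race described in the comment
// above: when both channels become ready before the select is evaluated,
// Go picks a case at random, so <-done can win even though the body ran
// past the threshold. All names here are illustrative.
package main

import (
	"fmt"
	"time"
)

func runWithTimeout(body func(), threshold time.Duration) bool {
	done := make(chan struct{})
	go func() {
		body()
		close(done)
	}()
	select {
	case <-done:
		return true // body finished, possibly after threshold, per the race above
	case <-time.After(threshold):
		return false // timed out
	}
}

func main() {
	ok := runWithTimeout(func() { time.Sleep(10 * time.Millisecond) }, time.Second)
	fmt.Println("passed within threshold:", ok)
}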
select { case <-done: case <-time.After(r.timeoutThreshold): diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go index 6b725a63153..b4654cd2994 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -1,9 +1,10 @@ package leafnodes import ( + "time" + "github.com/onsi/ginkgo/internal/failer" "github.com/onsi/ginkgo/types" - "time" ) type SetupNode struct { diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go index 2ccc7dc0fb0..80f16ed7861 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go @@ -1,9 +1,10 @@ package leafnodes import ( + "time" + "github.com/onsi/ginkgo/internal/failer" "github.com/onsi/ginkgo/types" - "time" ) type SuiteNode interface { diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go index e7030d9149a..a721d0cf7f2 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go @@ -2,11 +2,12 @@ package leafnodes import ( "encoding/json" - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" "io/ioutil" "net/http" "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" ) type synchronizedAfterSuiteNode struct { diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go index 76a9679813f..d5c88931940 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go @@ -3,12 +3,13 @@ package leafnodes import ( "bytes" "encoding/json" - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" "io/ioutil" "net/http" "reflect" "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" ) type synchronizedBeforeSuiteNode struct { @@ -109,8 +110,6 @@ func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecSt time.Sleep(50 * time.Millisecond) } - - return types.SpecStateFailed, failure("Shouldn't get here!") } func (node *synchronizedBeforeSuiteNode) Passed() bool { diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/BUILD b/vendor/github.com/onsi/ginkgo/internal/remote/BUILD index bbfab6a3020..303ef9afa27 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/BUILD +++ b/vendor/github.com/onsi/ginkgo/internal/remote/BUILD @@ -19,11 +19,31 @@ go_library( deps = [ "//vendor/github.com/onsi/ginkgo/config:go_default_library", "//vendor/github.com/onsi/ginkgo/internal/spec_iterator:go_default_library", + "//vendor/github.com/onsi/ginkgo/internal/writer:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library", "//vendor/github.com/onsi/ginkgo/types:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/hpcloud/tail:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/hpcloud/tail:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/hpcloud/tail:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/hpcloud/tail:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/hpcloud/tail:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/hpcloud/tail:go_default_library", + ], "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/hpcloud/tail:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go index 522d44e3573..6b54afe0142 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -207,7 +207,7 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { case types.SpecStatePending: aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) case types.SpecStateSkipped: - aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace) case types.SpecStateTimedOut: aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) case types.SpecStatePanicked: diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go index 025eb506448..284bc62e5e8 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go @@ -3,8 +3,14 @@ package remote import ( "bytes" "encoding/json" + "fmt" "io" "net/http" + "os" + + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/types" @@ -30,14 +36,41 @@ type ForwardingReporter struct { serverHost string poster Poster outputInterceptor OutputInterceptor + debugMode bool + debugFile *os.File + nestedReporter *reporters.DefaultReporter } -func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter { - return &ForwardingReporter{ +func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter { + reporter := &ForwardingReporter{ serverHost: serverHost, poster: poster, outputInterceptor: outputInterceptor, } + + if debugFile != "" { + var err error + reporter.debugMode = true + reporter.debugFile, err = os.Create(debugFile) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + if !config.Verbose { + //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication. 
+ ginkgoWriter.AndRedirectTo(reporter.debugFile) + } + outputInterceptor.StreamTo(reporter.debugFile) //This is not working + + stenographer := stenographer.New(false, true, reporter.debugFile) + config.Succinct = false + config.Verbose = true + config.FullTrace = true + reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer) + } + + return reporter } func (reporter *ForwardingReporter) post(path string, data interface{}) { @@ -56,6 +89,10 @@ func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigT } reporter.outputInterceptor.StartInterceptingOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteWillBegin(conf, summary) + reporter.debugFile.Sync() + } reporter.post("/SpecSuiteWillBegin", data) } @@ -63,10 +100,18 @@ func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupS output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() reporter.outputInterceptor.StartInterceptingOutput() setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.BeforeSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } reporter.post("/BeforeSuiteDidRun", setupSummary) } func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.debugMode { + reporter.nestedReporter.SpecWillRun(specSummary) + reporter.debugFile.Sync() + } reporter.post("/SpecWillRun", specSummary) } @@ -74,6 +119,10 @@ func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSumma output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() reporter.outputInterceptor.StartInterceptingOutput() specSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.SpecDidComplete(specSummary) + reporter.debugFile.Sync() + } reporter.post("/SpecDidComplete", specSummary) } @@ -81,10 +130,18 @@ func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSu output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() reporter.outputInterceptor.StartInterceptingOutput() setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.AfterSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } reporter.post("/AfterSuiteDidRun", setupSummary) } func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { reporter.outputInterceptor.StopInterceptingAndReturnOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteDidEnd(summary) + reporter.debugFile.Sync() + } reporter.post("/SpecSuiteDidEnd", summary) } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go index 093f4513c0b..5154abe87d5 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go @@ -1,5 +1,7 @@ package remote +import "os" + /* The OutputInterceptor is used by the ForwardingReporter to intercept and capture all stdin and stderr output during a test run. @@ -7,4 +9,5 @@ intercept and capture all stdin and stderr output during a test run. 
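// A small sketch of how the StreamTo hook (added to the interface just
// below) is wired by the debug-mode reporter above: the mirror target is
// registered before interception starts, so captured output is tailed
// into the debug file as it arrives. The interface is restated locally
// for self-containment and the file name is hypothetical.
package debugstream

import "os"

type OutputInterceptor interface {
	StartInterceptingOutput() error
	StopInterceptingAndReturnOutput() (string, error)
	StreamTo(*os.File)
}

func wireDebugStream(interceptor OutputInterceptor) error {
	f, err := os.Create("ginkgo-node-1.log")
	if err != nil {
		return err
	}
	interceptor.StreamTo(f)                      // register the mirror target first
	return interceptor.StartInterceptingOutput() // then begin capturing stdout/stderr
}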
type OutputInterceptor interface { StartInterceptingOutput() error StopInterceptingAndReturnOutput() (string, error) + StreamTo(*os.File) } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go index 980065da575..ab6622a29c2 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -6,6 +6,8 @@ import ( "errors" "io/ioutil" "os" + + "github.com/hpcloud/tail" ) func NewOutputInterceptor() OutputInterceptor { @@ -14,7 +16,10 @@ func NewOutputInterceptor() OutputInterceptor { type outputInterceptor struct { redirectFile *os.File + streamTarget *os.File intercepting bool + tailer *tail.Tail + doneTailing chan bool } func (interceptor *outputInterceptor) StartInterceptingOutput() error { @@ -37,6 +42,18 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { syscallDup(int(interceptor.redirectFile.Fd()), 1) syscallDup(int(interceptor.redirectFile.Fd()), 2) + if interceptor.streamTarget != nil { + interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) + interceptor.doneTailing = make(chan bool) + + go func() { + for line := range interceptor.tailer.Lines { + interceptor.streamTarget.Write([]byte(line.Text + "\n")) + } + close(interceptor.doneTailing) + }() + } + return nil } @@ -51,5 +68,16 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, interceptor.intercepting = false + if interceptor.streamTarget != nil { + interceptor.tailer.Stop() + interceptor.tailer.Cleanup() + <-interceptor.doneTailing + interceptor.streamTarget.Sync() + } + return string(output), err } + +func (interceptor *outputInterceptor) StreamTo(out *os.File) { + interceptor.streamTarget = out +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go index c8f97d97f07..40c790336cc 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go @@ -4,6 +4,7 @@ package remote import ( "errors" + "os" ) func NewOutputInterceptor() OutputInterceptor { @@ -31,3 +32,5 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, return "", nil } + +func (interceptor *outputInterceptor) StreamTo(*os.File) {} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go index 297af2ebffc..367c54daff7 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -45,7 +45,7 @@ func NewServer(parallelTotal int) (*Server, error) { listener: listener, lock: &sync.Mutex{}, alives: make([]func() bool, parallelTotal), - beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}, + beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}, parallelTotal: parallelTotal, }, nil } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go index 5c59728ea9b..9550d37b36b 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go +++ 
b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go @@ -8,4 +8,4 @@ import "syscall" // use the nearly identical syscall.Dup3 instead func syscallDup(oldfd int, newfd int) (err error) { return syscall.Dup3(oldfd, newfd, 0) -} \ No newline at end of file +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go index ecf9cafb664..75ef7fb78e3 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go @@ -6,4 +6,4 @@ import "golang.org/x/sys/unix" func syscallDup(oldfd int, newfd int) (err error) { return unix.Dup2(oldfd, newfd) -} \ No newline at end of file +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go index cacdd0e6496..ef625596007 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go @@ -8,4 +8,4 @@ import "syscall" func syscallDup(oldfd int, newfd int) (err error) { return syscall.Dup2(oldfd, newfd) -} \ No newline at end of file +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go index d32dec6997c..77b23a4c773 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -5,6 +5,8 @@ import ( "io" "time" + "sync" + "github.com/onsi/ginkgo/internal/containernode" "github.com/onsi/ginkgo/internal/leafnodes" "github.com/onsi/ginkgo/types" @@ -19,8 +21,11 @@ type Spec struct { state types.SpecState runTime time.Duration + startTime time.Time failure types.SpecFailure previousFailures bool + + stateMutex *sync.Mutex } func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { @@ -29,6 +34,7 @@ func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNod containers: containers, focused: subject.Flag() == types.FlagTypeFocused, announceProgress: announceProgress, + stateMutex: &sync.Mutex{}, } spec.processFlag(subject.Flag()) @@ -43,32 +49,32 @@ func (spec *Spec) processFlag(flag types.FlagType) { if flag == types.FlagTypeFocused { spec.focused = true } else if flag == types.FlagTypePending { - spec.state = types.SpecStatePending + spec.setState(types.SpecStatePending) } } func (spec *Spec) Skip() { - spec.state = types.SpecStateSkipped + spec.setState(types.SpecStateSkipped) } func (spec *Spec) Failed() bool { - return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut + return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut } func (spec *Spec) Passed() bool { - return spec.state == types.SpecStatePassed + return spec.getState() == types.SpecStatePassed } func (spec *Spec) Flaked() bool { - return spec.state == types.SpecStatePassed && spec.previousFailures + return spec.getState() == types.SpecStatePassed && spec.previousFailures } func (spec *Spec) Pending() bool { - return spec.state == types.SpecStatePending + return spec.getState() == types.SpecStatePending } func (spec *Spec) Skipped() bool { - return spec.state == types.SpecStateSkipped + return spec.getState() == types.SpecStateSkipped } func (spec *Spec) Focused() 
bool { @@ -91,13 +97,18 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary { componentTexts[len(spec.containers)] = spec.subject.Text() componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() + runTime := spec.runTime + if runTime == 0 && !spec.startTime.IsZero() { + runTime = time.Since(spec.startTime) + } + return &types.SpecSummary{ IsMeasurement: spec.IsMeasurement(), NumberOfSamples: spec.subject.Samples(), ComponentTexts: componentTexts, ComponentCodeLocations: componentCodeLocations, - State: spec.state, - RunTime: spec.runTime, + State: spec.getState(), + RunTime: runTime, Failure: spec.failure, Measurements: spec.measurementsReport(), SuiteID: suiteID, @@ -114,26 +125,38 @@ func (spec *Spec) ConcatenatedString() string { } func (spec *Spec) Run(writer io.Writer) { - if spec.state == types.SpecStateFailed { + if spec.getState() == types.SpecStateFailed { spec.previousFailures = true } - startTime := time.Now() + spec.startTime = time.Now() defer func() { - spec.runTime = time.Since(startTime) + spec.runTime = time.Since(spec.startTime) }() for sample := 0; sample < spec.subject.Samples(); sample++ { spec.runSample(sample, writer) - if spec.state != types.SpecStatePassed { + if spec.getState() != types.SpecStatePassed { return } } } +func (spec *Spec) getState() types.SpecState { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + return spec.state +} + +func (spec *Spec) setState(state types.SpecState) { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + spec.state = state +} + func (spec *Spec) runSample(sample int, writer io.Writer) { - spec.state = types.SpecStatePassed + spec.setState(types.SpecStatePassed) spec.failure = types.SpecFailure{} innerMostContainerIndexToUnwind := -1 @@ -143,8 +166,8 @@ func (spec *Spec) runSample(sample int, writer io.Writer) { for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { spec.announceSetupNode(writer, "AfterEach", container, afterEach) afterEachState, afterEachFailure := afterEach.Run() - if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { - spec.state = afterEachState + if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed { + spec.setState(afterEachState) spec.failure = afterEachFailure } } @@ -155,8 +178,10 @@ func (spec *Spec) runSample(sample int, writer io.Writer) { innerMostContainerIndexToUnwind = i for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) - spec.state, spec.failure = beforeEach.Run() - if spec.state != types.SpecStatePassed { + s, f := beforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { return } } @@ -165,15 +190,19 @@ func (spec *Spec) runSample(sample int, writer io.Writer) { for _, container := range spec.containers { for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) - spec.state, spec.failure = justBeforeEach.Run() - if spec.state != types.SpecStatePassed { + s, f := justBeforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { return } } } spec.announceSubject(writer, spec.subject) - spec.state, spec.failure = spec.subject.Run() + s, f := spec.subject.Run() + spec.failure = f + spec.setState(s) } func (spec *Spec) announceSetupNode(writer 
io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go index 54e61ecb465..99f548bca43 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -2,7 +2,6 @@ package spec_iterator import ( "encoding/json" - "errors" "fmt" "net/http" @@ -31,7 +30,7 @@ func (s *ParallelIterator) Next() (*spec.Spec, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode)) + return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) } var counter Counter diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go index d4dd909ecf5..2c683cb8b94 100644 --- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -56,7 +56,9 @@ func (runner *SpecRunner) Run() bool { } runner.reportSuiteWillBegin() - go runner.registerForInterrupts() + signalRegistered := make(chan struct{}) + go runner.registerForInterrupts(signalRegistered) + <-signalRegistered suitePassed := runner.runBeforeSuite() @@ -213,9 +215,10 @@ func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { return runner.runningSpec.Summary(runner.suiteID), true } -func (runner *SpecRunner) registerForInterrupts() { +func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) + close(signalRegistered) <-c signal.Stop(c) diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index 698a6e56898..f311e9a0d65 100644 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -149,35 +149,35 @@ func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagT func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { if suite.running { - suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation) + suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation) } suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) } func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { if suite.running { - suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation) + suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation) } suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) } func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { if suite.running { - suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation) + suite.failer.Fail("You may only call 
BeforeEach from within a Describe, Context or When", codeLocation) } suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) } func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { if suite.running { - suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation) + suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation) } suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) } func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { if suite.running { - suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation) + suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) } suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) } diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go index 6b23b1a6487..98eca3bdd25 100644 --- a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -16,10 +16,11 @@ type WriterInterface interface { } type Writer struct { - buffer *bytes.Buffer - outWriter io.Writer - lock *sync.Mutex - stream bool + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + stream bool + redirector io.Writer } func New(outWriter io.Writer) *Writer { @@ -31,6 +32,10 @@ func New(outWriter io.Writer) *Writer { } } +func (w *Writer) AndRedirectTo(writer io.Writer) { + w.redirector = writer +} + func (w *Writer) SetStream(stream bool) { w.lock.Lock() defer w.lock.Unlock() @@ -42,6 +47,9 @@ func (w *Writer) Write(b []byte) (n int, err error) { defer w.lock.Unlock() n, err = w.buffer.Write(b) + if w.redirector != nil { + w.redirector.Write(b) + } if w.stream { return w.outWriter.Write(b) } diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go index fb82f70a6d8..ac58dd5f7a3 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -66,7 +66,7 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) case types.SpecStatePending: reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) case types.SpecStateSkipped: - reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace) case types.SpecStateTimedOut: reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) case types.SpecStatePanicked: diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 89b03513fd1..65b8964e518 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -11,6 +11,7 @@ package reporters import ( "encoding/xml" 
"fmt" + "math" "os" "strings" @@ -21,8 +22,10 @@ import ( type JUnitTestSuite struct { XMLName xml.Name `xml:"testsuite"` TestCases []JUnitTestCase `xml:"testcase"` + Name string `xml:"name,attr"` Tests int `xml:"tests,attr"` Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` Time float64 `xml:"time,attr"` } @@ -59,6 +62,7 @@ func NewJUnitReporter(filename string) *JUnitReporter { func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { reporter.suite = JUnitTestSuite{ + Name: summary.SuiteDescription, TestCases: []JUnitTestCase{}, } reporter.testSuiteName = summary.SuiteDescription @@ -117,8 +121,9 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun - reporter.suite.Time = summary.RunTime.Seconds() + reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 reporter.suite.Failures = summary.NumberOfFailedSpecs + reporter.suite.Errors = 0 file, err := os.Create(reporter.filename) if err != nil { fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/BUILD b/vendor/github.com/onsi/ginkgo/reporters/stenographer/BUILD index 81c6a847152..5b943038527 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/BUILD +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/BUILD @@ -10,10 +10,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/reporters/stenographer", importpath = "github.com/onsi/ginkgo/reporters/stenographer", visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable:go_default_library", - "//vendor/github.com/onsi/ginkgo/types:go_default_library", - ], + deps = ["//vendor/github.com/onsi/ginkgo/types:go_default_library"], ) filegroup( diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go index fefd3e182f5..601c74d66eb 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -12,7 +12,6 @@ import ( "runtime" "strings" - "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" "github.com/onsi/ginkgo/types" ) @@ -62,7 +61,7 @@ type Stenographer interface { SummarizeFailures(summaries []*types.SpecSummary) } -func New(color bool, enableFlakes bool) Stenographer { +func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { denoter := "•" if runtime.GOOS == "windows" { denoter = "+" @@ -72,7 +71,7 @@ func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { denoter: denoter, cursorState: cursorStateTop, enableFlakes: enableFlakes, - w: colorable.NewColorableStdout(), + w: writer, } } @@ -178,7 +177,7 @@ func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSumm } s.print(0, - "%s -- %s | %s | %s | %s ", + "%s -- %s | %s | %s | %s\n", status, s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go index 657dfe726e2..36ee2a60057 100644 --- 
a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -10,10 +10,11 @@ package reporters import ( "fmt" - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" "io" "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" ) const ( diff --git a/vendor/gopkg.in/fsnotify.v1/.editorconfig b/vendor/gopkg.in/fsnotify.v1/.editorconfig new file mode 100644 index 00000000000..ba49e3c2349 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/.editorconfig @@ -0,0 +1,5 @@ +root = true + +[*] +indent_style = tab +indent_size = 4 diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/gopkg.in/fsnotify.v1/.gitignore new file mode 100644 index 00000000000..4cd0cbaf432 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/gopkg.in/fsnotify.v1/.travis.yml b/vendor/gopkg.in/fsnotify.v1/.travis.yml new file mode 100644 index 00000000000..981d1bb8132 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/.travis.yml @@ -0,0 +1,30 @@ +sudo: false +language: go + +go: + - 1.8.x + - 1.9.x + - tip + +matrix: + allow_failures: + - go: tip + fast_finish: true + +before_script: + - go get -u github.com/golang/lint/golint + +script: + - go test -v --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - go vet ./... + +os: + - linux + - osx + +notifications: + email: false diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS new file mode 100644 index 00000000000..5ab5d41c547 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. 
+ +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/gopkg.in/fsnotify.v1/BUILD b/vendor/gopkg.in/fsnotify.v1/BUILD new file mode 100644 index 00000000000..d10638a0788 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/BUILD @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "fen.go", + "fsnotify.go", + "inotify.go", + "inotify_poller.go", + "kqueue.go", + "open_mode_bsd.go", + "open_mode_darwin.go", + "windows.go", + ], + importmap = "k8s.io/kubernetes/vendor/gopkg.in/fsnotify.v1", + importpath = "gopkg.in/fsnotify.v1", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md new file mode 100644 index 00000000000..be4d7ea2c14 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive 
[#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. 
[#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 
2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md new file mode 100644 index 00000000000..828a60b24ba --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/gopkg.in/fsnotify.v1/LICENSE b/vendor/gopkg.in/fsnotify.v1/LICENSE new file mode 100644 index 00000000000..f21e5408009 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md new file mode 100644 index 00000000000..3993207413a --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/README.md @@ -0,0 +1,79 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +|Adapter |OS |Status | +|----------|----------|----------| +|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| +|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| +|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| +|fanotify |Linux 2.6.37+ | | +|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| +|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). 
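For orientation alongside this FAQ, here is a minimal watcher loop against the v1 API vendored in this patch (`NewWatcher`, `Add`, and the `Events`/`Errors` channels all appear in the sources below). This is a sketch only: the `gopkg.in/fsnotify.v1` import path matches this vendor tree, and upstream's `example_test.go` remains the canonical example.

```go
package main

import (
	"log"

	fsnotify "gopkg.in/fsnotify.v1"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watches are non-recursive: each directory must be added explicitly.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Events and Errors are separate channels and both must be drained.
	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Println("event:", event) // e.g. "/tmp/foo": CREATE
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```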
+ +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/gopkg.in/fsnotify.v1/fen.go b/vendor/gopkg.in/fsnotify.v1/fen.go new file mode 100644 index 00000000000..ced39cb881e --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go new file mode 100644 index 00000000000..190bf0de575 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. 
+const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go new file mode 100644 index 00000000000..d9fd1b88a05 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
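Because `Op` (defined in `fsnotify.go` above) is a bit set built with `1 << iota`, a single event can carry several operations, and both the library and client code test membership with a mask rather than equality. A small sketch, assuming the same vendored import path as in the README example earlier:

```go
package main

import (
	"fmt"

	fsnotify "gopkg.in/fsnotify.v1"
)

func main() {
	// Combine two operations the way one event may report them.
	op := fsnotify.Create | fsnotify.Write

	// Membership is tested with a mask, mirroring the library's checks.
	if op&fsnotify.Create == fsnotify.Create {
		fmt.Println("op includes CREATE")
	}

	// Op implements fmt.Stringer via the String method shown above.
	fmt.Println(op) // CREATE|WRITE
}
```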
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. 
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
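The read loop above is parsing variable-length records: each read(2) yields one or more fixed-size `unix.InotifyEvent` headers, each followed by `Len` bytes of NUL-padded name. Below is a reduced, Linux-only sketch of just that offset arithmetic; `parseNames` is a hypothetical helper for illustration, not part of the library:

```go
package inotifysketch

import (
	"strings"
	"unsafe"

	"golang.org/x/sys/unix"
)

// parseNames walks a raw inotify read buffer of n bytes and returns the
// NUL-trimmed names, mirroring the offset arithmetic in readEvents: a
// fixed-size header, then Len bytes of padded name, then the next record.
func parseNames(buf []byte, n int) []string {
	var names []string
	var offset uint32
	for n >= unix.SizeofInotifyEvent && offset <= uint32(n-unix.SizeofInotifyEvent) {
		raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
		nameLen := uint32(raw.Len)
		if nameLen > 0 {
			name := buf[offset+unix.SizeofInotifyEvent : offset+unix.SizeofInotifyEvent+nameLen]
			// The kernel pads names with NUL bytes; trim them off.
			names = append(names, strings.TrimRight(string(name), "\x00"))
		}
		offset += unix.SizeofInotifyEvent + nameLen
	}
	return names
}
```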
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go new file mode 100644 index 00000000000..cc7db4b22ef --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go new file mode 100644 index 00000000000..86e76a3d676 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
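`Close` and `Remove` above share a locking idiom worth noting: snapshot the map keys while holding `mu`, unlock, and only then call back into methods that take the lock themselves, since holding a non-reentrant `sync.Mutex` across those calls would self-deadlock. A generic sketch of the shape (the `registry` type and its names are illustrative, not from the library):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	entries map[string]int
}

// removeAll snapshots the keys under the lock, then releases it before
// calling remove, which locks again itself. Keeping mu held across the
// remove calls would deadlock.
func (r *registry) removeAll() {
	r.mu.Lock()
	keys := make([]string, 0, len(r.entries))
	for k := range r.entries {
		keys = append(keys, k)
	}
	r.mu.Unlock()

	for _, k := range keys {
		r.remove(k)
	}
}

func (r *registry) remove(k string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.entries, k)
}

func main() {
	r := &registry{entries: map[string]int{"a": 1, "b": 2}}
	r.removeAll()
	fmt.Println(len(r.entries)) // 0
}
```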
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when
+				// we do a rm -fr on a recursively watched folder and receive a
+				// modification event first, but the folder has been deleted and we
+				// later receive the delete event.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel.
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					break loop
+				}
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// only way the previous loop breaks is if w.done was closed, so we need to async send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles adds watches on all files in a directory, to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify, which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		select {
+		case w.Events <- newCreateEvent(filePath):
+		case <-w.done:
+			return
+		}
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+	n, err := unix.Kevent(kq, nil, events, timeout)
+	if err != nil {
+		return nil, err
+	}
+	return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+	return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
new file mode 100644
index 00000000000..7d8de14513e
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
new file mode 100644
index 00000000000..9139e17161b
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
diff --git a/vendor/gopkg.in/fsnotify.v1/windows.go b/vendor/gopkg.in/fsnotify.v1/windows.go
new file mode 100644
index 00000000000..09436f31d82
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
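+			// If the next entry's offset points at or past the byte count
+			// reported for this completion, the notification buffer is
+			// inconsistent; report the error below and stop parsing it.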
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/gopkg.in/tomb.v1/BUILD b/vendor/gopkg.in/tomb.v1/BUILD new file mode 100644 index 00000000000..25bf1d8bf06 --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["tomb.go"], + importmap = "k8s.io/kubernetes/vendor/gopkg.in/tomb.v1", + importpath = "gopkg.in/tomb.v1", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/gopkg.in/tomb.v1/LICENSE b/vendor/gopkg.in/tomb.v1/LICENSE new file mode 100644 index 00000000000..a4249bb31dd --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/LICENSE @@ -0,0 +1,29 @@ +tomb - support for clean goroutine termination in Go. + +Copyright (c) 2010-2011 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/tomb.v1/README.md b/vendor/gopkg.in/tomb.v1/README.md new file mode 100644 index 00000000000..3ae8788e813 --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/README.md @@ -0,0 +1,4 @@ +Installation and usage +---------------------- + +See [gopkg.in/tomb.v1](https://gopkg.in/tomb.v1) for documentation and usage details. diff --git a/vendor/gopkg.in/tomb.v1/tomb.go b/vendor/gopkg.in/tomb.v1/tomb.go new file mode 100644 index 00000000000..9aec56d821d --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/tomb.go @@ -0,0 +1,176 @@ +// Copyright (c) 2011 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The tomb package offers a conventional API for clean goroutine termination. +// +// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, +// and the reason for its death. +// +// The zero value of a Tomb assumes that a goroutine is about to be +// created or already alive. Once Kill or Killf is called with an +// argument that informs the reason for death, the goroutine is in +// a dying state and is expected to terminate soon. Right before the +// goroutine function or method returns, Done must be called to inform +// that the goroutine is indeed dead and about to stop running. +// +// A Tomb exposes Dying and Dead channels. These channels are closed +// when the Tomb state changes in the respective way. They enable +// explicit blocking until the state changes, and also to selectively +// unblock select statements accordingly. 
+// +// When the tomb state changes to dying and there's still logic going +// on within the goroutine, nested functions and methods may choose to +// return ErrDying as their error value, as this error won't alter the +// tomb state if provided to the Kill method. This is a convenient way to +// follow standard Go practices in the context of a dying tomb. +// +// For background and a detailed example, see the following blog post: +// +// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control +// +// For a more complex code snippet demonstrating the use of multiple +// goroutines with a single Tomb, see: +// +// http://play.golang.org/p/Xh7qWsDPZP +// +package tomb + +import ( + "errors" + "fmt" + "sync" +) + +// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, +// and the reason for its death. +// +// See the package documentation for details. +type Tomb struct { + m sync.Mutex + dying chan struct{} + dead chan struct{} + reason error +} + +var ( + ErrStillAlive = errors.New("tomb: still alive") + ErrDying = errors.New("tomb: dying") +) + +func (t *Tomb) init() { + t.m.Lock() + if t.dead == nil { + t.dead = make(chan struct{}) + t.dying = make(chan struct{}) + t.reason = ErrStillAlive + } + t.m.Unlock() +} + +// Dead returns the channel that can be used to wait +// until t.Done has been called. +func (t *Tomb) Dead() <-chan struct{} { + t.init() + return t.dead +} + +// Dying returns the channel that can be used to wait +// until t.Kill or t.Done has been called. +func (t *Tomb) Dying() <-chan struct{} { + t.init() + return t.dying +} + +// Wait blocks until the goroutine is in a dead state and returns the +// reason for its death. +func (t *Tomb) Wait() error { + t.init() + <-t.dead + t.m.Lock() + reason := t.reason + t.m.Unlock() + return reason +} + +// Done flags the goroutine as dead, and should be called a single time +// right before the goroutine function or method returns. +// If the goroutine was not already in a dying state before Done is +// called, it will be flagged as dying and dead at once with no +// error. +func (t *Tomb) Done() { + t.Kill(nil) + close(t.dead) +} + +// Kill flags the goroutine as dying for the given reason. +// Kill may be called multiple times, but only the first +// non-nil error is recorded as the reason for termination. +// +// If reason is ErrDying, the previous reason isn't replaced +// even if it is nil. It's a runtime error to call Kill with +// ErrDying if t is not in a dying state. +func (t *Tomb) Kill(reason error) { + t.init() + t.m.Lock() + defer t.m.Unlock() + if reason == ErrDying { + if t.reason == ErrStillAlive { + panic("tomb: Kill with ErrDying while still alive") + } + return + } + if t.reason == nil || t.reason == ErrStillAlive { + t.reason = reason + } + // If the receive on t.dying succeeds, then + // it can only be because we have already closed it. + // If it blocks, then we know that it needs to be closed. + select { + case <-t.dying: + default: + close(t.dying) + } +} + +// Killf works like Kill, but builds the reason providing the received +// arguments to fmt.Errorf. The generated error is also returned. +func (t *Tomb) Killf(f string, a ...interface{}) error { + err := fmt.Errorf(f, a...) + t.Kill(err) + return err +} + +// Err returns the reason for the goroutine death provided via Kill +// or Killf, or ErrStillAlive when the goroutine is still alive. 
+func (t *Tomb) Err() (reason error) { + t.init() + t.m.Lock() + reason = t.reason + t.m.Unlock() + return +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ec7c97af8f..9039db03063 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -533,6 +533,12 @@ github.com/hashicorp/hcl/json/token github.com/heketi/heketi/client/api/go-client github.com/heketi/heketi/pkg/glusterfs/api github.com/heketi/heketi/pkg/utils +# github.com/hpcloud/tail v1.0.0 => github.com/hpcloud/tail v1.0.0 +github.com/hpcloud/tail +github.com/hpcloud/tail/ratelimiter +github.com/hpcloud/tail/util +github.com/hpcloud/tail/watch +github.com/hpcloud/tail/winfile # github.com/imdario/mergo v0.3.5 => github.com/imdario/mergo v0.3.5 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 => github.com/inconshreveable/mousetrap v1.0.0 @@ -610,7 +616,7 @@ github.com/munnerz/goautoneg github.com/mvdan/xurls # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f => github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo v1.6.0 => github.com/onsi/ginkgo v0.0.0-20170318221715-67b9df7f55fe +# github.com/onsi/ginkgo v1.6.0 => github.com/onsi/ginkgo v1.6.0 github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/ginkgo @@ -994,6 +1000,8 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/transport +# gopkg.in/fsnotify.v1 v1.4.7 => gopkg.in/fsnotify.v1 v1.4.7 +gopkg.in/fsnotify.v1 # gopkg.in/gcfg.v1 v1.2.0 => gopkg.in/gcfg.v1 v1.2.0 gopkg.in/gcfg.v1 gopkg.in/gcfg.v1/scanner @@ -1008,6 +1016,8 @@ gopkg.in/square/go-jose.v2 gopkg.in/square/go-jose.v2/cipher gopkg.in/square/go-jose.v2/json gopkg.in/square/go-jose.v2/jwt +# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 => gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 +gopkg.in/tomb.v1 # gopkg.in/warnings.v0 v0.1.1 => gopkg.in/warnings.v0 v0.1.1 gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.2.1 => gopkg.in/yaml.v2 v2.2.1 From d4ee919fb588d18864a5c40ee43aa44657536f65 Mon Sep 17 00:00:00 2001 From: Stuart McLaren Date: Tue, 16 Apr 2019 18:29:22 +0100 Subject: [PATCH 037/209] Fix kubectl version --client=true Getting the client version fails if the kubeconfig is invalid: $ kubectl version --client=true Error in configuration: * unable to read client-cert .../client.crt: no such file or directory * unable to read client-key .../client.key: no such file or directory * unable to read certificate-authority .../ca.crt: no such file or directory Update to match behaviour on v1.10.13 and earlier: $ kubectl version --client=true Client Version: version.Info{Major:"1", ...} --- pkg/kubectl/cmd/version/version.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/kubectl/cmd/version/version.go b/pkg/kubectl/cmd/version/version.go index 6827da212bf..110742be001 100644 --- a/pkg/kubectl/cmd/version/version.go +++ b/pkg/kubectl/cmd/version/version.go @@ -88,6 +88,9 @@ func NewCmdVersion(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *co // Complete completes all the required options func (o *Options) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error + if o.ClientOnly { + return nil + } o.discoveryClient, err = f.ToDiscoveryClient() // if we had an empty rest.Config, continue and just print out client information. // if we had an error other than being unable to build a rest.Config, fail. 
From abfc5bbbf7ef4a84edc2d3efb42c3c17debd22b4 Mon Sep 17 00:00:00 2001 From: Sean Sullivan Date: Tue, 16 Apr 2019 12:55:30 -0700 Subject: [PATCH 038/209] Rename TablePrinter interface to TableGenerator --- pkg/printers/humanreadable.go | 12 ++--- pkg/printers/internalversion/printers_test.go | 44 +++++++++---------- pkg/printers/storage/storage.go | 4 +- .../controllerrevision/storage/storage.go | 2 +- .../apps/daemonset/storage/storage.go | 2 +- .../apps/deployment/storage/storage.go | 2 +- .../apps/replicaset/storage/storage.go | 2 +- .../apps/statefulset/storage/storage.go | 2 +- .../storage/storage.go | 2 +- pkg/registry/batch/cronjob/storage/storage.go | 2 +- pkg/registry/batch/job/storage/storage.go | 2 +- .../certificates/storage/storage.go | 2 +- .../coordination/lease/storage/storage.go | 2 +- .../core/configmap/storage/storage.go | 2 +- pkg/registry/core/endpoint/storage/storage.go | 2 +- pkg/registry/core/event/storage/storage.go | 2 +- .../core/namespace/storage/storage.go | 2 +- pkg/registry/core/node/storage/storage.go | 2 +- .../core/persistentvolume/storage/storage.go | 2 +- .../persistentvolumeclaim/storage/storage.go | 2 +- pkg/registry/core/pod/storage/storage.go | 2 +- .../core/podtemplate/storage/storage.go | 2 +- .../replicationcontroller/storage/storage.go | 2 +- pkg/registry/core/secret/storage/storage.go | 2 +- pkg/registry/core/service/storage/storage.go | 2 +- .../core/serviceaccount/storage/storage.go | 2 +- .../networking/ingress/storage/storage.go | 2 +- .../networkpolicy/storage/storage.go | 2 +- .../poddisruptionbudget/storage/storage.go | 2 +- .../podsecuritypolicy/storage/storage.go | 2 +- .../clusterrolebinding/storage/storage.go | 2 +- .../rbac/rolebinding/storage/storage.go | 2 +- .../priorityclass/storage/storage.go | 2 +- .../storage/storageclass/storage/storage.go | 2 +- .../volumeattachment/storage/storage.go | 2 +- 35 files changed, 62 insertions(+), 62 deletions(-) diff --git a/pkg/printers/humanreadable.go b/pkg/printers/humanreadable.go index aba9660e81c..1837ce2dae9 100644 --- a/pkg/printers/humanreadable.go +++ b/pkg/printers/humanreadable.go @@ -34,8 +34,8 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) -type TablePrinter interface { - PrintTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) +type TableGenerator interface { + GenerateTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) } type PrintHandler interface { @@ -75,8 +75,8 @@ func NewHumanReadablePrinter(options PrintOptions) *HumanReadablePrinter { return printer } -// NewTablePrinter creates a HumanReadablePrinter suitable for calling PrintTable(). -func NewTablePrinter() *HumanReadablePrinter { +// NewTableGenerator creates a HumanReadablePrinter suitable for calling GenerateTable(). +func NewTableGenerator() *HumanReadablePrinter { return &HumanReadablePrinter{ handlerMap: make(map[reflect.Type]*handlerEntry), } @@ -445,10 +445,10 @@ func DecorateTable(table *metav1beta1.Table, options PrintOptions) error { return nil } -// PrintTable returns a table for the provided object, using the printer registered for that type. It returns +// GenerateTable returns a table for the provided object, using the printer registered for that type. It returns // a table that includes all of the information requested by options, but will not remove rows or columns. The // caller is responsible for applying rules related to filtering rows or columns. 
-func (h *HumanReadablePrinter) PrintTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) { +func (h *HumanReadablePrinter) GenerateTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) { t := reflect.TypeOf(obj) handler, ok := h.handlerMap[t] if !ok { diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 03d2d8ed39e..c3b85567e43 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -1094,7 +1094,7 @@ func TestPrintHunmanReadableIngressWithColumnLabels(t *testing.T) { }, } buff := bytes.NewBuffer([]byte{}) - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&ingress, printers.PrintOptions{ColumnLabels: []string{"app_name"}}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&ingress, printers.PrintOptions{ColumnLabels: []string{"app_name"}}) if err != nil { t.Fatal(err) } @@ -1228,7 +1228,7 @@ func TestPrintHumanReadableService(t *testing.T) { for _, svc := range tests { for _, wide := range []bool{false, true} { buff := bytes.NewBuffer([]byte{}) - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&svc, printers.PrintOptions{Wide: wide}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&svc, printers.PrintOptions{Wide: wide}) if err != nil { t.Fatal(err) } @@ -1507,7 +1507,7 @@ func TestPrintPodTable(t *testing.T) { } for i, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(test.obj, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(test.obj, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -1865,7 +1865,7 @@ func TestPrintNonTerminatedPod(t *testing.T) { } for i, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.pod, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.pod, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -1927,7 +1927,7 @@ func TestPrintPodWithLabels(t *testing.T) { } for i, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.pod, printers.PrintOptions{ColumnLabels: test.labelColumns}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.pod, printers.PrintOptions{ColumnLabels: test.labelColumns}) if err != nil { t.Fatal(err) } @@ -2036,7 +2036,7 @@ func TestPrintDeployment(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.deployment, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.deployment, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -2048,7 +2048,7 @@ func TestPrintDeployment(t *testing.T) { t.Fatalf("Expected: %s, got: %s", test.expect, buf.String()) } buf.Reset() - table, err = printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.deployment, printers.PrintOptions{Wide: true}) + table, err = printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.deployment, printers.PrintOptions{Wide: true}) verifyTable(t, table) // print deployment with '-o wide' option if err := printers.PrintTable(table, buf, printers.PrintOptions{Wide: true, NoHeaders: true}); err != nil { @@ -2091,7 +2091,7 @@ func TestPrintDaemonSet(t *testing.T) { 
buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.ds, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.ds, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -2180,7 +2180,7 @@ func TestPrintJob(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.job, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.job, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -2787,7 +2787,7 @@ func TestPrintHPA(t *testing.T) { buff := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.hpa, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.hpa, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -2850,7 +2850,7 @@ func TestPrintPodShowLabels(t *testing.T) { } for i, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.pod, printers.PrintOptions{ShowLabels: test.showLabels}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.pod, printers.PrintOptions{ShowLabels: test.showLabels}) if err != nil { t.Fatal(err) } @@ -3017,7 +3017,7 @@ func TestPrintService(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.service, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.service, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3075,7 +3075,7 @@ func TestPrintPodDisruptionBudget(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.pdb, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.pdb, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3156,7 +3156,7 @@ func TestPrintControllerRevision(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.history, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.history, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3217,7 +3217,7 @@ func TestPrintReplicaSet(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.replicaSet, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.replicaSet, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3230,7 +3230,7 @@ func TestPrintReplicaSet(t *testing.T) { } buf.Reset() - table, err = printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.replicaSet, printers.PrintOptions{Wide: true}) + table, err = printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.replicaSet, printers.PrintOptions{Wide: true}) if err != nil { t.Fatal(err) } @@ -3329,7 +3329,7 @@ func TestPrintPersistentVolumeClaim(t *testing.T) { } buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := 
printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.pvc, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.pvc, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3402,7 +3402,7 @@ func TestPrintCronJob(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.cronjob, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.cronjob, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3446,7 +3446,7 @@ func TestPrintStorageClass(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.sc, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.sc, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3496,7 +3496,7 @@ func TestPrintLease(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.sc, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.sc, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3540,7 +3540,7 @@ func TestPrintPriorityClass(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.pc, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.pc, printers.PrintOptions{}) if err != nil { t.Fatal(err) } @@ -3584,7 +3584,7 @@ func TestPrintRuntimeClass(t *testing.T) { buf := bytes.NewBuffer([]byte{}) for _, test := range tests { - table, err := printers.NewTablePrinter().With(AddHandlers).PrintTable(&test.rc, printers.PrintOptions{}) + table, err := printers.NewTableGenerator().With(AddHandlers).GenerateTable(&test.rc, printers.PrintOptions{}) if err != nil { t.Fatal(err) } diff --git a/pkg/printers/storage/storage.go b/pkg/printers/storage/storage.go index c1bd30a94cf..9dcc71e5d38 100644 --- a/pkg/printers/storage/storage.go +++ b/pkg/printers/storage/storage.go @@ -26,7 +26,7 @@ import ( ) type TableConvertor struct { - printers.TablePrinter + printers.TableGenerator } func (c TableConvertor) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*metav1beta1.Table, error) { @@ -41,5 +41,5 @@ func (c TableConvertor) ConvertToTable(ctx context.Context, obj runtime.Object, return nil, fmt.Errorf("unrecognized type %T for table options, can't display tabular output", tableOptions) } } - return c.TablePrinter.PrintTable(obj, printers.PrintOptions{Wide: true, NoHeaders: noHeaders}) + return c.TableGenerator.GenerateTable(obj, printers.PrintOptions{Wide: true, NoHeaders: noHeaders}) } diff --git a/pkg/registry/apps/controllerrevision/storage/storage.go b/pkg/registry/apps/controllerrevision/storage/storage.go index 86f81fd850a..92085b59387 100644 --- a/pkg/registry/apps/controllerrevision/storage/storage.go +++ b/pkg/registry/apps/controllerrevision/storage/storage.go @@ -43,7 +43,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: controllerrevision.Strategy, DeleteStrategy: controllerrevision.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: 
printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/apps/daemonset/storage/storage.go b/pkg/registry/apps/daemonset/storage/storage.go index 00f771c8121..16f85488182 100644 --- a/pkg/registry/apps/daemonset/storage/storage.go +++ b/pkg/registry/apps/daemonset/storage/storage.go @@ -48,7 +48,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: daemonset.Strategy, DeleteStrategy: daemonset.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/apps/deployment/storage/storage.go b/pkg/registry/apps/deployment/storage/storage.go index bf456b74ef4..3b63bb0a102 100644 --- a/pkg/registry/apps/deployment/storage/storage.go +++ b/pkg/registry/apps/deployment/storage/storage.go @@ -80,7 +80,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Rollbac UpdateStrategy: deployment.Strategy, DeleteStrategy: deployment.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/apps/replicaset/storage/storage.go b/pkg/registry/apps/replicaset/storage/storage.go index d8bce92411d..a2a4a4b6ccd 100644 --- a/pkg/registry/apps/replicaset/storage/storage.go +++ b/pkg/registry/apps/replicaset/storage/storage.go @@ -76,7 +76,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: replicaset.Strategy, DeleteStrategy: replicaset.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: replicaset.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/apps/statefulset/storage/storage.go b/pkg/registry/apps/statefulset/storage/storage.go index 51d595f75fd..0aeb67c9c7a 100644 --- a/pkg/registry/apps/statefulset/storage/storage.go +++ b/pkg/registry/apps/statefulset/storage/storage.go @@ -72,7 +72,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: statefulset.Strategy, DeleteStrategy: statefulset.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git 
a/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go b/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go index b9e7dcad42e..2da0772cfee 100644 --- a/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go +++ b/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go @@ -46,7 +46,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: horizontalpodautoscaler.Strategy, DeleteStrategy: horizontalpodautoscaler.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/batch/cronjob/storage/storage.go b/pkg/registry/batch/cronjob/storage/storage.go index c71092db1b5..3bfacfb777e 100644 --- a/pkg/registry/batch/cronjob/storage/storage.go +++ b/pkg/registry/batch/cronjob/storage/storage.go @@ -47,7 +47,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: cronjob.Strategy, DeleteStrategy: cronjob.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/batch/job/storage/storage.go b/pkg/registry/batch/job/storage/storage.go index 77d853deb75..a874774d420 100644 --- a/pkg/registry/batch/job/storage/storage.go +++ b/pkg/registry/batch/job/storage/storage.go @@ -63,7 +63,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: job.Strategy, DeleteStrategy: job.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: job.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/certificates/certificates/storage/storage.go b/pkg/registry/certificates/certificates/storage/storage.go index 0b9fd6ea531..953bbd1390c 100644 --- a/pkg/registry/certificates/certificates/storage/storage.go +++ b/pkg/registry/certificates/certificates/storage/storage.go @@ -48,7 +48,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Approva DeleteStrategy: csrregistry.Strategy, ExportStrategy: csrregistry.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/coordination/lease/storage/storage.go b/pkg/registry/coordination/lease/storage/storage.go index 37d66095e9f..71589e2b6c3 100644 --- a/pkg/registry/coordination/lease/storage/storage.go +++ 
b/pkg/registry/coordination/lease/storage/storage.go @@ -43,7 +43,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: lease.Strategy, DeleteStrategy: lease.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/configmap/storage/storage.go b/pkg/registry/core/configmap/storage/storage.go index 124fca6e59d..31cdd1b0e09 100644 --- a/pkg/registry/core/configmap/storage/storage.go +++ b/pkg/registry/core/configmap/storage/storage.go @@ -44,7 +44,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: configmap.Strategy, DeleteStrategy: configmap.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/endpoint/storage/storage.go b/pkg/registry/core/endpoint/storage/storage.go index 902b7df0b55..315b2aa73ee 100644 --- a/pkg/registry/core/endpoint/storage/storage.go +++ b/pkg/registry/core/endpoint/storage/storage.go @@ -43,7 +43,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: endpoint.Strategy, DeleteStrategy: endpoint.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/event/storage/storage.go b/pkg/registry/core/event/storage/storage.go index b36b6c44726..356ed35d334 100644 --- a/pkg/registry/core/event/storage/storage.go +++ b/pkg/registry/core/event/storage/storage.go @@ -53,7 +53,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter, ttl uint64) *REST { UpdateStrategy: event.Strategy, DeleteStrategy: event.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: opts, AttrFunc: event.GetAttrs} // Pass in opts to use UndecoratedStorage if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/namespace/storage/storage.go b/pkg/registry/core/namespace/storage/storage.go index b9d0558ab7f..32ed5d776a0 100644 --- a/pkg/registry/core/namespace/storage/storage.go +++ b/pkg/registry/core/namespace/storage/storage.go @@ -69,7 +69,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Finaliz DeleteStrategy: namespace.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: 
printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: namespace.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/node/storage/storage.go b/pkg/registry/core/node/storage/storage.go index 78a25f8d752..bf88a8d61d1 100644 --- a/pkg/registry/core/node/storage/storage.go +++ b/pkg/registry/core/node/storage/storage.go @@ -87,7 +87,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, kubeletClientConfig client DeleteStrategy: node.Strategy, ExportStrategy: node.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: node.GetAttrs, TriggerFunc: node.NodeNameTriggerFunc} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/persistentvolume/storage/storage.go b/pkg/registry/core/persistentvolume/storage/storage.go index f1339b3dd13..14b4640c66a 100644 --- a/pkg/registry/core/persistentvolume/storage/storage.go +++ b/pkg/registry/core/persistentvolume/storage/storage.go @@ -48,7 +48,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { DeleteStrategy: persistentvolume.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: persistentvolume.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/persistentvolumeclaim/storage/storage.go b/pkg/registry/core/persistentvolumeclaim/storage/storage.go index 2eeaa3a3651..7e0481941cf 100644 --- a/pkg/registry/core/persistentvolumeclaim/storage/storage.go +++ b/pkg/registry/core/persistentvolumeclaim/storage/storage.go @@ -48,7 +48,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { DeleteStrategy: persistentvolumeclaim.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: persistentvolumeclaim.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/pod/storage/storage.go b/pkg/registry/core/pod/storage/storage.go index 898e1c7d076..1a1fa9bbeb5 100644 --- a/pkg/registry/core/pod/storage/storage.go +++ b/pkg/registry/core/pod/storage/storage.go @@ -76,7 +76,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, k client.ConnectionInfoGet DeleteStrategy: pod.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: pod.GetAttrs, TriggerFunc: pod.NodeNameTriggerFunc} if 
err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/podtemplate/storage/storage.go b/pkg/registry/core/podtemplate/storage/storage.go index 1767186b999..73447cdc804 100644 --- a/pkg/registry/core/podtemplate/storage/storage.go +++ b/pkg/registry/core/podtemplate/storage/storage.go @@ -45,7 +45,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/replicationcontroller/storage/storage.go b/pkg/registry/core/replicationcontroller/storage/storage.go index 0422a93367e..85a85e02314 100644 --- a/pkg/registry/core/replicationcontroller/storage/storage.go +++ b/pkg/registry/core/replicationcontroller/storage/storage.go @@ -74,7 +74,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: replicationcontroller.Strategy, DeleteStrategy: replicationcontroller.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: replicationcontroller.GetAttrs} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/secret/storage/storage.go b/pkg/registry/core/secret/storage/storage.go index d6c91ab48e7..fb852682fd3 100644 --- a/pkg/registry/core/secret/storage/storage.go +++ b/pkg/registry/core/secret/storage/storage.go @@ -44,7 +44,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { DeleteStrategy: secret.Strategy, ExportStrategy: secret.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: secret.GetAttrs, TriggerFunc: secret.SecretNameTriggerFunc} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/service/storage/storage.go b/pkg/registry/core/service/storage/storage.go index 7ebe95c15b0..df89d04fd92 100644 --- a/pkg/registry/core/service/storage/storage.go +++ b/pkg/registry/core/service/storage/storage.go @@ -48,7 +48,7 @@ func NewGenericREST(optsGetter generic.RESTOptionsGetter) (*GenericREST, *Status DeleteStrategy: service.Strategy, ExportStrategy: service.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/core/serviceaccount/storage/storage.go b/pkg/registry/core/serviceaccount/storage/storage.go index 898588f5ee1..c9d2832e97f 100644 --- a/pkg/registry/core/serviceaccount/storage/storage.go +++ 
b/pkg/registry/core/serviceaccount/storage/storage.go @@ -49,7 +49,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter, issuer token.TokenGenerator, DeleteStrategy: serviceaccount.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/networking/ingress/storage/storage.go b/pkg/registry/networking/ingress/storage/storage.go index bf19d3d0f0a..350eba0b974 100644 --- a/pkg/registry/networking/ingress/storage/storage.go +++ b/pkg/registry/networking/ingress/storage/storage.go @@ -48,7 +48,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: ingress.Strategy, DeleteStrategy: ingress.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/networking/networkpolicy/storage/storage.go b/pkg/registry/networking/networkpolicy/storage/storage.go index c2e8a159da1..ba3e87aa4b0 100644 --- a/pkg/registry/networking/networkpolicy/storage/storage.go +++ b/pkg/registry/networking/networkpolicy/storage/storage.go @@ -44,7 +44,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: networkpolicy.Strategy, DeleteStrategy: networkpolicy.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/policy/poddisruptionbudget/storage/storage.go b/pkg/registry/policy/poddisruptionbudget/storage/storage.go index 68bfc32714a..cf4d9873b49 100644 --- a/pkg/registry/policy/poddisruptionbudget/storage/storage.go +++ b/pkg/registry/policy/poddisruptionbudget/storage/storage.go @@ -47,7 +47,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { UpdateStrategy: poddisruptionbudget.Strategy, DeleteStrategy: poddisruptionbudget.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/policy/podsecuritypolicy/storage/storage.go b/pkg/registry/policy/podsecuritypolicy/storage/storage.go index 5e8e5c69639..dfb132e5027 100644 --- a/pkg/registry/policy/podsecuritypolicy/storage/storage.go +++ b/pkg/registry/policy/podsecuritypolicy/storage/storage.go @@ -44,7 +44,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { DeleteStrategy: podsecuritypolicy.Strategy, ReturnDeletedObject: true, - TableConvertor: 
printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/rbac/clusterrolebinding/storage/storage.go b/pkg/registry/rbac/clusterrolebinding/storage/storage.go index 7567d779795..709b50681a5 100644 --- a/pkg/registry/rbac/clusterrolebinding/storage/storage.go +++ b/pkg/registry/rbac/clusterrolebinding/storage/storage.go @@ -43,7 +43,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: clusterrolebinding.Strategy, DeleteStrategy: clusterrolebinding.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/rbac/rolebinding/storage/storage.go b/pkg/registry/rbac/rolebinding/storage/storage.go index 135a33143ca..cce71ccb7ad 100644 --- a/pkg/registry/rbac/rolebinding/storage/storage.go +++ b/pkg/registry/rbac/rolebinding/storage/storage.go @@ -43,7 +43,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: rolebinding.Strategy, DeleteStrategy: rolebinding.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/scheduling/priorityclass/storage/storage.go b/pkg/registry/scheduling/priorityclass/storage/storage.go index 2c6d7c248b8..dfdd67946bd 100644 --- a/pkg/registry/scheduling/priorityclass/storage/storage.go +++ b/pkg/registry/scheduling/priorityclass/storage/storage.go @@ -49,7 +49,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { UpdateStrategy: priorityclass.Strategy, DeleteStrategy: priorityclass.Strategy, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/storage/storageclass/storage/storage.go b/pkg/registry/storage/storageclass/storage/storage.go index bfdc6fa1e4e..11af2f3443d 100644 --- a/pkg/registry/storage/storageclass/storage/storage.go +++ b/pkg/registry/storage/storageclass/storage/storage.go @@ -44,7 +44,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { DeleteStrategy: storageclass.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := 
store.CompleteWithOptions(options); err != nil { diff --git a/pkg/registry/storage/volumeattachment/storage/storage.go b/pkg/registry/storage/volumeattachment/storage/storage.go index 4f88077d1a0..0a4643d7f89 100644 --- a/pkg/registry/storage/volumeattachment/storage/storage.go +++ b/pkg/registry/storage/volumeattachment/storage/storage.go @@ -54,7 +54,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter) *VolumeAttachmentStorage { DeleteStrategy: volumeattachment.Strategy, ReturnDeletedObject: true, - TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { From 5168e6277fa89267ef8b89ff34d54a6caa5a8ea6 Mon Sep 17 00:00:00 2001 From: Yongkun Gui Date: Fri, 5 Apr 2019 14:52:32 -0700 Subject: [PATCH 039/209] Add e2e for connection reset issue Regression test for #74839 --- test/e2e/network/kube_proxy.go | 113 ++++++++++++++++++++++++++++++++- 1 file changed, 112 insertions(+), 1 deletion(-) diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index 9cf7acf0399..d222ea15e07 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -215,4 +215,115 @@ var _ = SIGDescribe("Network", func() { Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should( BeNumerically("<", (epsilonSeconds))) }) + + // Regression test for #74839, where: + // Packets considered INVALID by conntrack are now dropped. 
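+ // (An illustrative aside, not part of this patch: the fix for #74839 makes kube-proxy drop such packets, e.g. with an iptables rule along the lines of `-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP`, so that a spurious RST is never forwarded back to the client.)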
In particular, this fixes + // a problem where spurious retransmits in a long-running TCP connection to a service + // IP could result in the connection being closed with the error "Connection reset by + // peer" + It("should resolve connection reset issue #74839 [Slow]", func() { + serverLabel := map[string]string{ + "app": "boom-server", + } + clientLabel := map[string]string{ + "app": "client", + } + + serverPod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "boom-server", + Labels: serverLabel, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "boom-server", + Image: "gcr.io/kubernetes-e2e-test-images/regression-issue-74839-amd64:1.0", + Ports: []v1.ContainerPort{ + { + ContainerPort: 9000, // Default port exposed by boom-server + }, + }, + }, + }, + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: clientLabel, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + } + _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod) + framework.ExpectNoError(err) + + By("Server pod created") + + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "boom-server", + }, + Spec: v1.ServiceSpec{ + Selector: serverLabel, + Ports: []v1.ServicePort{ + { + Protocol: v1.ProtocolTCP, + Port: 9000, + }, + }, + }, + } + _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc) + framework.ExpectNoError(err) + + By("Server service created") + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "startup-script", + Labels: clientLabel, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "startup-script", + Image: "gcr.io/google-containers/startup-script:v1", + Command: []string{ + "bash", "-c", "while true; do sleep 2; nc boom-server 9000& done", + }, + }, + }, + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: serverLabel, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod) + framework.ExpectNoError(err) + + By("Client pod created") + + for i := 0; i < 20; i++ { + time.Sleep(3 * time.Second) + resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(BeNil()) + } + }) }) From 4a141529129e1f4110e7def30d4ace6bced25d19 Mon Sep 17 00:00:00 2001 From: leigh capili Date: Tue, 16 Apr 2019 15:46:49 -0600 Subject: [PATCH 040/209] Make Serializer.options private and immutable and improve godoc --- .../pkg/runtime/serializer/json/json.go | 54 ++++++++++--------- .../pkg/runtime/serializer/json/json_test.go | 4 +- 2 files changed, 32 insertions(+), 26 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 41972968467..69ada8ecf9c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -37,7 +37,7 @@ import ( // is not nil, the object has the group, version, and kind fields set. // Deprecated: use NewSerializerWithOptions instead.
func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, &SerializerOptions{false, pretty, false}) + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // matches JSON, and will error if constructs are used that do not serialize to JSON. // Deprecated: use NewSerializerWithOptions instead. func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, &SerializerOptions{true, false, false}) + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) } // NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML -// form. If typer is not nil, the object has the group, version, and kind fields set. +// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer +// and are immutable. -func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, serializerOptions *SerializerOptions) *Serializer { +func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - SerializerOptions: serializerOptions, + meta: meta, + creater: creater, + typer: typer, + options: options, } } -// SerializerOptions holds the options which are used to creating a JSON/YAML serializer. -// For example: -// (1) we can creates a JSON serializer once we set `Yaml` to `false`. -// (2) we can creates a YAML serializer once we set `Yaml` to `true`. This serializer supports only the subset of YAML that -// matches JSON, and will error if constructs are used that do not serialize to JSON. -// Please note that `Pretty` is silently ignored when `Yaml` is `true`. -// (3) we can creates a strict JSON/YAML serializer that can also return errors of type strictDecodingError, once we set -// `Strict` to `true`. And note that this serializer is not as performant as the non-strict variant, and should not be -// used in fast paths. +// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. +// example: +// (1) To configure a JSON serializer, set `Yaml` to `false`. +// (2) To configure a YAML serializer, set `Yaml` to `true`. +// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. type SerializerOptions struct { - Yaml bool + // Yaml: configures the Serializer to work with JSON(false) or YAML(true). + // When `Yaml` is enabled, this serializer only supports the subset of YAML that + // matches JSON, and will error if constructs are used that do not serialize to JSON. Yaml bool + + // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. + // This option is silently ignored when `Yaml` is `true`. Pretty bool + + // Strict: configures the Serializer to return strictDecodingErrors when duplicate fields are present decoding JSON or YAML.
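+ // For example (an illustrative sketch only; creater and typer would come from the caller's scheme, as in this package's tests): + //   s := NewSerializerWithOptions(DefaultMetaFactory, creater, typer, SerializerOptions{Yaml: true, Strict: true})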
+ // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. Strict bool } type Serializer struct { meta MetaFactory + options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper - *SerializerOptions } // Serializer implements Serializer @@ -193,7 +199,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } data := originalData - if s.Yaml { + if s.options.Yaml { altered, err := yaml.YAMLToJSON(data) if err != nil { return nil, nil, err @@ -251,7 +257,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } // If the deserializer is non-strict, return successfully here. - if !s.Strict { + if !s.options.Strict { return obj, actual, nil } @@ -280,7 +286,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.Yaml { + if s.options.Yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err @@ -293,7 +299,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return err } - if s.Pretty { + if s.options.Pretty { data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err @@ -307,7 +313,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.Yaml { + if s.options.Yaml { // we could potentially look for '---' return false, true, nil } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go index 76e01c60bae..3984cf143f7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go @@ -418,9 +418,9 @@ func TestDecode(t *testing.T) { for i, test := range testCases { var s runtime.Serializer if test.yaml { - s = json.NewSerializerWithOptions(json.DefaultMetaFactory, test.creater, test.typer, &json.SerializerOptions{Yaml: test.yaml, Pretty: false, Strict: test.strict}) + s = json.NewSerializerWithOptions(json.DefaultMetaFactory, test.creater, test.typer, json.SerializerOptions{Yaml: test.yaml, Pretty: false, Strict: test.strict}) } else { - s = json.NewSerializerWithOptions(json.DefaultMetaFactory, test.creater, test.typer, &json.SerializerOptions{Yaml: test.yaml, Pretty: test.pretty, Strict: test.strict}) + s = json.NewSerializerWithOptions(json.DefaultMetaFactory, test.creater, test.typer, json.SerializerOptions{Yaml: test.yaml, Pretty: test.pretty, Strict: test.strict}) } obj, gvk, err := s.Decode([]byte(test.data), test.defaultGVK, test.into) From c76bb083577ff4683184d96adadf97eaa04922f3 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Mon, 15 Apr 2019 18:26:30 +0000 Subject: [PATCH 041/209] Enable conformance requirement check --- hack/verify-conformance-requirements.sh | 51 +++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100755 hack/verify-conformance-requirements.sh diff --git a/hack/verify-conformance-requirements.sh b/hack/verify-conformance-requirements.sh new file mode 100755 index 00000000000..58accea5bff --- /dev/null +++ b/hack/verify-conformance-requirements.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + 
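+# Usage (a sketch; the script takes no arguments and derives KUBE_ROOT from its own location): +#   hack/verify-conformance-requirements.sh +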
+# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +source "${KUBE_ROOT}/hack/lib/util.sh" + +kube::golang::verify_go_version + +cd "${KUBE_ROOT}" + +errors=() +# Check that conformance tests follow the requirements described at https://git.k8s.io/community/contributors/devel/sig-architecture/conformance-tests.md#conformance-test-requirements +if ! failedLint=$(go run "${KUBE_ROOT}"/hack/conformance/check_conformance_test_requirements.go "${KUBE_ROOT}"/test/e2e/) +then + errors+=( "${failedLint}" ) +fi + +# Check to be sure all the packages that should pass lint are passing. +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All e2e test source files have been linted for conformance requirements.' +else + { + echo "Errors from lint:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please review the above errors.' + echo + } >&2 + exit 1 +fi + From bb5b4ad0b23dc0d393391619fe4ffe6be0781ff4 Mon Sep 17 00:00:00 2001 From: Kevin Taylor Date: Sat, 13 Apr 2019 17:02:52 +0100 Subject: [PATCH 042/209] Beta upgrade for feature gate VolumeSubpathEnvExpansion --- api/openapi-spec/swagger.json | 2 +- pkg/apis/core/types.go | 2 +- pkg/features/kube_features.go | 4 ++-- staging/src/k8s.io/api/core/v1/generated.proto | 2 +- staging/src/k8s.io/api/core/v1/types.go | 2 +- .../api/core/v1/types_swagger_doc_generated.go | 2 +- test/e2e/common/expansion.go | 12 ++++++------ 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index ec0639d0f8c..51c67195955 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -11122,7 +11122,7 @@ "type": "string" }, "subPathExpr": { - "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.", + "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.", "type": "string" } }, diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 157d38ced5f..98a2d4ce9cc 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -1683,7 +1683,7 @@ type VolumeMount struct { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is alpha in 1.14. + // This field is beta in 1.15.
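+ // For example (illustrative; a POD_NAME variable is assumed and not defined by this patch): with a container env var POD_NAME, `subPathExpr: $(POD_NAME)` mounts the volume's $(POD_NAME) subdirectory, giving each pod its own directory on a shared volume.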
// +optional SubPathExpr string } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 61f2bb93745..ca56aba9996 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -322,7 +322,7 @@ const ( PodReadinessGates utilfeature.Feature = "PodReadinessGates" // owner: @kevtaylor - // alpha: v1.11 + // beta: v1.15 // // Allow subpath environment variable substitution // Only applicable if the VolumeSubpath feature is also enabled @@ -495,7 +495,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, PodReadinessGates: {Default: true, PreRelease: utilfeature.GA, LockToDefault: true}, // remove in 1.16 - VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpathEnvExpansion: {Default: true, PreRelease: utilfeature.Beta}, KubeletPluginsWatcher: {Default: true, PreRelease: utilfeature.GA, LockToDefault: true}, // remove in 1.16 ResourceQuotaScopeSelectors: {Default: true, PreRelease: utilfeature.Beta}, CSIBlockVolume: {Default: true, PreRelease: utilfeature.Beta}, diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index b9d569f5f93..11b8ad25822 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -4644,7 +4644,7 @@ message VolumeMount { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is alpha in 1.14. + // This field is beta in 1.15. // +optional optional string subPathExpr = 6; } diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 3af134400cb..8b24abf87bd 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -1775,7 +1775,7 @@ type VolumeMount struct { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is alpha in 1.14. + // This field is beta in 1.15. // +optional SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 2c5b04f29e3..e63a9cc18ac 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -2273,7 +2273,7 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.", } func (VolumeMount) SwaggerDoc() map[string]string { diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go index f2994b91196..b7a03dc41aa 100644 --- a/test/e2e/common/expansion.go +++ b/test/e2e/common/expansion.go @@ -157,7 +157,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Description: Make sure a container's subpath can be set using an expansion of environment variables. */ - It("should allow substituting values in a volume subpath [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion]", func() { + It("should allow substituting values in a volume subpath [sig-storage][NodeFeature:VolumeSubpathEnvExpansion]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -217,7 +217,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Description: Make sure a container's subpath can not be set using an expansion of environment variables when backticks are supplied. */ - It("should fail substituting values in a volume subpath with backticks [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + It("should fail substituting values in a volume subpath with backticks [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) pod := &v1.Pod{ @@ -266,7 +266,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Description: Make sure a container's subpath can not be set using an expansion of environment variables when absolute path is supplied. */ - It("should fail substituting values in a volume subpath with absolute path [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + It("should fail substituting values in a volume subpath with absolute path [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) pod := &v1.Pod{ @@ -314,7 +314,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Testname: var-expansion-subpath-ready-from-failed-state Description: Verify that a failing subpath expansion can be modified during the lifecycle of a container. */ - It("should verify that a failing subpath expansion can be modified during the lifecycle of a container [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + It("should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) containerName := "dapi-container" @@ -405,7 +405,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { 3. 
successful expansion of the subpathexpr isn't required for volume cleanup */ - It("should succeed in writing subpaths in container [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + It("should succeed in writing subpaths in container [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) containerName := "dapi-container" @@ -514,7 +514,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { */ - It("should not change the subpath mount on a container restart if the environment variable changes [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + It("should not change the subpath mount on a container restart if the environment variable changes [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() { suffix := string(uuid.NewUUID()) podName := fmt.Sprintf("var-expansion-%s", suffix) From 08e320fa4ef96880f0c8ac7ee69f118306c95702 Mon Sep 17 00:00:00 2001 From: JieJhih Jhang Date: Tue, 9 Apr 2019 15:30:11 +0800 Subject: [PATCH 043/209] support ipv6 in bind address use split host port func instead trim specific character add unit test for metrics and healthz bind address recover import package refactor set default kube proxy configuration fix ipv4 condition fix set default port condition rewrite call function occasion to reduce error set ipv6 default value move get GetBindAddressHostPort to util use one func to handle deprecated series update bazel define address type return earlier in the error case refactor set default kube proxy configuration logic recover import package preserve some of the original comments add get default address func add append port if needed unit test rewrite unit test for deprecated flags remove unused codes --- cmd/kube-proxy/app/BUILD | 1 + cmd/kube-proxy/app/server.go | 44 ++++--------- cmd/kube-proxy/app/server_test.go | 76 ++++++++++++++++++++++ pkg/proxy/apis/config/v1alpha1/BUILD | 1 + pkg/proxy/apis/config/v1alpha1/defaults.go | 31 +++++++-- pkg/proxy/util/utils.go | 21 ++++++ pkg/proxy/util/utils_test.go | 47 +++++++++++++ 7 files changed, 181 insertions(+), 40 deletions(-) diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 38459a7bcec..667faf2f26b 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -31,6 +31,7 @@ go_library( "//pkg/proxy/iptables:go_default_library", "//pkg/proxy/ipvs:go_default_library", "//pkg/proxy/userspace:go_default_library", + "//pkg/proxy/util:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/filesystem:go_default_library", "//pkg/util/flag:go_default_library", diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index c91f93244fc..efa85e4116c 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -61,6 +61,7 @@ import ( "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/proxy/ipvs" "k8s.io/kubernetes/pkg/proxy/userspace" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" "k8s.io/kubernetes/pkg/util/configz" "k8s.io/kubernetes/pkg/util/filesystem" utilflag "k8s.io/kubernetes/pkg/util/flag" @@ -212,8 +213,8 @@ func NewOptions() *Options { func (o *Options) Complete() error { if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 { klog.Warning("WARNING: all flags other than --config, --write-config-to, and --cleanup are deprecated. 
Please begin using a config file ASAP.") - o.applyDeprecatedHealthzPortToConfig() - o.applyDeprecatedMetricsPortToConfig() + o.config.HealthzBindAddress = addressFromDeprecatedFlags(o.config.HealthzBindAddress, o.healthzPort) + o.config.MetricsBindAddress = addressFromDeprecatedFlags(o.config.MetricsBindAddress, o.metricsPort) } // Load the config file here in Complete, so that Validate validates the fully-resolved config. @@ -357,38 +358,15 @@ func (o *Options) writeConfigFile() error { return nil } -// applyDeprecatedHealthzPortToConfig sets o.config.HealthzBindAddress from -// flags passed on the command line based on the following rules: -// -// 1. If --healthz-port is 0, disable the healthz server. -// 2. Otherwise, use the value of --healthz-port for the port portion of -// o.config.HealthzBindAddress -func (o *Options) applyDeprecatedHealthzPortToConfig() { - if o.healthzPort == 0 { - o.config.HealthzBindAddress = "" - return +// addressFromDeprecatedFlags returns server address from flags +// passed on the command line based on the following rules: +// 1. If port is 0, disable the server (e.g. set address to empty). +// 2. Otherwise, set the port portion of the config accordingly. +func addressFromDeprecatedFlags(addr string, port int32) string { + if port == 0 { + return "" } - - index := strings.Index(o.config.HealthzBindAddress, ":") - if index != -1 { - o.config.HealthzBindAddress = o.config.HealthzBindAddress[0:index] - } - - o.config.HealthzBindAddress = fmt.Sprintf("%s:%d", o.config.HealthzBindAddress, o.healthzPort) -} - -func (o *Options) applyDeprecatedMetricsPortToConfig() { - if o.metricsPort == 0 { - o.config.MetricsBindAddress = "" - return - } - - index := strings.Index(o.config.MetricsBindAddress, ":") - if index != -1 { - o.config.MetricsBindAddress = o.config.MetricsBindAddress[0:index] - } - - o.config.MetricsBindAddress = fmt.Sprintf("%s:%d", o.config.MetricsBindAddress, o.metricsPort) + return proxyutil.AppendPortIfNeeded(addr, port) } // loadConfigFromFile loads the contents of file and decodes it as a diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index 345e783cf20..dabafa56df3 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -549,3 +549,79 @@ func (s *fakeProxyServerError) Run() error { return fmt.Errorf("mocking error from ProxyServer.Run()") } } + +func TestAddressFromDeprecatedFlags(t *testing.T) { + testCases := []struct { + name string + healthzPort int32 + healthzBindAddress string + metricsPort int32 + metricsBindAddress string + expHealthz string + expMetrics string + }{ + { + name: "IPv4 bind address", + healthzBindAddress: "1.2.3.4", + healthzPort: 12345, + metricsBindAddress: "2.3.4.5", + metricsPort: 23456, + expHealthz: "1.2.3.4:12345", + expMetrics: "2.3.4.5:23456", + }, + { + name: "IPv4 bind address has port", + healthzBindAddress: "1.2.3.4:12345", + healthzPort: 23456, + metricsBindAddress: "2.3.4.5:12345", + metricsPort: 23456, + expHealthz: "1.2.3.4:12345", + expMetrics: "2.3.4.5:12345", + }, + { + name: "IPv6 bind address", + healthzBindAddress: "fd00:1::5", + healthzPort: 12345, + metricsBindAddress: "fd00:1::6", + metricsPort: 23456, + expHealthz: "[fd00:1::5]:12345", + expMetrics: "[fd00:1::6]:23456", + }, + { + name: "IPv6 bind address has port", + healthzBindAddress: "[fd00:1::5]:12345", + healthzPort: 56789, + metricsBindAddress: "[fd00:1::6]:56789", + metricsPort: 12345, + expHealthz: "[fd00:1::5]:12345", + expMetrics: "[fd00:1::6]:56789", + }, + { + name: 
"Invalid IPv6 Config", + healthzBindAddress: "[fd00:1::5]", + healthzPort: 12345, + metricsBindAddress: "[fd00:1::6]", + metricsPort: 56789, + expHealthz: "[fd00:1::5]", + expMetrics: "[fd00:1::6]", + }, + } + + for i := range testCases { + gotHealthz := addressFromDeprecatedFlags(testCases[i].healthzBindAddress, testCases[i].healthzPort) + gotMetrics := addressFromDeprecatedFlags(testCases[i].metricsBindAddress, testCases[i].metricsPort) + + errFn := func(name, except, got string) { + t.Errorf("case %s: expected %v, got %v", name, except, got) + } + + if gotHealthz != testCases[i].expHealthz { + errFn(testCases[i].name, testCases[i].expHealthz, gotHealthz) + } + + if gotMetrics != testCases[i].expMetrics { + errFn(testCases[i].name, testCases[i].expMetrics, gotMetrics) + } + + } +} diff --git a/pkg/proxy/apis/config/v1alpha1/BUILD b/pkg/proxy/apis/config/v1alpha1/BUILD index aed068d8458..d1f27332a41 100644 --- a/pkg/proxy/apis/config/v1alpha1/BUILD +++ b/pkg/proxy/apis/config/v1alpha1/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/proxy/apis/config:go_default_library", + "//pkg/proxy/util:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/proxy/apis/config/v1alpha1/defaults.go b/pkg/proxy/apis/config/v1alpha1/defaults.go index b6417744a23..5c7489d7e02 100644 --- a/pkg/proxy/apis/config/v1alpha1/defaults.go +++ b/pkg/proxy/apis/config/v1alpha1/defaults.go @@ -18,14 +18,16 @@ package v1alpha1 import ( "fmt" - "strings" + "net" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/master/ports" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" "k8s.io/utils/pointer" ) @@ -34,19 +36,24 @@ func addDefaultingFuncs(scheme *kruntime.Scheme) error { } func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyConfiguration) { + if len(obj.BindAddress) == 0 { obj.BindAddress = "0.0.0.0" } + + defaultHealthzAddress, defaultMetricsAddress := getDefaultAddresses(obj.BindAddress) + if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = fmt.Sprintf("0.0.0.0:%v", ports.ProxyHealthzPort) - } else if !strings.Contains(obj.HealthzBindAddress, ":") { - obj.HealthzBindAddress += fmt.Sprintf(":%v", ports.ProxyHealthzPort) + obj.HealthzBindAddress = fmt.Sprintf("%s:%v", defaultHealthzAddress, ports.ProxyHealthzPort) + } else { + obj.HealthzBindAddress = proxyutil.AppendPortIfNeeded(obj.HealthzBindAddress, ports.ProxyHealthzPort) } if obj.MetricsBindAddress == "" { - obj.MetricsBindAddress = fmt.Sprintf("127.0.0.1:%v", ports.ProxyStatusPort) - } else if !strings.Contains(obj.MetricsBindAddress, ":") { - obj.MetricsBindAddress += fmt.Sprintf(":%v", ports.ProxyStatusPort) + obj.MetricsBindAddress = fmt.Sprintf("%s:%v", defaultMetricsAddress, ports.ProxyStatusPort) + } else { + obj.MetricsBindAddress = proxyutil.AppendPortIfNeeded(obj.MetricsBindAddress, ports.ProxyStatusPort) } + if obj.OOMScoreAdj == nil { temp := int32(qos.KubeProxyOOMScoreAdj) obj.OOMScoreAdj = &temp @@ -121,3 +128,13 @@ func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyCo obj.FeatureGates = make(map[string]bool) } } + +// getDefaultAddresses returns default 
address of healthz and metrics server +// based on the given bind address. IPv6 addresses are enclosed in square +// brackets for appending the port. +func getDefaultAddresses(bindAddress string) (defaultHealthzAddress, defaultMetricsAddress string) { + if net.ParseIP(bindAddress).To4() != nil { + return "0.0.0.0", "127.0.0.1" + } + return "[::]", "[::1]" +} diff --git a/pkg/proxy/util/utils.go b/pkg/proxy/util/utils.go index c513d26a5a9..822da8534d3 100644 --- a/pkg/proxy/util/utils.go +++ b/pkg/proxy/util/utils.go @@ -214,3 +214,24 @@ func filterWithCondition(strs []string, expectedCondition bool, conditionFunc fu } return corrects, incorrects } + +// AppendPortIfNeeded appends the given port to IP address unless it is already in +// "ipv4:port" or "[ipv6]:port" format. +func AppendPortIfNeeded(addr string, port int32) string { + // Return if address is already in "ipv4:port" or "[ipv6]:port" format. + if _, _, err := net.SplitHostPort(addr); err == nil { + return addr + } + + // Simply return for invalid case. This should be caught by validation instead. + ip := net.ParseIP(addr) + if ip == nil { + return addr + } + + // Append port to address. + if ip.To4() != nil { + return fmt.Sprintf("%s:%d", addr, port) + } + return fmt.Sprintf("[%s]:%d", addr, port) +} diff --git a/pkg/proxy/util/utils_test.go b/pkg/proxy/util/utils_test.go index 891a3520f1c..9abf40fed4e 100644 --- a/pkg/proxy/util/utils_test.go +++ b/pkg/proxy/util/utils_test.go @@ -396,3 +396,50 @@ func TestGetNodeAddressses(t *testing.T) { } } } + +func TestAppendPortIfNeeded(t *testing.T) { + testCases := []struct { + name string + addr string + port int32 + expect string + }{ + { + name: "IPv4 all-zeros bind address has port", + addr: "0.0.0.0:12345", + port: 23456, + expect: "0.0.0.0:12345", + }, + { + name: "non-zeros IPv4 config", + addr: "9.8.7.6", + port: 12345, + expect: "9.8.7.6:12345", + }, + { + name: "IPv6 \"[::]\" bind address has port", + addr: "[::]:12345", + port: 23456, + expect: "[::]:12345", + }, + { + name: "IPv6 config", + addr: "fd00:1::5", + port: 23456, + expect: "[fd00:1::5]:23456", + }, + { + name: "Invalid IPv6 Config", + addr: "[fd00:1::5]", + port: 12345, + expect: "[fd00:1::5]", + }, + } + + for i := range testCases { + got := AppendPortIfNeeded(testCases[i].addr, testCases[i].port) + if testCases[i].expect != got { + t.Errorf("case %s: expected %v, got %v", testCases[i].name, testCases[i].expect, got) + } + } +} From 9d0bc4b8dfb824968254ff2a0e36d675769c18d2 Mon Sep 17 00:00:00 2001 From: Akihito INOH Date: Tue, 2 Apr 2019 11:00:23 +0900 Subject: [PATCH 044/209] Fix golint failures of e2e/framework/util.go - part1 This is part of a series fixing golint failures in util.go. - fixes `should not use dot imports` for `ginkgo` and `gomega` - fixes golint failures from the top of the file to line 1394 of the original util.go This fixes golint failures in the following file: - test/e2e/framework/util.go This changes the following files because of the function name changes in the file above.
- test/e2e/e2e.go - test/e2e/network/network_tiers.go - test/e2e/network/service.go --- test/e2e/e2e.go | 2 +- test/e2e/framework/util.go | 176 ++++++++++++++++++------------ test/e2e/network/network_tiers.go | 2 +- test/e2e/network/service.go | 2 +- 4 files changed, 110 insertions(+), 72 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 343c099a71b..27e3000501d 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -237,7 +237,7 @@ func RunE2ETests(t *testing.T) { r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)))) } } - klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode) + klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r) } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index f48bf9e1ffd..1a7f6fc90b0 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -44,8 +44,8 @@ import ( "golang.org/x/net/websocket" "k8s.io/klog" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" gomegatypes "github.com/onsi/gomega/types" apps "k8s.io/api/apps/v1" @@ -99,23 +99,25 @@ import ( ) const ( - // How long to wait for the pod to be listable + // PodListTimeout is how long to wait for the pod to be listable. PodListTimeout = time.Minute - // Initial pod start can be delayed O(minutes) by slow docker pulls + // PodStartTimeout is how long to wait for the pod to be started. + // Initial pod start can be delayed O(minutes) by slow docker pulls. // TODO: Make this 30 seconds once #4566 is resolved. PodStartTimeout = 5 * time.Minute - // Same as `PodStartTimeout` to wait for the pod to be started, but shorter. - // Use it case by case when we are sure pod start will not be delayed + // PodStartShortTimeout is same as `PodStartTimeout` to wait for the pod to be started, but shorter. + // Use it case by case when we are sure pod start will not be delayed. // minutes by slow docker pulls or something else. PodStartShortTimeout = 2 * time.Minute - // How long to wait for a pod to be deleted + // PodDeleteTimeout is how long to wait for a pod to be deleted. PodDeleteTimeout = 5 * time.Minute // PodEventTimeout is how much we wait for a pod event to occur. PodEventTimeout = 2 * time.Minute + // NamespaceCleanupTimeout is how long to wait for the namespace to be deleted. // If there are any orphaned namespaces to clean up, this test is running // on a long lived cluster. A long wait here is preferably to spurious test // failures caused by leaked resources from a previous test run. @@ -124,73 +126,76 @@ const ( // Some pods can take much longer to get ready due to volume attach/detach latency. slowPodStartTimeout = 15 * time.Minute - // How long to wait for a service endpoint to be resolvable. + // ServiceStartTimeout is how long to wait for a service endpoint to be resolvable. ServiceStartTimeout = 3 * time.Minute - // How often to Poll pods, nodes and claims. + // Poll is how often to Poll pods, nodes and claims. Poll = 2 * time.Second PollShortTimeout = 1 * time.Minute PollLongTimeout = 5 * time.Minute + // ServiceAccountProvisionTimeout is how long to wait for a service account to be provisioned. 
	// service accounts are provisioned after namespace creation
	// a service account is required to support pod creation in a namespace as part of admission control
	ServiceAccountProvisionTimeout = 2 * time.Minute

-	// How long to try single API calls (like 'get' or 'list'). Used to prevent
+	// SingleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	SingleCallTimeout = 5 * time.Minute

-	// How long nodes have to be "ready" when a test begins. They should already
+	// NodeReadyInitialTimeout is how long nodes have to be "ready" when a test begins. They should already
	// be "ready" before the test starts, so this is small.
	NodeReadyInitialTimeout = 20 * time.Second

-	// How long pods have to be "ready" when a test begins.
+	// PodReadyBeforeTimeout is how long pods have to be "ready" when a test begins.
	PodReadyBeforeTimeout = 5 * time.Minute

	// How long pods have to become scheduled onto nodes
	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)

-	podRespondingTimeout = 15 * time.Minute
+	podRespondingTimeout = 15 * time.Minute
+	// ServiceRespondingTimeout is how long to wait for a service to be responding.
	ServiceRespondingTimeout = 2 * time.Minute
-	EndpointRegisterTimeout = time.Minute
+	// EndpointRegisterTimeout is how long to wait for an endpoint to be registered.
+	EndpointRegisterTimeout = time.Minute

-	// How long claims have to become dynamically provisioned
+	// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
	ClaimProvisionTimeout = 5 * time.Minute

-	// Same as `ClaimProvisionTimeout` to wait for claim to be dynamically provisioned, but shorter.
+	// ClaimProvisionShortTimeout is the same as `ClaimProvisionTimeout` to wait for claim to be dynamically provisioned, but shorter.
	// Use it case by case when we are sure this timeout is enough.
	ClaimProvisionShortTimeout = 1 * time.Minute

-	// How long claims have to become bound
+	// ClaimBindingTimeout is how long claims have to become bound.
	ClaimBindingTimeout = 3 * time.Minute

-	// How long claims have to become deleted
+	// ClaimDeletingTimeout is how long claims have to become deleted.
	ClaimDeletingTimeout = 3 * time.Minute

-	// How long PVs have to beome reclaimed
+	// PVReclaimingTimeout is how long PVs have to become reclaimed.
	PVReclaimingTimeout = 3 * time.Minute

-	// How long PVs have to become bound
+	// PVBindingTimeout is how long PVs have to become bound.
	PVBindingTimeout = 3 * time.Minute

-	// How long PVs have to become deleted
+	// PVDeletingTimeout is how long PVs have to become deleted.
	PVDeletingTimeout = 3 * time.Minute

-	// How long a node is allowed to become "Ready" after it is recreated before
+	// RecreateNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is recreated before
	// the test is considered failed.
	RecreateNodeReadyAgainTimeout = 10 * time.Minute

-	// How long a node is allowed to become "Ready" after it is restarted before
+	// RestartNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is restarted before
	// the test is considered failed.
	RestartNodeReadyAgainTimeout = 5 * time.Minute

-	// How long a pod is allowed to become "running" and "ready" after a node
+	// RestartPodReadyAgainTimeout is how long a pod is allowed to become "running" and "ready" after a node
	// restart before test is considered failed.
RestartPodReadyAgainTimeout = 5 * time.Minute - // How long for snapshot to create snapshotContent + // SnapshotCreateTimeout is how long for snapshot to create snapshotContent. SnapshotCreateTimeout = 5 * time.Minute // Number of objects that gc can delete in a second. @@ -208,6 +213,7 @@ const ( ) var ( + // BusyBoxImage is the image URI of BusyBox. BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox) // For parsing Kubectl version for version-skewed testing. @@ -220,23 +226,27 @@ var ( regexp.MustCompile(".*node-problem-detector.*"), } - // Serve hostname image name + // ServeHostnameImage is a serve hostname image name. ServeHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname) ) +// GetServicesProxyRequest returns a request for a service proxy. func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { return request.Resource("services").SubResource("proxy"), nil } -// unique identifier of the e2e run -var RunId = uuid.NewUUID() +// RunID is a unique identifier of the e2e run. +// Beware that this ID is not the same for all tests in the e2e run, because each Ginkgo node creates it separately. +var RunID = uuid.NewUUID() +// CreateTestingNSFn is a func that is responsible for creating namespace used for executing e2e tests. type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) +// GetMasterHost returns a hostname of a master. func GetMasterHost() string { - masterUrl, err := url.Parse(TestContext.Host) + masterURL, err := url.Parse(TestContext.Host) ExpectNoError(err) - return masterUrl.Hostname() + return masterURL.Hostname() } func nowStamp() string { @@ -244,13 +254,15 @@ func nowStamp() string { } func log(level string, format string, args ...interface{}) { - fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) + fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) } +// Logf logs the info. func Logf(format string, args ...interface{}) { log("INFO", format, args...) } +// Failf logs the fail info. func Failf(format string, args ...interface{}) { FailfWithOffset(1, format, args...) } @@ -269,52 +281,61 @@ func skipInternalf(caller int, format string, args ...interface{}) { ginkgowrapper.Skip(msg, caller+1) } +// Skipf skips with information about why the test is being skipped. func Skipf(format string, args ...interface{}) { skipInternalf(1, format, args...) } +// SkipUnlessNodeCountIsAtLeast skips if the number of nodes is less than the minNodeCount. func SkipUnlessNodeCountIsAtLeast(minNodeCount int) { if TestContext.CloudConfig.NumNodes < minNodeCount { skipInternalf(1, "Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes) } } +// SkipUnlessNodeCountIsAtMost skips if the number of nodes is greater than the maxNodeCount. func SkipUnlessNodeCountIsAtMost(maxNodeCount int) { if TestContext.CloudConfig.NumNodes > maxNodeCount { skipInternalf(1, "Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes) } } +// SkipUnlessAtLeast skips if the value is less than the minValue. func SkipUnlessAtLeast(value int, minValue int, message string) { if value < minValue { skipInternalf(1, message) } } +// SkipIfProviderIs skips if the provider is included in the unsupportedProviders. func SkipIfProviderIs(unsupportedProviders ...string) { if ProviderIs(unsupportedProviders...) 
{ skipInternalf(1, "Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider) } } +// SkipUnlessLocalEphemeralStorageEnabled skips if the LocalStorageCapacityIsolation is not enabled. func SkipUnlessLocalEphemeralStorageEnabled() { if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { skipInternalf(1, "Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation) } } +// SkipUnlessSSHKeyPresent skips if no SSH key is found. func SkipUnlessSSHKeyPresent() { if _, err := GetSigner(TestContext.Provider); err != nil { skipInternalf(1, "No SSH Key for provider %s: '%v'", TestContext.Provider, err) } } +// SkipUnlessProviderIs skips if the provider is not included in the supportedProviders. func SkipUnlessProviderIs(supportedProviders ...string) { if !ProviderIs(supportedProviders...) { skipInternalf(1, "Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider) } } +// SkipUnlessMultizone skips if the cluster does not have multizone. func SkipUnlessMultizone(c clientset.Interface) { zones, err := GetClusterZones(c) if err != nil { @@ -325,6 +346,7 @@ func SkipUnlessMultizone(c clientset.Interface) { } } +// SkipIfMultizone skips if the cluster has multizone. func SkipIfMultizone(c clientset.Interface) { zones, err := GetClusterZones(c) if err != nil { @@ -335,30 +357,35 @@ func SkipIfMultizone(c clientset.Interface) { } } +// SkipUnlessPrometheusMonitoringIsEnabled skips if the prometheus monitoring is not enabled. func SkipUnlessPrometheusMonitoringIsEnabled(supportedMonitoring ...string) { if !TestContext.EnablePrometheusMonitoring { skipInternalf(1, "Skipped because prometheus monitoring is not enabled") } } +// SkipUnlessMasterOSDistroIs skips if the master OS distro is not included in the supportedMasterOsDistros. func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) { if !MasterOSDistroIs(supportedMasterOsDistros...) { skipInternalf(1, "Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro) } } +// SkipUnlessNodeOSDistroIs skips if the node OS distro is not included in the supportedNodeOsDistros. func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) { if !NodeOSDistroIs(supportedNodeOsDistros...) { skipInternalf(1, "Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro) } } +// SkipUnlessTaintBasedEvictionsEnabled skips if the TaintBasedEvictions is not enabled. func SkipUnlessTaintBasedEvictionsEnabled() { if !utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions) { skipInternalf(1, "Only supported when %v feature is enabled", features.TaintBasedEvictions) } } +// SkipIfContainerRuntimeIs skips if the container runtime is included in the runtimes. func SkipIfContainerRuntimeIs(runtimes ...string) { for _, runtime := range runtimes { if runtime == TestContext.ContainerRuntime { @@ -367,6 +394,7 @@ func SkipIfContainerRuntimeIs(runtimes ...string) { } } +// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes. func RunIfContainerRuntimeIs(runtimes ...string) { for _, runtime := range runtimes { if runtime == TestContext.ContainerRuntime { @@ -376,6 +404,7 @@ func RunIfContainerRuntimeIs(runtimes ...string) { skipInternalf(1, "Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes) } +// RunIfSystemSpecNameIs runs if the system spec name is included in the names. 
func RunIfSystemSpecNameIs(names ...string) {
	for _, name := range names {
		if name == TestContext.SystemSpecName {
@@ -385,6 +414,7 @@ func RunIfSystemSpecNameIs(names ...string) {
	skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
}

+// ProviderIs returns true if the provider is included in the providers. Otherwise false.
func ProviderIs(providers ...string) bool {
	for _, provider := range providers {
		if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
@@ -394,6 +424,7 @@
	return false
}

+// MasterOSDistroIs returns true if the master OS distro is included in the supportedMasterOsDistros. Otherwise false.
func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
	for _, distro := range supportedMasterOsDistros {
		if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) {
@@ -403,6 +434,7 @@ func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
	return false
}

+// NodeOSDistroIs returns true if the node OS distro is included in the supportedNodeOsDistros. Otherwise false.
func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
	for _, distro := range supportedNodeOsDistros {
		if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
@@ -412,6 +444,7 @@ func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
	return false
}

+// ProxyMode returns the proxy mode of kube-proxy.
func ProxyMode(f *Framework) (string, error) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
@@ -441,6 +474,7 @@ func ProxyMode(f *Framework) (string, error) {
	return stdout, nil
}

+// SkipUnlessServerVersionGTE skips if the server version is less than v.
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
	gte, err := ServerVersionGTE(v, c)
	if err != nil {
@@ -451,6 +485,7 @@ func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersio
	}
}

+// SkipIfMissingResource skips if the gvr resource is missing.
func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
	_, err := resourceClient.List(metav1.ListOptions{})
@@ -689,6 +724,7 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string
	}
}

+// LogFailedContainers runs `kubectl logs` on failed containers.
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
	podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
@@ -707,7 +743,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
-	By("Deleting namespaces")
+	ginkgo.By("Deleting namespaces")
	nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
	ExpectNoError(err, "Failed to get namespace list")
	var deleted []string
@@ -737,8 +773,8 @@ OUTER:
		deleted = append(deleted, item.Name)
		go func(nsName string) {
			defer wg.Done()
-			defer GinkgoRecover()
-			Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(Succeed())
+			defer ginkgo.GinkgoRecover()
+			gomega.Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(gomega.Succeed())
			Logf("namespace : %v api call to delete is complete ", nsName)
		}(item.Name)
	}
@@ -746,8 +782,9 @@ OUTER:
	return deleted, nil
}

+// WaitForNamespacesDeleted waits for the namespaces to be deleted.
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
-	By("Waiting for namespaces to vanish")
+	ginkgo.By("Waiting for namespaces to vanish")
	nsMap := map[string]bool{}
	for _, ns := range namespaces {
		nsMap[ns] = true
@@ -779,6 +816,7 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
	return err
}

+// WaitForPodCondition waits for a pod to match the given condition.
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
	Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
@@ -850,9 +888,8 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
		if pv.Status.Phase == phase {
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
			return nil
-		} else {
-			Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
		}
+		Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
@@ -870,9 +907,8 @@ func WaitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Int
		if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
			Logf("All %d replicas of StatefulSet %s are ready.
(%v)", sts.Status.ReadyReplicas, statefulSetName, time.Since(start)) return nil - } else { - Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas) } + Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas) } } return fmt.Errorf("StatefulSet %s still has unready pods within %v", statefulSetName, timeout) @@ -890,9 +926,8 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, if apierrs.IsNotFound(err) { Logf("PersistentVolume %s was removed", pvName) return nil - } else { - Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) } + Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) } } return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout) @@ -903,11 +938,11 @@ func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c cl return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, Poll, timeout, true) } -// WaitForPersistentVolumeClaimPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims +// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims // to be in a specific phase or until timeout occurs, whichever comes first. func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, Poll, timeout time.Duration, matchAny bool) error { if len(pvcNames) == 0 { - return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0.") + return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0") } Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { @@ -961,7 +996,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s if labels == nil { labels = map[string]string{} } - labels["e2e-run"] = string(RunId) + labels["e2e-run"] = string(RunID) // We don't use ObjectMeta.GenerateName feature, as in case of API call // failure we don't know whether the namespace was created and what is its @@ -1251,6 +1286,7 @@ func hasRemainingContent(c clientset.Interface, dynamicClient dynamic.Interface, return contentRemaining, nil } +// ContainerInitInvariant checks for an init containers are initialized and invariant on both older and newer. func ContainerInitInvariant(older, newer runtime.Object) error { oldPod := older.(*v1.Pod) newPod := newer.(*v1.Pod) @@ -1333,8 +1369,10 @@ func initContainersInvariants(pod *v1.Pod) error { return nil } +// InvariantFunc is a func that checks for invariant. type InvariantFunc func(older, newer runtime.Object) error +// CheckInvariants checks for invariant of the each events. 
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { errs := sets.NewString() for i := range events { @@ -1528,7 +1566,7 @@ func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, } switch pod.Status.Phase { case v1.PodSucceeded: - By("Saw pod success") + ginkgo.By("Saw pod success") return true, nil case v1.PodFailed: return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status) @@ -1836,7 +1874,7 @@ func KubectlVersion() (*utilversion.Version, error) { } func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { - By("trying to dial each unique pod") + ginkgo.By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) } @@ -1877,7 +1915,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, func podsRunning(c clientset.Interface, pods *v1.PodList) []error { // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. - By("ensuring each pod is running") + ginkgo.By("ensuring each pod is running") e := []error{} error_chan := make(chan error) @@ -1924,7 +1962,7 @@ func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName } func ServiceResponding(c clientset.Interface, ns, name string) error { - By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) + ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) @@ -2022,7 +2060,7 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { if err != nil { Logf("Unexpected error occurred: %v", err) } - ExpectWithOffset(1+offset, err).NotTo(HaveOccurred(), explain...) + gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) } func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { @@ -2034,12 +2072,12 @@ func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interf } Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err) } - ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...) + gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...) } // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. 
func Cleanup(filePath, ns string, selectors ...string) { - By("using delete to clean up resources") + ginkgo.By("using delete to clean up resources") var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) @@ -2278,7 +2316,7 @@ func (f *Framework) testContainerOutputMatcher(scenarioName string, containerIndex int, expectedOutput []string, matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) { - By(fmt.Sprintf("Creating a pod to test %v", scenarioName)) + ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName)) if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) { Failf("Invalid container index: %d", containerIndex) } @@ -2300,7 +2338,7 @@ func (f *Framework) MatchContainerOutput( createdPod := podClient.Create(pod) defer func() { - By("delete the pod") + ginkgo.By("delete the pod") podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout) }() @@ -2354,11 +2392,11 @@ func (f *Framework) MatchContainerOutput( type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error) func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { - By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) + ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) events, err := eventsLister(metav1.ListOptions{}, namespace) ExpectNoError(err, "failed to list events in namespace %q", namespace) - By(fmt.Sprintf("Found %d events.", len(events.Items))) + ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items))) // Sort events by their first timestamp sortedEvents := events.Items if len(sortedEvents) > 1 { @@ -2690,10 +2728,10 @@ func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName str } func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) { - By("verifying the node has the label " + labelKey + " " + labelValue) + ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue) node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) - Expect(node.Labels[labelKey]).To(Equal(labelValue)) + gomega.Expect(node.Labels[labelKey]).To(gomega.Equal(labelValue)) } func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) { @@ -2708,15 +2746,15 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Tai // RemoveLabelOffNode is for cleaning up labels temporarily added to node, // won't fail if target label doesn't exist or has been removed. 
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) { - By("removing the label " + labelKey + " off the node " + nodeName) + ginkgo.By("removing the label " + labelKey + " off the node " + nodeName) ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey})) - By("verifying the node doesn't have the label " + labelKey) + ginkgo.By("verifying the node doesn't have the label " + labelKey) ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey})) } func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) { - By("verifying the node doesn't have the taint " + taint.ToString()) + ginkgo.By("verifying the node doesn't have the taint " + taint.ToString()) nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) { @@ -2725,7 +2763,7 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai } func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) { - By("verifying the node has the taint " + taint.ToString()) + ginkgo.By("verifying the node has the taint " + taint.ToString()) if has, err := NodeHasTaint(c, nodeName, taint); !has { ExpectNoError(err) Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName) @@ -2814,7 +2852,7 @@ func ScaleResource( kind schema.GroupKind, gr schema.GroupResource, ) error { - By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) + ginkgo.By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) if err := testutils.ScaleResourceWithRetries(scalesGetter, ns, name, size, gr); err != nil { return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err) } @@ -3012,7 +3050,7 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) { // DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods. 
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error { - By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns)) + ginkgo.By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns)) rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) if err != nil { @@ -3351,7 +3389,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw } func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) { - By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) + ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -3375,7 +3413,7 @@ func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]s } func DeletePodOrFail(c clientset.Interface, ns, name string) { - By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) + ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) err := c.CoreV1().Pods(ns).Delete(name, nil) ExpectNoError(err, "failed to delete pod %s in namespace %s", name, ns) } @@ -4496,13 +4534,13 @@ func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, if !masterNodes.Has(pod.Spec.NodeName) { if pod.Spec.NodeName != "" { _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) - Expect(scheduledCondition != nil).To(Equal(true)) - Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue)) + gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true)) + gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue)) scheduledPods = append(scheduledPods, pod) } else { _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) - Expect(scheduledCondition != nil).To(Equal(true)) - Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse)) + gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true)) + gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse)) if scheduledCondition.Reason == "Unschedulable" { notScheduledPods = append(notScheduledPods, pod) diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index 459be2b2cf4..5d6853119b9 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -102,7 +102,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { // Test 3: create a standard-tierd LB with a user-requested IP. 
By("reserving a static IP for the load balancer") - requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId) + requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred()) requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 5284923c6b7..742765cb236 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -588,7 +588,7 @@ var _ = SIGDescribe("Services", func() { staticIPName := "" if framework.ProviderIs("gce", "gke") { By("creating a static load balancer IP") - staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunId) + staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() Expect(err).NotTo(HaveOccurred(), "failed to get GCE cloud provider") From 85f21c16acf543bf554cc3c4e4284beb0a554897 Mon Sep 17 00:00:00 2001 From: Akihito INOH Date: Thu, 11 Apr 2019 06:14:47 +0900 Subject: [PATCH 045/209] Fix golint failures of e2e/framework/util.go - part2 This is a part of a series for fixing golint failures for util.go. - fixes golint failures from line 1395 to line 2353 at original util.go This fixes golint failures of the following file: - test/e2e/framework/util.go This changes following files because of change function name in above file. - test/e2e/apps/rc.go - test/e2e/apps/replica_set.go --- test/e2e/apps/rc.go | 2 +- test/e2e/apps/replica_set.go | 2 +- test/e2e/framework/util.go | 97 ++++++++++++++++++++++-------------- 3 files changed, 62 insertions(+), 39 deletions(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 75a54b8608a..e488bfc2890 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -164,7 +164,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.Poll(retryInterval, retryTimeout, framework.NewPodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index e9bec52a9d4..cf029be115f 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -166,7 +166,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.Poll(retryInterval, retryTimeout, framework.NewPodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index f48bf9e1ffd..d485326424a 100644 --- 
a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -1354,7 +1354,7 @@ func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { return nil } -// Waits default amount of time (PodStartTimeout) for the specified pod to become running. +// WaitForPodRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error { if pod.Status.Phase == v1.PodRunning { @@ -1363,19 +1363,20 @@ func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error { return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout) } -// Waits default amount of time (PodStartTimeout) for the specified pod to become running. +// WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout) } -// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. +// waitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state. func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error { return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout) } +// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running. func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace)) } @@ -1396,7 +1397,7 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition } } -// WaitTimeoutForPodEvent waits for an event to occur for a pod +// WaitTimeoutForPodEvent waits the given timeout duration for a pod event to occur. func WaitTimeoutForPodEvent(c clientset.Interface, podName, namespace, eventSelector, msg string, timeout time.Duration) error { return wait.PollImmediate(Poll, timeout, eventOccurred(c, podName, namespace, eventSelector, msg)) } @@ -1417,12 +1418,13 @@ func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg } } -// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running. +// WaitForPodNoLongerRunningInNamespace waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running. // Returns an error if timeout occurs first. func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error { return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout) } +// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop. 
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace))
}
@@ -1495,9 +1497,8 @@ func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
		if pod.Status.Phase == v1.PodFailed {
			if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
				return true, nil
-			} else {
-				return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
			}
+			return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
		}
		return false, nil
	})
@@ -1580,6 +1581,7 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
	return err
}

+// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		Logf("Waiting for pod %s to disappear", podName)
@@ -1708,6 +1710,7 @@ func countEndpointsNum(e *v1.Endpoints) int {
	return num
}

+// WaitForEndpoint waits for the specified endpoint to be ready.
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
	for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
		endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
@@ -1726,9 +1729,9 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error {
	return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}

-// Context for checking pods responses by issuing GETs to them (via the API
+// PodProxyResponseChecker is a context for checking pod responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
-type podProxyResponseChecker struct {
+type PodProxyResponseChecker struct {
	c              clientset.Interface
	ns             string
	label          labels.Selector
@@ -1737,13 +1740,14 @@ type podProxyResponseChecker struct {
	pods           *v1.PodList
}

-func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
-	return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
+// NewPodProxyResponseChecker returns a context for checking pod responses.
+func NewPodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) PodProxyResponseChecker {
+	return PodProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}

// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
-func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
+func (r PodProxyResponseChecker) CheckAllResponses() (done bool, err error) {
	successes := 0
	options := metav1.ListOptions{LabelSelector: r.label.String()}
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
@@ -1835,17 +1839,20 @@ func KubectlVersion() (*utilversion.Version, error) {
	return utilversion.ParseSemantic(matches[1])
}

+// PodsResponding waits for the pods to respond.
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
	By("trying to dial each unique pod")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
+	return wait.PollImmediate(Poll, podRespondingTimeout, NewPodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}

+// PodsCreated returns a pod list matched by the given name.
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return PodsCreatedByLabel(c, ns, name, replicas, label)
}

+// PodsCreatedByLabel returns a created pod list matched by the given label.
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
	timeout := 2 * time.Minute
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
@@ -1879,16 +1886,16 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
	// are running so non-running pods cause a timeout for this test.
	By("ensuring each pod is running")
	e := []error{}
-	error_chan := make(chan error)
+	errorChan := make(chan error)

	for _, pod := range pods.Items {
		go func(p v1.Pod) {
-			error_chan <- WaitForPodRunningInNamespace(c, &p)
+			errorChan <- WaitForPodRunningInNamespace(c, &p)
		}(pod)
	}

	for range pods.Items {
-		err := <-error_chan
+		err := <-errorChan
		if err != nil {
			e = append(e, err)
		}
@@ -1897,10 +1904,12 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
	return e
}

+// VerifyPods checks if the specified pod is responding.
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
	return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}

+// VerifyPodsRunning checks if the specified pod is running.
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
	return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}
@@ -1923,6 +1932,7 @@ func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName
	return nil
}

+// ServiceResponding waits for the service to respond.
func ServiceResponding(c clientset.Interface, ns, name string) error {
	By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))

@@ -1959,6 +1969,7 @@ func ServiceResponding(c clientset.Interface, ns, name string) error {
	})
}

+// RestclientConfig returns a config that holds the information needed to build a connection to Kubernetes clusters.
func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
	Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
	if TestContext.KubeConfig == "" {
@@ -1975,8 +1986,10 @@ func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
	return c, nil
}

+// ClientConfigGetter is a func that returns a rest client config.
type ClientConfigGetter func() (*restclient.Config, error)

+// LoadConfig returns a config for a rest client.
func LoadConfig() (*restclient.Config, error) { if TestContext.NodeE2E { // This is a node e2e test, apply the node e2e configuration @@ -1986,14 +1999,14 @@ func LoadConfig() (*restclient.Config, error) { if err != nil { if TestContext.KubeConfig == "" { return restclient.InClusterConfig() - } else { - return nil, err } + return nil, err } return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig() } +// LoadClientset returns clientset for connecting to kubernetes clusters. func LoadClientset() (*clientset.Clientset, error) { config, err := LoadConfig() if err != nil { @@ -2002,7 +2015,7 @@ func LoadClientset() (*clientset.Clientset, error) { return clientset.NewForConfig(config) } -// randomSuffix provides a random string to append to pods,services,rcs. +// RandomSuffix provides a random string to append to pods,services,rcs. // TODO: Allow service names to have the same form as names // for pods and replication controllers so we don't // need to use such a function and can instead @@ -2012,6 +2025,7 @@ func RandomSuffix() string { return strconv.Itoa(r.Int() % 10000) } +// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. func ExpectNoError(err error, explain ...interface{}) { ExpectNoErrorWithOffset(1, err, explain...) } @@ -2025,6 +2039,7 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { ExpectWithOffset(1+offset, err).NotTo(HaveOccurred(), explain...) } +// ExpectNoErrorWithRetries checks if an error occurs with the given retry count. func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { var err error for i := 0; i < maxRetries; i++ { @@ -2037,7 +2052,7 @@ func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interf ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...) } -// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. +// Cleanup stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. func Cleanup(filePath, ns string, selectors ...string) { By("using delete to clean up resources") var nsArg string @@ -2048,7 +2063,7 @@ func Cleanup(filePath, ns string, selectors ...string) { AssertCleanup(ns, selectors...) } -// Asserts that cleanup of a namespace wrt selectors occurred. +// AssertCleanup asserts that cleanup of a namespace wrt selectors occurred. func AssertCleanup(ns string, selectors ...string) { var nsArg string if ns != "" { @@ -2112,40 +2127,46 @@ func KubectlCmd(args ...string) *exec.Cmd { return cmd } -// kubectlBuilder is used to build, customize and execute a kubectl Command. +// KubectlBuilder is used to build, customize and execute a kubectl Command. // Add more functions to customize the builder as needed. -type kubectlBuilder struct { +type KubectlBuilder struct { cmd *exec.Cmd timeout <-chan time.Time } -func NewKubectlCommand(args ...string) *kubectlBuilder { - b := new(kubectlBuilder) +// NewKubectlCommand returns a KubectlBuilder for running kubectl. +func NewKubectlCommand(args ...string) *KubectlBuilder { + b := new(KubectlBuilder) b.cmd = KubectlCmd(args...) return b } -func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder { +// WithEnv sets the given environment and returns itself. 
+func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder { b.cmd.Env = env return b } -func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder { +// WithTimeout sets the given timeout and returns itself. +func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder { b.timeout = t return b } -func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder { +// WithStdinData sets the given data to stdin and returns itself. +func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder { b.cmd.Stdin = strings.NewReader(data) return &b } -func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder { +// WithStdinReader sets the given reader and returns itself. +func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder { b.cmd.Stdin = reader return &b } -func (b kubectlBuilder) ExecOrDie() string { +// ExecOrDie runs the kubectl executable or dies if error occurs. +func (b KubectlBuilder) ExecOrDie() string { str, err := b.Exec() // In case of i/o timeout error, try talking to the apiserver again after 2s before dying. // Note that we're still dying after retrying so that we can get visibility to triage it further. @@ -2174,14 +2195,15 @@ func isTimeout(err error) bool { return false } -func (b kubectlBuilder) Exec() (string, error) { +// Exec runs the kubectl executable. +func (b KubectlBuilder) Exec() (string, error) { var stdout, stderr bytes.Buffer cmd := b.cmd cmd.Stdout, cmd.Stderr = &stdout, &stderr Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately if err := cmd.Start(); err != nil { - return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err) + return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err) } errCh := make(chan error, 1) go func() { @@ -2190,19 +2212,19 @@ func (b kubectlBuilder) Exec() (string, error) { select { case err := <-errCh: if err != nil { - var rc int = 127 + var rc = 127 if ee, ok := err.(*exec.ExitError); ok { rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus()) Logf("rc: %d", rc) } return "", uexec.CodeExitError{ - Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err), + Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err), Code: rc, } } case <-b.timeout: b.cmd.Process.Kill() - return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr) + return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr) } Logf("stderr: %q", stderr.String()) Logf("stdout: %q", stdout.String()) @@ -2242,13 +2264,14 @@ func RunKubemciWithKubeconfig(args ...string) (string, error) { func RunKubemciCmd(args ...string) (string, error) { // kubemci is assumed to be in PATH. kubemci := "kubemci" - b := new(kubectlBuilder) + b := new(KubectlBuilder) args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID) b.cmd = exec.Command(kubemci, args...) return b.Exec() } +// StartCmdAndStreamOutput returns stdout and stderr after starting the given cmd. 
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) { stdout, err = cmd.StdoutPipe() if err != nil { @@ -2263,7 +2286,7 @@ func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err e return } -// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer. +// TryKill is rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer. func TryKill(cmd *exec.Cmd) { if err := cmd.Process.Kill(); err != nil { Logf("ERROR failed to kill command %v! The process may leak", cmd) From 03180ec2b2eafab0541458ae0d619c9b8bcac8c1 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Wed, 17 Apr 2019 19:04:51 +0800 Subject: [PATCH 046/209] fix golint failures of test/e2e/apps --- hack/.golint_failures | 1 - test/e2e/apps/cronjob.go | 197 ++++++++--------- test/e2e/apps/daemon_restart.go | 45 ++-- test/e2e/apps/daemon_set.go | 232 +++++++++---------- test/e2e/apps/deployment.go | 264 +++++++++++----------- test/e2e/apps/disruption.go | 29 ++- test/e2e/apps/framework.go | 1 + test/e2e/apps/job.go | 108 ++++----- test/e2e/apps/network_partition.go | 144 ++++++------ test/e2e/apps/rc.go | 72 +++--- test/e2e/apps/replica_set.go | 64 +++--- test/e2e/apps/statefulset.go | 344 ++++++++++++++--------------- test/e2e/apps/types.go | 25 ++- 13 files changed, 770 insertions(+), 756 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 327a4ce2714..8ab29fc3b7a 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -631,7 +631,6 @@ staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1 test/e2e -test/e2e/apps test/e2e/auth test/e2e/autoscaling test/e2e/chaosmonkey diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 12af593d611..9a7a97022d7 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" @@ -51,221 +51,221 @@ var _ = SIGDescribe("CronJob", func() { // Pod will complete instantly successCommand := []string{"/bin/true"} - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name) }) // multiple jobs running at once - It("should schedule multiple jobs concurrently", func() { - By("Creating a cronjob") + ginkgo.It("should schedule multiple jobs concurrently", func() { + ginkgo.By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) - By("Ensuring more than one job is running at a time") + ginkgo.By("Ensuring more than one job is running at a time") err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) - Expect(err).NotTo(HaveOccurred(), "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) - By("Ensuring at least two running jobs exists by listing jobs explicitly") + ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly") jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) - Expect(len(activeJobs) >= 2).To(BeTrue()) + gomega.Expect(len(activeJobs) >= 2).To(gomega.BeTrue()) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) // suspended should not schedule jobs - It("should not schedule jobs when suspended [Slow]", func() { - By("Creating a suspended cronjob") + ginkgo.It("should not schedule jobs when suspended [Slow]", func() { + ginkgo.By("Creating a suspended cronjob") cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1beta1.AllowConcurrent, sleepCommand, nil) t := true cronJob.Spec.Suspend = &t cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) - By("Ensuring no jobs are scheduled") + ginkgo.By("Ensuring no jobs are scheduled") err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) - By("Ensuring no job exists by listing jobs explicitly") + ginkgo.By("Ensuring no job exists by listing jobs explicitly") jobs, err := 
f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name) - Expect(jobs.Items).To(HaveLen(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name) + gomega.Expect(jobs.Items).To(gomega.HaveLen(0)) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) // only single active job is allowed for ForbidConcurrent - It("should not schedule new jobs when ForbidConcurrent [Slow]", func() { - By("Creating a ForbidConcurrent cronjob") + ginkgo.It("should not schedule new jobs when ForbidConcurrent [Slow]", func() { + ginkgo.By("Creating a ForbidConcurrent cronjob") cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) - By("Ensuring a job is scheduled") + ginkgo.By("Ensuring a job is scheduled") err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) - Expect(err).NotTo(HaveOccurred(), "Failed to schedule CronJob %s", cronJob.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s", cronJob.Name) - By("Ensuring exactly one is scheduled") + ginkgo.By("Ensuring exactly one is scheduled") cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to get CronJob %s", cronJob.Name) - Expect(cronJob.Status.Active).Should(HaveLen(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name) + gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) - By("Ensuring exactly one running job exists by listing jobs explicitly") + ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) - Expect(activeJobs).To(HaveLen(1)) + gomega.Expect(activeJobs).To(gomega.HaveLen(1)) - By("Ensuring no more jobs are scheduled") + ginkgo.By("Ensuring no more jobs are scheduled") err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) // only single active job is allowed for ReplaceConcurrent - It("should replace jobs when ReplaceConcurrent", func() { - By("Creating a 
ReplaceConcurrent cronjob") + ginkgo.It("should replace jobs when ReplaceConcurrent", func() { + ginkgo.By("Creating a ReplaceConcurrent cronjob") cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) - By("Ensuring a job is scheduled") + ginkgo.By("Ensuring a job is scheduled") err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) - Expect(err).NotTo(HaveOccurred(), "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) - By("Ensuring exactly one is scheduled") + ginkgo.By("Ensuring exactly one is scheduled") cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to get CronJob %s", cronJob.Name) - Expect(cronJob.Status.Active).Should(HaveLen(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name) + gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) - By("Ensuring exactly one running job exists by listing jobs explicitly") + ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed to list the jobs in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the jobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) - Expect(activeJobs).To(HaveLen(1)) + gomega.Expect(activeJobs).To(gomega.HaveLen(1)) - By("Ensuring the job is replaced with a new one") + ginkgo.By("Ensuring the job is replaced with a new one") err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name) - Expect(err).NotTo(HaveOccurred(), "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) // shouldn't give us unexpected warnings - It("should not emit unexpected warnings", func() { - By("Creating a cronjob") + ginkgo.It("should not emit unexpected warnings", func() { + ginkgo.By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent, nil, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) - By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly") + ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs 
explicitly") err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name) err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name) - By("Ensuring no unexpected event has happened") + ginkgo.By("Ensuring no unexpected event has happened") err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) // deleted jobs should be removed from the active list - It("should remove from active list jobs that have been deleted", func() { - By("Creating a ForbidConcurrent cronjob") + ginkgo.It("should remove from active list jobs that have been deleted", func() { + ginkgo.By("Creating a ForbidConcurrent cronjob") cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name) - By("Ensuring a job is scheduled") + ginkgo.By("Ensuring a job is scheduled") err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) - By("Ensuring exactly one is scheduled") + ginkgo.By("Ensuring exactly one is scheduled") cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) - Expect(cronJob.Status.Active).Should(HaveLen(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) - By("Deleting the job") + ginkgo.By("Deleting the job") job := cronJob.Status.Active[0] framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) - By("Ensuring job was deleted") + ginkgo.By("Ensuring job was deleted") _, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) - Expect(err).To(HaveOccurred()) - Expect(errors.IsNotFound(err)).To(BeTrue()) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - By("Ensuring the job 
is not in the cronjob active list") + ginkgo.By("Ensuring the job is not in the cronjob active list") err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name) - By("Ensuring MissingJob event has occurred") + ginkgo.By("Ensuring MissingJob event has occurred") err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"}) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) }) // cleanup of successful finished jobs, with limit of one successful job - It("should delete successful finished jobs with limit of one successful job", func() { - By("Creating a AllowConcurrent cronjob with custom history limits") + ginkgo.It("should delete successful finished jobs with limit of one successful job", func() { + ginkgo.By("Creating a AllowConcurrent cronjob with custom history limits") successLimit := int32(1) cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent, successCommand, &successLimit) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - Expect(err).NotTo(HaveOccurred(), "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name) // Job is going to complete instantly: do not check for an active job // as we are most likely to miss it - By("Ensuring a finished job exists") + ginkgo.By("Ensuring a finished job exists") err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name) - By("Ensuring a finished job exists by listing jobs explicitly") + ginkgo.By("Ensuring a finished job exists by listing jobs explicitly") jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name) _, finishedJobs := filterActiveJobs(jobs) - Expect(len(finishedJobs) == 1).To(BeTrue()) + gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue()) // Job should get deleted when the next job finishes the next minute - By("Ensuring this job and its pods 
does not exist anymore") + ginkgo.By("Ensuring this job and its pods does not exist anymore") err = waitForJobToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0]) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name) err = waitForJobsPodToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0]) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name) - By("Ensuring there is 1 finished job by listing jobs explicitly") + ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly") jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name) _, finishedJobs = filterActiveJobs(jobs) - Expect(len(finishedJobs) == 1).To(BeTrue()) + gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue()) - By("Removing cronjob") + ginkgo.By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) - Expect(err).NotTo(HaveOccurred(), "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) }) }) @@ -360,9 +360,8 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo if failIfNonEmpty { return len(curr.Status.Active) == 0, nil - } else { - return len(curr.Status.Active) != 0, nil } + return len(curr.Status.Active) != 0, nil }) } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 97077476167..7940b8c6973 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -36,8 +36,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) // This test primarily checks 2 things: @@ -52,14 +52,17 @@ const ( restartTimeout = 10 * time.Minute numPods = 10 sshPort = 22 - ADD = "ADD" - DEL = "DEL" - UPDATE = "UPDATE" + // ADD represents the ADD event + ADD = "ADD" + // DEL represents the DEL event + DEL = "DEL" + // UPDATE represents the UPDATE event + UPDATE = "UPDATE" ) -// restartDaemonConfig is a config to restart a running daemon on a node, and wait till +// RestartDaemonConfig is a config to restart a running daemon on a node, and wait till // it comes back up. It uses ssh to send a SIGTERM to the daemon. -type restartDaemonConfig struct { +type RestartDaemonConfig struct { nodeName string daemonName string healthzPort int @@ -67,12 +70,12 @@ type restartDaemonConfig struct { pollTimeout time.Duration } -// NewRestartConfig creates a restartDaemonConfig for the given node and daemon. 
-func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
+// NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
+func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig {
 	if !framework.ProviderIs("gce") {
 		framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
 	}
-	return &restartDaemonConfig{
+	return &RestartDaemonConfig{
 		nodeName:    nodeName,
 		daemonName:  daemonName,
 		healthzPort: healthzPort,
@@ -81,12 +84,12 @@ func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval
 	}
 }

-func (r *restartDaemonConfig) String() string {
+func (r *RestartDaemonConfig) String() string {
 	return fmt.Sprintf("Daemon %v on node %v", r.daemonName, r.nodeName)
 }

 // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
-func (r *restartDaemonConfig) waitUp() {
+func (r *RestartDaemonConfig) waitUp() {
 	framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
 	healthzCheck := fmt.Sprintf(
 		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
@@ -110,14 +113,14 @@ func (r *restartDaemonConfig) waitUp() {
 }

 // kill sends a SIGTERM to the daemon
-func (r *restartDaemonConfig) kill() {
+func (r *RestartDaemonConfig) kill() {
 	framework.Logf("Killing %v", r)
 	_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }

 // Restart checks if the daemon is up, kills it, and waits till it comes back up
-func (r *restartDaemonConfig) restart() {
+func (r *RestartDaemonConfig) restart() {
 	r.waitUp()
 	r.kill()
 	r.waitUp()
@@ -191,7 +194,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 	var stopCh chan struct{}
 	var tracker *podTracker

-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		// These tests require SSH
 		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 		ns = f.Namespace.Name
@@ -206,7 +209,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 			Replicas:    numPods,
 			CreatedPods: &[]*v1.Pod{},
 		}
-		Expect(framework.RunRC(config)).NotTo(HaveOccurred())
+		gomega.Expect(framework.RunRC(config)).NotTo(gomega.HaveOccurred())
 		replacePods(*config.CreatedPods, existingPods)

 		stopCh = make(chan struct{})
@@ -240,11 +243,11 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		go controller.Run(stopCh)
 	})

-	AfterEach(func() {
+	ginkgo.AfterEach(func() {
 		close(stopCh)
 	})

-	It("Controller Manager should not create/delete replicas across restart", func() {
+	ginkgo.It("Controller Manager should not create/delete replicas across restart", func() {
 		// Requires master ssh access.
 		framework.SkipUnlessProviderIs("gce", "aws")
@@ -275,7 +278,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		}
 	})

-	It("Scheduler should continue assigning pods to nodes across restart", func() {
+	ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func() {
 		// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws") @@ -293,7 +296,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true)) }) - It("Kubelet should not restart containers across restart", func() { + ginkgo.It("Kubelet should not restart containers across restart", func() { nodeIPs, err := framework.GetNodePublicIps(f.ClientSet) framework.ExpectNoError(err) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index ad439448954..1b2be266e3b 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -38,8 +38,8 @@ import ( schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -53,7 +53,7 @@ const ( daemonsetColorLabel = daemonsetLabelPrefix + "color" ) -// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning +// NamespaceNodeSelectors the annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning // node selectors labels to namespaces var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"} @@ -65,16 +65,16 @@ var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-select var _ = SIGDescribe("Daemon set [Serial]", func() { var f *framework.Framework - AfterEach(func() { + ginkgo.AfterEach(func() { // Clean up daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { - By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) + ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to be reaped") } } if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { @@ -88,7 +88,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("unable to dump pods: %v", err) } err = clearDaemonSetNodeLabels(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) f = framework.NewDefaultFramework("daemonsets") @@ -99,18 +99,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { var ns string var c clientset.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { ns = f.Namespace.Name c = f.ClientSet updatedNS, err := updateNamespaceAnnotations(c, ns) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ns = updatedNS.Name err = clearDaemonSetNodeLabels(c) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -121,23 +121,23 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ConformanceIt("should run and stop simple daemon", func() { label := map[string]string{daemonsetNameLabel: dsName} - By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) + 
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check that daemon pods launch on every node of the cluster.") + ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") err = checkDaemonStatus(f, dsName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Stop a daemon pod, check that the daemon pod is revived.") + ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) pod := podList.Items[0] err = c.CoreV1().Pods(ns).Delete(pod.Name, nil) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive") }) /* @@ -152,47 +152,47 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Initially, daemon pods should not be running on any nodes.") + ginkgo.By("Initially, daemon pods should not be running on any nodes.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes") - By("Change node label to blue, check that daemon pod is launched.") + ginkgo.By("Change node label to blue, check that daemon pod is launched.") nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) + gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0)) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) - Expect(err).NotTo(HaveOccurred(), "error setting labels on node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) - Expect(len(daemonSetLabels)).To(Equal(1)) + gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name})) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Update the node label to green, and wait for daemons to be unscheduled") + ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled") nodeSelector[daemonsetColorLabel] = "green" greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) - 
Expect(err).NotTo(HaveOccurred(), "error removing labels on node") - Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). - NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node") + gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). + NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes") - By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") + ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) - Expect(err).NotTo(HaveOccurred(), "error patching daemon set") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) - Expect(len(daemonSetLabels)).To(Equal(1)) + gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name})) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the // default scheduler. 
- It("should run and stop complex daemon with node affinity", func() { + ginkgo.It("should run and stop complex daemon with node affinity", func() { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} framework.Logf("Creating daemon %q with a node affinity", dsName) @@ -215,29 +215,29 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }, } ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Initially, daemon pods should not be running on any nodes.") + ginkgo.By("Initially, daemon pods should not be running on any nodes.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes") - By("Change node label to blue, check that daemon pod is launched.") + ginkgo.By("Change node label to blue, check that daemon pod is launched.") nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) + gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0)) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) - Expect(err).NotTo(HaveOccurred(), "error setting labels on node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) - Expect(len(daemonSetLabels)).To(Equal(1)) + gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name})) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes") err = checkDaemonStatus(f, dsName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Remove the node label and wait for daemons to be unscheduled") + ginkgo.By("Remove the node label and wait for daemons to be unscheduled") _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{}) - Expect(err).NotTo(HaveOccurred(), "error removing labels on node") - Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). - NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node") + gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))). 
+ NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes") }) /* @@ -247,75 +247,75 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ConformanceIt("should retry creating failed daemon pods", func() { label := map[string]string{daemonsetNameLabel: dsName} - By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) + ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check that daemon pods launch on every node of the cluster.") + ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") err = checkDaemonStatus(f, dsName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.") + ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) pod := podList.Items[0] pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed _, err = c.CoreV1().Pods(ns).UpdateStatus(&pod) - Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error failing a daemon pod") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive") - By("Wait for the failed daemon pod to be completely deleted.") + ginkgo.By("Wait for the failed daemon pod to be completely deleted.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod)) - Expect(err).NotTo(HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted") }) // This test should not be added to conformance. We will consider deprecating OnDelete when the // extensions/v1beta1 and apps/v1beta1 are removed. 
- It("should not update pod when spec was updated and update strategy is OnDelete", func() { + ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() { label := map[string]string{daemonsetNameLabel: dsName} framework.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check that daemon pods launch on every node of the cluster.") + ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) waitForHistoryCreated(c, ns, label, 1) first := curHistory(listDaemonHistories(c, ns, label), ds) firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey] - Expect(first.Revision).To(Equal(int64(1))) + gomega.Expect(first.Revision).To(gomega.Equal(int64(1))) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) - By("Update daemon pods image.") + ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check that daemon pods images aren't updated.") + ginkgo.By("Check that daemon pods images aren't updated.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check that daemon pods are still running on every node of the cluster.") + ginkgo.By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) waitForHistoryCreated(c, ns, label, 2) cur := curHistory(listDaemonHistories(c, ns, label), ds) - Expect(cur.Revision).To(Equal(int64(2))) - Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash)) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(2))) + gomega.Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash)) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) }) @@ -330,48 +330,48 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check 
that daemon pods launch on every node of the cluster.") + ginkgo.By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) waitForHistoryCreated(c, ns, label, 1) cur := curHistory(listDaemonHistories(c, ns, label), ds) hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] - Expect(cur.Revision).To(Equal(int64(1))) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(1))) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) - By("Update daemon pods image.") + ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Get the number of nodes, and set the timeout appropriately. nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) nodeCount := len(nodes.Items) retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second - By("Check that daemon pods images are updated.") + ginkgo.By("Check that daemon pods images are updated.") err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Check that daemon pods are still running on every node of the cluster.") + ginkgo.By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") // Check history and labels ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) waitForHistoryCreated(c, ns, label, 2) cur = curHistory(listDaemonHistories(c, ns, label), ds) hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] - Expect(cur.Revision).To(Equal(int64(2))) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(2))) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) }) @@ -382,17 +382,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { */ framework.ConformanceIt("should rollback without unnecessary restarts", func() { schedulableNodes := framework.GetReadySchedulableNodesOrDie(c) - Expect(len(schedulableNodes.Items)).To(BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") + gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") framework.Logf("Create a RollingUpdate DaemonSet") label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = 
apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Check that daemon pods launch on every node of the cluster") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") framework.Logf("Update the DaemonSet to trigger a rollout") // We use a nonexistent image here, so that we make sure it won't finish @@ -400,11 +400,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Make sure we're in the middle of a rollout err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pods := listDaemonPods(c, ns, label) var existingPods, newPods []*v1.Pod @@ -422,21 +422,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { } schedulableNodes = framework.GetReadySchedulableNodesOrDie(c) if len(schedulableNodes.Items) < 2 { - Expect(len(existingPods)).To(Equal(0)) + gomega.Expect(len(existingPods)).To(gomega.Equal(0)) } else { - Expect(len(existingPods)).NotTo(Equal(0)) + gomega.Expect(len(existingPods)).NotTo(gomega.Equal(0)) } - Expect(len(newPods)).NotTo(Equal(0)) + gomega.Expect(len(newPods)).NotTo(gomega.Equal(0)) framework.Logf("Roll back the DaemonSet before rollout is complete") rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = image }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Make sure DaemonSet rollback is complete") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted pods = listDaemonPods(c, ns, label) @@ -445,7 +445,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { rollbackPods[pod.Name] = true } for _, pod := range existingPods { - Expect(rollbackPods[pod.Name]).To(BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name)) + gomega.Expect(rollbackPods[pod.Name]).To(gomega.BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name)) } }) }) @@ -486,8 +486,8 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) * selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.CoreV1().Pods(ns).List(options) - Expect(err).NotTo(HaveOccurred()) - Expect(len(podList.Items)).To(BeNumerically(">", 0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0)) return podList } @@ -569,7 +569,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s if err != nil { return nil, err } else if len(newLabels) != len(labels) { - return nil, 
fmt.Errorf("Could not set daemon set test labels as expected.") + return nil, fmt.Errorf("Could not set daemon set test labels as expected") } return newNode, nil @@ -593,7 +593,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames continue } if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { - nodesToPodCount[pod.Spec.NodeName] += 1 + nodesToPodCount[pod.Spec.NodeName]++ } } framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount)) @@ -667,7 +667,7 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (b func checkDaemonStatus(f *framework.Framework, dsName string) error { ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("Could not get daemon set from v1.") + return fmt.Errorf("Could not get daemon set from v1") } desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady if desired != scheduled && desired != ready { @@ -694,7 +694,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS if podImage != image { framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage) } else { - nodesToUpdatedPodCount[pod.Spec.NodeName] += 1 + nodesToUpdatedPodCount[pod.Spec.NodeName]++ } if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { framework.Logf("Pod %s is not available", pod.Name) @@ -718,9 +718,9 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) { for _, pod := range podList.Items { podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] - Expect(len(podHash)).To(BeNumerically(">", 0)) + gomega.Expect(len(podHash)).To(gomega.BeNumerically(">", 0)) if len(hash) > 0 { - Expect(podHash).To(Equal(hash)) + gomega.Expect(podHash).To(gomega.Equal(hash)) } } } @@ -740,15 +740,15 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st return false, nil } err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn) - Expect(err).NotTo(HaveOccurred(), "error waiting for controllerrevisions to be created") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for controllerrevisions to be created") } func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} historyList, err := c.AppsV1().ControllerRevisions(ns).List(options) - Expect(err).NotTo(HaveOccurred()) - Expect(len(historyList.Items)).To(BeNumerically(">", 0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0)) return historyList } @@ -758,16 +758,16 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a for i := range historyList.Items { history := &historyList.Items[i] // Every history should have the hash label - Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0)) + gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0)) match, err := daemon.Match(ds, history) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if match { curHistory = history foundCurHistories++ } } - 
Expect(foundCurHistories).To(Equal(1)) - Expect(curHistory).NotTo(BeNil()) + gomega.Expect(foundCurHistories).To(gomega.Equal(1)) + gomega.Expect(curHistory).NotTo(gomega.BeNil()) return curHistory } diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index d365a8e4a55..4af42ab0695 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -22,8 +22,8 @@ import ( "time" "github.com/davecgh/go-spew/spew" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -57,18 +57,18 @@ var _ = SIGDescribe("Deployment", func() { var ns string var c clientset.Interface - AfterEach(func() { + ginkgo.AfterEach(func() { failureTrap(c, ns) }) f := framework.NewDefaultFramework("deployment") - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) - It("deployment reaping should cascade to its replica sets and pods", func() { + ginkgo.It("deployment reaping should cascade to its replica sets and pods", func() { testDeleteDeployment(f) }) /* @@ -102,13 +102,13 @@ var _ = SIGDescribe("Deployment", func() { framework.ConformanceIt("deployment should support rollover", func() { testRolloverDeployment(f) }) - It("deployment should support rollback", func() { + ginkgo.It("deployment should support rollback", func() { testRollbackDeployment(f) }) - It("iterative rollouts should eventually progress", func() { + ginkgo.It("iterative rollouts should eventually progress", func() { testIterativeDeployments(f) }) - It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() { + ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() { testDeploymentsControllerRef(f) }) /* @@ -189,22 +189,22 @@ func newDeploymentRollback(name string, annotations map[string]string, revision func stopDeployment(c clientset.Interface, ns, deploymentName string) { deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Deleting deployment %s", deploymentName) framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)) framework.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) - Expect(errors.IsNotFound(err)).To(BeTrue()) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) options := metav1.ListOptions{LabelSelector: selector.String()} rss, err := c.AppsV1().ReplicaSets(ns).List(options) - Expect(err).NotTo(HaveOccurred()) - Expect(rss.Items).Should(HaveLen(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(rss.Items).Should(gomega.HaveLen(0)) framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) var pods *v1.PodList if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { @@ -233,20 +233,20 @@ func testDeleteDeployment(f *framework.Framework) { d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, 
apps.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} deploy, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 1 err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentComplete(c, deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - Expect(err).NotTo(HaveOccurred()) - Expect(newRS).NotTo(Equal(nilRs)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(newRS).NotTo(gomega.Equal(nilRs)) stopDeployment(c, ns, deploymentName) } @@ -269,34 +269,34 @@ func testRollingUpdateDeployment(f *framework.Framework) { rs.Annotations = annotations framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) _, err := c.AppsV1().ReplicaSets(ns).Create(rs) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) - Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %s", err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %s", err) // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-rolling-update-deployment" framework.Logf("Creating deployment %q", deploymentName) d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 3546343826724305833. 
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name) err = framework.WaitForDeploymentComplete(c, deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // There should be 1 old RS (nginx-controller, which is adopted) framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1()) - Expect(err).NotTo(HaveOccurred()) - Expect(len(allOldRSs)).Should(Equal(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1)) } func testRecreateDeployment(f *framework.Framework) { @@ -308,15 +308,15 @@ func testRecreateDeployment(f *framework.Framework) { framework.Logf("Creating deployment %q", deploymentName) d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType) deployment, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 1 framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting deployment %q to complete", deploymentName) - Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) // Update deployment to delete redis pods and bring up nginx pods. framework.Logf("Triggering a new rollout for deployment %q", deploymentName) @@ -324,10 +324,10 @@ func testRecreateDeployment(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Name = NginxImageName update.Spec.Template.Spec.Containers[0].Image = NginxImage }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName) - Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred()) + gomega.Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred()) } // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy @@ -344,18 +344,18 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { replicas := int32(1) revisionHistoryLimit := utilpointer.Int32Ptr(0) _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. 
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) - Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-cleanup-deployment" framework.Logf("Creating deployment %s", deploymentName) pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) - Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err) options := metav1.ListOptions{ ResourceVersion: pods.ListMeta.ResourceVersion, @@ -363,7 +363,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { stopCh := make(chan struct{}) defer close(stopCh) w, err := c.CoreV1().Pods(ns).Watch(options) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) go func() { // There should be only one pod being created, which is the pod with the redis image. // The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector. @@ -393,11 +393,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit _, err = c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) + ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // testRolloverDeployment tests that deployment supports rollover. @@ -415,14 +415,14 @@ func testRolloverDeployment(f *framework.Framework) { rsName := "test-rollover-controller" rsReplicas := int32(1) _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas) - Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) // Wait for replica set to become ready before adopting it. framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName) - Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred()) + gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred()) // Create a deployment to delete nginx pods and instead bring up redis-slave pods. // We use a nonexistent image here, so that we make sure it won't finish @@ -438,25 +438,25 @@ func testRolloverDeployment(f *framework.Framework) { } newDeployment.Spec.MinReadySeconds = int32(10) _, err = c.AppsV1().Deployments(ns).Create(newDeployment) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify that the pods were scaled up and down as expected. 
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Make sure deployment %q performs scaling operations", deploymentName) // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 err = framework.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation) // Check if it's updated to revision 1 correctly framework.Logf("Check revision of new replica set for deployment %q", deploymentName) err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Ensure that both replica sets have 1 created replica") oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ensureReplicas(oldRS, int32(1)) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ensureReplicas(newRS, int32(1)) // The deployment is stuck, update it to roll over the above 2 ReplicaSets and bring up redis pods. @@ -466,35 +466,35 @@ func testRolloverDeployment(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pod template update. framework.Logf("Wait for deployment %q to be observed by the deployment controller", deploymentName) err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 2 framework.Logf("Wait for revision update of deployment %q to 2", deploymentName) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Make sure deployment %q is complete", deploymentName) err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Ensure that both old replica sets have no replicas") oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ensureReplicas(oldRS, int32(0)) // Not really the new replica set anymore but we GET by name so that's fine.
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ensureReplicas(newRS, int32(0)) } func ensureReplicas(rs *apps.ReplicaSet, replicas int32) { - Expect(*rs.Spec.Replicas).Should(Equal(replicas)) - Expect(rs.Status.Replicas).Should(Equal(replicas)) + gomega.Expect(*rs.Spec.Replicas).Should(gomega.Equal(replicas)) + gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas)) } // testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and @@ -518,18 +518,18 @@ func testRollbackDeployment(f *framework.Framework) { createAnnotation := map[string]string{"action": "create", "author": "node"} d.Annotations = createAnnotation deploy, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 1 err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentComplete(c, deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Current newRS annotation should be "create" err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // 2. Update the deployment to create redis pods. updatedDeploymentImage := RedisImage @@ -540,66 +540,66 @@ func testRollbackDeployment(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Annotations = updateAnnotation }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pod template update. err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 2 err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Current newRS annotation should be "update" err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // 3. 
Update the deploymentRollback to rollback to revision 1 revision := int64(1) framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback := newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for the deployment to start rolling back err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: report RollbackDone in deployment status and check it here // Wait for it to be updated to revision 3 err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Current newRS annotation should be "create", after the rollback err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // 4. Update the deploymentRollback to rollback to last revision revision = 0 framework.Logf("rolling back deployment %s to last revision", deploymentName) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for it to be updated to revision 4 err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Current newRS annotation should be "update", after the rollback err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // 5. Update the deploymentRollback to rollback to revision 10 // Since there's no revision 10 in history, it should stay as revision 4 @@ -607,17 +607,17 @@ func testRollbackDeployment(f *framework.Framework) { framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for the deployment to start rolling back err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: report RollbackRevisionNotFound in deployment status and check it here // The pod template shouldn't change since there's no revision 10 // Check if it's still revision 4 and still has the old pod template err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // 6. 
Update the deploymentRollback to rollback to revision 4 // Since it's already revision 4, it should be no-op @@ -625,17 +625,17 @@ func testRollbackDeployment(f *framework.Framework) { framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for the deployment to start rolling back err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: report RollbackTemplateUnchanged in deployment status and check it here // The pod template shouldn't change since it's already revision 4 // Check if it's still revision 4 and still has the old pod template err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } func randomScale(d *apps.Deployment, i int) { @@ -669,7 +669,7 @@ func testIterativeDeployments(f *framework.Framework) { d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero framework.Logf("Creating deployment %q", deploymentName) deployment, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) iterations := 20 for i := 0; i < iterations; i++ { @@ -686,7 +686,7 @@ func testIterativeDeployments(f *framework.Framework) { update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv) randomScale(update, i) }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) case n < 0.4: // rollback to the previous version @@ -697,7 +697,7 @@ func testIterativeDeployments(f *framework.Framework) { } update.Annotations[apps.DeprecatedRollbackTo] = "0" }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) case n < 0.6: // just scaling @@ -705,7 +705,7 @@ func testIterativeDeployments(f *framework.Framework) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { randomScale(update, i) }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) case n < 0.8: // toggling the deployment @@ -715,24 +715,24 @@ func testIterativeDeployments(f *framework.Framework) { update.Spec.Paused = true randomScale(update, i) }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { framework.Logf("%02d: resuming deployment %q", i, deployment.Name) deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false randomScale(update, i) }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } default: // arbitrarily delete deployment pods framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) opts := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.CoreV1().Pods(ns).List(opts) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(podList.Items) == 0 { 
framework.Logf("%02d: no deployment pods to delete", i) continue @@ -745,7 +745,7 @@ func testIterativeDeployments(f *framework.Framework) { framework.Logf("%02d: deleting deployment pod %q", i, name) err := c.CoreV1().Pods(ns).Delete(name, nil) if err != nil && !errors.IsNotFound(err) { - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } } @@ -753,7 +753,7 @@ func testIterativeDeployments(f *framework.Framework) { // unpause the deployment if we end up pausing it deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if deployment.Spec.Paused { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false @@ -761,13 +761,13 @@ func testIterativeDeployments(f *framework.Framework) { } framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName) - Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for deployment %q status", deploymentName) - Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) framework.Logf("Checking deployment %q for a complete condition", deploymentName) - Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred()) } func testDeploymentsControllerRef(f *framework.Framework) { @@ -780,47 +780,47 @@ func testDeploymentsControllerRef(f *framework.Framework) { replicas := int32(1) d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentComplete(c, deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) rsList := listDeploymentReplicaSets(c, ns, podLabels) - Expect(len(rsList.Items)).Should(Equal(1)) + gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1)) framework.Logf("Obtaining the ReplicaSet's UID") orphanedRSUID := rsList.Items[0].UID framework.Logf("Checking the ReplicaSet has the right controllerRef") err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName) err = orphanDeploymentReplicaSets(c, deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Wait for the ReplicaSet to be orphaned") + ginkgo.By("Wait for the ReplicaSet to be orphaned") err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels)) - Expect(err).NotTo(HaveOccurred(), "error waiting for Deployment 
ReplicaSet to be orphaned") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned") deploymentName = "test-adopt-deployment" framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) deploy, err = c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.WaitForDeploymentComplete(c, deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for the ReplicaSet to have the right controllerRef") err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) rsList = listDeploymentReplicaSets(c, ns, podLabels) - Expect(len(rsList.Items)).Should(Equal(1)) + gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1)) framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") - Expect(rsList.Items[0].UID).Should(Equal(orphanedRSUID)) + gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID)) } // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle @@ -842,21 +842,21 @@ func testProportionalScalingDeployment(f *framework.Framework) { framework.Logf("Creating deployment %q", deploymentName) deployment, err := c.AppsV1().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for observed generation %d", deployment.Generation) - Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) // Verify that the required pods have come up. framework.Logf("Waiting for all required pods to come up") err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas)) - Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err) framework.Logf("Waiting for deployment %q to complete", deployment.Name) - Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred()) firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Update the deployment with a non-existent image so that the new replica set // will be blocked to simulate a partial rollout. 
@@ -864,58 +864,58 @@ func testProportionalScalingDeployment(f *framework.Framework) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = "nginx:404" }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for observed generation %d", deployment.Generation) - Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred()) // Checking state of first rollout's replicaset. maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas. minAvailableReplicas := replicas - int32(maxUnavailable) framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) - Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred()) + gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) // First rollout's replicaset should have .spec.replicas = 8 too. framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) - Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred()) + gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Checking state of second rollout's replicaset. secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Second rollout's replicaset should have 0 available replicas. framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0") - Expect(secondRS.Status.AvailableReplicas).Should(Equal(int32(0))) + gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0))) // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. 
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) - Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred()) + gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred()) // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Check the deployment's minimum availability. framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName) if deployment.Status.AvailableReplicas < minAvailableReplicas { - Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(HaveOccurred()) + gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred()) } // Scale the deployment to 30 replicas. @@ -924,23 +924,23 @@ func testProportionalScalingDeployment(f *framework.Framework) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. // Note that 12 comes from rounding (30-10)*(8/13) to nearest integer. framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") - Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred()) + gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred()) // Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas. // Note that 8 comes from rounding (30-10)*(5/13) to nearest integer. 
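To make the rounding in the comments above concrete before the verification below: scaling the deployment from 10 to 30 leaves 20 extra replicas to distribute, and each ReplicaSet receives a share proportional to its current size (8 and 5, totalling 13). A rough standalone sketch of just that arithmetic; the real distribution logic lives in the deployment controller, this only mirrors the nearest-integer rounding the comments describe:

    package main

    import (
        "fmt"
        "math"
    )

    // share returns the portion of the extra replicas that a ReplicaSet of size
    // rsSize receives when the ReplicaSets currently hold total replicas between
    // them, rounded to the nearest integer.
    func share(extra, rsSize, total int) int {
        return int(math.Round(float64(extra) * float64(rsSize) / float64(total)))
    }

    func main() {
        extra, first, second := 20, 8, 5 // scaling 10 -> 30 with ReplicaSets at 8 and 5
        fmt.Println(first + share(extra, first, first+second))   // 8 + 12 = 20
        fmt.Println(second + share(extra, second, first+second)) // 5 + 8 = 13
    }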
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") - Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred()) + gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred()) } func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error { @@ -971,8 +971,8 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} rsList, err := c.AppsV1().ReplicaSets(ns).List(options) - Expect(err).NotTo(HaveOccurred()) - Expect(len(rsList.Items)).To(BeNumerically(">", 0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0)) return rsList } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 22d0e0fef30..f64b5019170 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -48,16 +48,16 @@ var _ = SIGDescribe("DisruptionController", func() { var ns string var cs kubernetes.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet ns = f.Namespace.Name }) - It("should create a PodDisruptionBudget", func() { + ginkgo.It("should create a PodDisruptionBudget", func() { createPDBMinAvailableOrDie(cs, ns, intstr.FromString("1%")) }) - It("should update PodDisruptionBudget status", func() { + ginkgo.It("should update PodDisruptionBudget status", func() { createPDBMinAvailableOrDie(cs, ns, intstr.FromInt(2)) createPodsOrDie(cs, ns, 3) @@ -72,7 +72,7 @@ var _ = SIGDescribe("DisruptionController", func() { } return pdb.Status.PodDisruptionsAllowed > 0, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) evictionCases := []struct { @@ -145,7 +145,7 @@ var _ = SIGDescribe("DisruptionController", func() { if c.shouldDeny { expectation = "should not allow an eviction" } - It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() { + ginkgo.It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() { if c.skipForBigClusters { framework.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1) } @@ -179,7 +179,7 @@ var _ = SIGDescribe("DisruptionController", func() { return false, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e := &policy.Eviction{ ObjectMeta: metav1.ObjectMeta{ @@ -194,7 +194,7 @@ var _ = SIGDescribe("DisruptionController", func() { time.Sleep(timeout) err = cs.CoreV1().Pods(ns).Evict(e) - Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget.")) + gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget.")) } else { // Only wait for running pods in the "allow" case // because one of shouldDeny cases relies on the @@ -207,11 +207,10 @@ var _ = SIGDescribe("DisruptionController", func() { err = cs.CoreV1().Pods(ns).Evict(e) if err != nil { return false, nil - } else { - return true, nil } + return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) } @@ -229,7 +228,7 @@ func createPDBMinAvailableOrDie(cs 
kubernetes.Interface, ns string, minAvailable }, } _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) { @@ -244,7 +243,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail }, } _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { @@ -272,7 +271,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { } func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { - By("Waiting for all pods to be running") + ginkgo.By("Waiting for all pods to be running") err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"}) if err != nil { diff --git a/test/e2e/apps/framework.go b/test/e2e/apps/framework.go index d4372e145ca..f81dc530fdf 100644 --- a/test/e2e/apps/framework.go +++ b/test/e2e/apps/framework.go @@ -18,6 +18,7 @@ package apps import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-apps] "+text, body) } diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 21018e24f7d..26916ff6bc4 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("Job", func() { @@ -38,20 +38,20 @@ var _ = SIGDescribe("Job", func() { backoffLimit := int32(6) // default value // Simplest case: all pods succeed promptly - It("should run a job to completion when tasks succeed", func() { - By("Creating a job") + ginkgo.It("should run a job to completion when tasks succeed", func() { + ginkgo.By("Creating a job") job := jobutil.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) - By("Ensuring job reaches completions") + ginkgo.By("Ensuring job reaches completions") err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) - Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) // Pods sometimes fail, but eventually succeed. - It("should run a job to completion when tasks sometimes fail and are locally restarted", func() { - By("Creating a job") + ginkgo.It("should run a job to completion when tasks sometimes fail and are locally restarted", func() { + ginkgo.By("Creating a job") // One failure, then a success, local restarts. // We can't use the random failure approach used by the // non-local test below, because kubelet will throttle @@ -61,16 +61,16 @@ var _ = SIGDescribe("Job", func() { // test timeout. 
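An aside on the pattern itself: every import hunk in this patch (disruption.go and job.go above, network_partition.go below) follows the same mechanical recipe, dropping the Ginkgo/Gomega dot-imports and qualifying each call site. For reference, a minimal before/after sketch; the test body is illustrative:

    // Before the conversion, dot-imports spill Describe/It/By/Expect into the
    // package namespace:
    //
    //	import (
    //		. "github.com/onsi/ginkgo"
    //		. "github.com/onsi/gomega"
    //	)
    //
    //	var _ = Describe("example", func() {
    //		It("adds", func() { Expect(1 + 1).To(Equal(2)) })
    //	})
    //
    // After the conversion, the same test reads:
    import (
        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    var _ = ginkgo.Describe("example", func() {
        ginkgo.It("adds", func() { gomega.Expect(1 + 1).To(gomega.Equal(2)) })
    })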
job := jobutil.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) - By("Ensuring job reaches completions") + ginkgo.By("Ensuring job reaches completions") err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) - Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) // Pods sometimes fail, but eventually succeed, after pod restarts - It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() { - By("Creating a job") + ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() { + ginkgo.By("Creating a job") // 50% chance of container success, local restarts. // Can't use the failOnce approach because that relies // on an emptyDir, which is not preserved across new pods. @@ -82,22 +82,22 @@ var _ = SIGDescribe("Job", func() { // test less flaky, for now. job := jobutil.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) - By("Ensuring job reaches completions") + ginkgo.By("Ensuring job reaches completions") err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions) - Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) - It("should exceed active deadline", func() { - By("Creating a job") + ginkgo.It("should exceed active deadline", func() { + ginkgo.By("Creating a job") var activeDeadlineSeconds int64 = 1 job := jobutil.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) - By("Ensuring job past active deadline") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + ginkgo.By("Ensuring job past active deadline") err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded") - Expect(err).NotTo(HaveOccurred(), "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name) }) /* @@ -106,49 +106,49 @@ var _ = SIGDescribe("Job", func() { Description: Create a job. Ensure the active pods reflect paralellism in the namespace and delete the job. Job MUST be deleted successfully. 
*/ framework.ConformanceIt("should delete a job", func() { - By("Creating a job") + ginkgo.By("Creating a job") job := jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) - By("Ensuring active pods == parallelism") + ginkgo.By("Ensuring active pods == parallelism") err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) - Expect(err).NotTo(HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) - By("delete a job") + ginkgo.By("delete a job") framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) - By("Ensuring job was deleted") + ginkgo.By("Ensuring job was deleted") _, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) - Expect(err).To(HaveOccurred(), "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name) - Expect(errors.IsNotFound(err)).To(BeTrue()) + gomega.Expect(err).To(gomega.HaveOccurred(), "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) - It("should adopt matching orphans and release non-matching pods", func() { - By("Creating a job") + ginkgo.It("should adopt matching orphans and release non-matching pods", func() { + ginkgo.By("Creating a job") job := jobutil.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) // Replace job with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned job. 
kind := job.Kind job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) job.Kind = kind - By("Ensuring active pods == parallelism") + ginkgo.By("Ensuring active pods == parallelism") err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) - Expect(err).NotTo(HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) - By("Orphaning one of the Job's Pods") + ginkgo.By("Orphaning one of the Job's Pods") pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) - Expect(err).NotTo(HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) - Expect(pods.Items).To(HaveLen(int(parallelism))) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) + gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism))) pod := pods.Items[0] f.PodClient().Update(pod.Name, func(pod *v1.Pod) { pod.OwnerReferences = nil }) - By("Checking that the Job readopts the Pod") - Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout, + ginkgo.By("Checking that the Job readopts the Pod") + gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -159,15 +159,15 @@ var _ = SIGDescribe("Job", func() { } return true, nil }, - )).To(Succeed(), "wait for pod %q to be readopted", pod.Name) + )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name) - By("Removing the labels from the Job's Pod") + ginkgo.By("Removing the labels from the Job's Pod") f.PodClient().Update(pod.Name, func(pod *v1.Pod) { pod.Labels = nil }) - By("Checking that the Job releases the Pod") - Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout, + ginkgo.By("Checking that the Job releases the Pod") + gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef != nil { @@ -175,24 +175,24 @@ var _ = SIGDescribe("Job", func() { } return true, nil }, - )).To(Succeed(), "wait for pod %q to be released", pod.Name) + )).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name) }) - It("should exceed backoffLimit", func() { - By("Creating a job") + ginkgo.It("should exceed backoffLimit", func() { + ginkgo.By("Creating a job") backoff := 1 job := jobutil.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff)) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) - By("Ensuring job exceeds backoffLimit") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name) + ginkgo.By("Ensuring job exceeds backoffLimit") err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout,
"BackoffLimitExceeded") - Expect(err).NotTo(HaveOccurred(), "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name) - By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1)) + ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1)) pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) - Expect(err).NotTo(HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) - // Expect(pods.Items).To(HaveLen(backoff + 1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) + // gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1)) // due to NumRequeus not being stable enough, especially with failed status // updates we need to allow more than backoff+1 // TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed @@ -200,7 +200,7 @@ var _ = SIGDescribe("Job", func() { framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items) } for _, pod := range pods.Items { - Expect(pod.Status.Phase).To(Equal(v1.PodFailed)) + gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed)) } }) }) diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 97089f26619..aff54adb2bc 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -39,8 +39,8 @@ import ( jobutil "k8s.io/kubernetes/test/e2e/framework/job" testutils "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -108,11 +108,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { var c clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name _, err := framework.GetPodsInNamespace(c, ns, map[string]string{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed. framework.SkipUnlessProviderIs("gke", "aws") @@ -122,8 +122,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { }) framework.KubeDescribe("Pods", func() { - Context("should return to running and ready state after network partition is healed", func() { - BeforeEach(func() { + ginkgo.Context("should return to running and ready state after network partition is healed", func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessNodeCountIsAtLeast(2) }) @@ -133,13 +133,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // 1. Node is marked NotReady after timeout by nodecontroller (40seconds) // 2. All pods on node are marked NotReady shortly after #1 // 3. 
Node and pods return to Ready after connectivity recovers - It("All pods on the unreachable node should be marked as NotReady when the node turns NotReady "+ + ginkgo.It("All pods on the unreachable node should be marked as NotReady when the node turns NotReady "+ "AND all pods should be marked back to Ready when the node gets back to Ready before the pod eviction timeout", func() { - By("choose a node - we will block all network traffic on this node") + ginkgo.By("choose a node - we will block all network traffic on this node") var podOpts metav1.ListOptions nodeOpts := metav1.ListOptions{} nodes, err := c.CoreV1().Nodes().List(nodeOpts) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.FilterNodes(nodes, func(node v1.Node) bool { if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) { return false @@ -160,7 +160,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } - By("Set up watch on node status") + ginkgo.By("Set up watch on node status") nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name) stopCh := make(chan struct{}) newNode := make(chan *v1.Node) @@ -182,7 +182,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { n, ok := newObj.(*v1.Node) - Expect(ok).To(Equal(true)) + gomega.Expect(ok).To(gomega.Equal(true)) newNode <- n }, @@ -196,21 +196,21 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { }() go controller.Run(stopCh) - By(fmt.Sprintf("Block traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name)) host, err := framework.GetNodeExternalIP(&node) framework.ExpectNoError(err) masterAddresses := framework.GetAllMasterAddresses(c) defer func() { - By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) for _, masterAddress := range masterAddresses { framework.UnblockNetwork(host, masterAddress) } - if CurrentGinkgoTestDescription().Failed { + if ginkgo.CurrentGinkgoTestDescription().Failed { return } - By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") + ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") expectNodeReadiness(true, newNode) if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) @@ -221,7 +221,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.BlockNetwork(host, masterAddress) } - By("Expect to observe node and pod status change from Ready to NotReady after network partition") + ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition") expectNodeReadiness(false, newNode) if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) @@ -231,7 +231,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]",
func() { }) framework.KubeDescribe("[ReplicationController]", func() { - It("should recreate pods scheduled on the unreachable node "+ + ginkgo.It("should recreate pods scheduled on the unreachable node "+ "AND allow scheduling of pods on a node after it rejoins the cluster", func() { // Create a replication controller for a service that serves its hostname. @@ -243,32 +243,32 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { replicas := int32(numNodes) common.NewRCByName(c, ns, name, replicas, nil) err = framework.VerifyPods(c, ns, name, true, replicas) - Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding") - By("choose a node with at least one pod - we will block some network traffic on this node") + ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // This creates a temporary network partition, verifies that 'podNameToDisappear', // that belongs to replication controller 'rcName', really disappeared (because its // grace period is set to 0). // Finally, it checks that the replication controller recreates the // pods on another node and that now the number of replicas is equal 'replicas'. - By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) + ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("verifying whether the pod from the unreachable node is recreated") + ginkgo.By("verifying whether the pod from the unreachable node is recreated") err = framework.VerifyPods(c, ns, name, true, replicas) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) @@ -279,26 +279,26 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // sleep a bit, to allow Watch in NodeController to catch up. time.Sleep(5 * time.Second) - By("verify whether new pods can be created on the re-attached node") + ginkgo.By("verify whether new pods can be created on the re-attached node") // increasing the RC size is not a valid way to test this // since we have no guarantees the pod will be scheduled on our node. 
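On why the test below creates a dedicated pod rather than growing the ReplicationController, as the comment above explains: spec.nodeName is the one placement field with a hard guarantee, because it bypasses the scheduler and binds the pod directly to the kubelet on that node. A minimal sketch of the idea, reusing the test's own c, ns, and node variables; the pod name and image are placeholders:

    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "additionalpod"},
        Spec: v1.PodSpec{
            // Skips scheduling entirely; the kubelet on this node runs the pod.
            NodeName:   node.Name,
            Containers: []v1.Container{{Name: "c", Image: "some-image:tag"}},
        },
    }
    _, err := c.CoreV1().Pods(ns).Create(pod)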
additionalPod := "additionalpod" err = newPodOnNode(c, ns, additionalPod, node.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = framework.VerifyPods(c, ns, additionalPod, true, 1) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // verify that it is really on the requested node { pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if pod.Spec.NodeName != node.Name { framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) } } }) - It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() { + ginkgo.It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() { // Create a replication controller for a service that serves its hostname. // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-net" @@ -310,32 +310,32 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { replicas := int32(numNodes) common.NewRCByName(c, ns, name, replicas, &gracePeriod) err = framework.VerifyPods(c, ns, name, true, replicas) - Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding") - By("choose a node with at least one pod - we will block some network traffic on this node") + ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // This creates a temporary network partition, verifies that 'podNameToDisappear', // that belongs to replication controller 'rcName', did not disappear (because its // grace period is set to 30). // Finally, it checks that the replication controller recreates the // pods on another node and that now the number of replicas is equal 'replicas + 1'. 
- By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) + ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name) - Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") + gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") - By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas)) + ginkgo.By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas)) _, err = framework.PodsCreated(c, ns, name, replicas) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) @@ -352,10 +352,10 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } headlessSvcName := "test" - BeforeEach(func() { + ginkgo.BeforeEach(func() { // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed. framework.SkipUnlessProviderIs("gke") - By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) + ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err) @@ -363,20 +363,20 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ns = f.Namespace.Name }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DumpDebugInfo(c, ns) } framework.Logf("Deleting all stateful set in ns %v", ns) framework.DeleteAllStatefulSets(c, ns) }) - It("should come back up if node goes down [Slow] [Disruptive]", func() { + ginkgo.It("should come back up if node goes down [Slow] [Disruptive]", func() { petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}} ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) _, err := c.AppsV1().StatefulSets(ns).Create(ps) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pst := framework.NewStatefulSetTester(c) @@ -386,14 +386,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) common.RestartNodes(f.ClientSet, nodes) - By("waiting for pods to be running again") + ginkgo.By("waiting for pods to be running again") pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps) }) - It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() { + ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() { ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels) _, err := c.AppsV1().StatefulSets(ns).Create(ps) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pst := framework.NewStatefulSetTester(c) pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps) @@ -408,7 +408,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] 
[Slow]", func() { framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name) err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute) - Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") + gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") }) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) @@ -416,13 +416,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } - By("waiting for pods to be running again") + ginkgo.By("waiting for pods to be running again") pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps) }) }) framework.KubeDescribe("[Job]", func() { - It("should create new pods when node is partitioned", func() { + ginkgo.It("should create new pods when node is partitioned", func() { parallelism := int32(2) completions := int32(4) backoffLimit := int32(6) // default value @@ -430,33 +430,33 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { job := jobutil.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name})) - By(fmt.Sprintf("verifying that there are now %v running pods", parallelism)) + ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism)) _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("choose a node with at least one pod - we will block some network traffic on this node") + ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // This creates a temporary network partition, verifies that the job has 'parallelism' number of // running pods after the node-controller detects node unreachable. 
- By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) + ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute) - Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") + gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") - By(fmt.Sprintf("verifying that there are now %v running pods", parallelism)) + ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism)) _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) @@ -467,8 +467,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { }) framework.KubeDescribe("Pods", func() { - Context("should be evicted from unready Node", func() { - BeforeEach(func() { + ginkgo.Context("should be evicted from unready Node", func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessNodeCountIsAtLeast(2) }) @@ -478,9 +478,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // 1. Node is marked NotReady after timeout by nodecontroller (40seconds) // 2. All pods on node are marked NotReady shortly after #1 // 3. After enough time passess all Pods are evicted from the given Node - It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+ + ginkgo.It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+ "AND all pods should be evicted after eviction timeout passes", func() { - By("choose a node - we will block all network traffic on this node") + ginkgo.By("choose a node - we will block all network traffic on this node") var podOpts metav1.ListOptions nodes := framework.GetReadySchedulableNodesOrDie(c) framework.FilterNodes(nodes, func(node v1.Node) bool { @@ -542,7 +542,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { maxTolerationTime, ) - By("Set up watch on node status") + ginkgo.By("Set up watch on node status") nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name) stopCh := make(chan struct{}) newNode := make(chan *v1.Node) @@ -564,7 +564,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { n, ok := newObj.(*v1.Node) - Expect(ok).To(Equal(true)) + gomega.Expect(ok).To(gomega.Equal(true)) newNode <- n }, @@ -578,21 +578,21 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { }() go controller.Run(stopCh) - By(fmt.Sprintf("Block traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name)) host, err := framework.GetNodeExternalIP(&node) framework.ExpectNoError(err) masterAddresses := framework.GetAllMasterAddresses(c) defer func() { - By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) for _, masterAddress := range masterAddresses { 
 						framework.UnblockNetwork(host, masterAddress)
 					}

-					if CurrentGinkgoTestDescription().Failed {
+					if ginkgo.CurrentGinkgoTestDescription().Failed {
 						return
 					}

-					By("Expect to observe node status change from NotReady to Ready after network connectivity recovers")
+					ginkgo.By("Expect to observe node status change from NotReady to Ready after network connectivity recovers")
 					expectNodeReadiness(true, newNode)
 				}()
@@ -600,7 +600,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 					framework.BlockNetwork(host, masterAddress)
 				}

-				By("Expect to observe node and pod status change from Ready to NotReady after network partition")
+				ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
 				expectNodeReadiness(false, newNode)
 				framework.ExpectNoError(wait.Poll(1*time.Second, timeout, func() (bool, error) {
 					return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
@@ -610,7 +610,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 				}

 				sleepTime := maxTolerationTime + 20*time.Second
-				By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
+				ginkgo.By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
 				time.Sleep(sleepTime)
 				pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts)
 				framework.ExpectNoError(err)
diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go
index 75a54b8608a..b25529c3612 100644
--- a/test/e2e/apps/rc.go
+++ b/test/e2e/apps/rc.go
@@ -31,8 +31,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 var _ = SIGDescribe("ReplicationController", func() {
@@ -48,7 +48,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 		TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
 	})

-	It("should serve a basic image on each replica with a private image", func() {
+	ginkgo.It("should serve a basic image on each replica with a private image", func() {
 		// requires private images
 		framework.SkipUnlessProviderIs("gce", "gke")
 		privateimage := imageutils.GetConfig(imageutils.ServeHostname)
@@ -110,9 +110,9 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
 	}
 }

-// A basic test to check the deployment of an image using
-// a replication controller. The image serves its hostname
-// which is checked for each replica.
+// TestReplicationControllerServeImageOrFail is a basic test to check
+// the deployment of an image using a replication controller.
+// The image serves its hostname which is checked for each replica.
 func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
 	replicas := int32(1)
@@ -121,16 +121,16 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 	// that serves its hostname.
 	// The source for the Docker container kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
-	By(fmt.Sprintf("Creating replication controller %s", name))
+	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
 	newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
 	newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	// Check that pods for the new RC were created.
 	// TODO: Maybe switch PodsCreated to just check owner references.
 	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
@@ -149,14 +149,14 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 				err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
 			}
 		}
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
 		running++
 	}

 	// Sanity check
 	if running != replicas {
-		Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
+		gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
 	}

 	// Verify that something is listening.
@@ -182,7 +182,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 	framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
 	quota := newPodQuota(name, "2")
 	_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 		quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
@@ -196,14 +196,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 	if err == wait.ErrWaitTimeout {
 		err = fmt.Errorf("resource quota %q never synced", name)
 	}
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
+	ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
 	rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
 	rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
+	ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
 	generation := rc.Generation
 	conditions := rc.Status.Conditions
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@@ -223,16 +223,16 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 	if err == wait.ErrWaitTimeout {
 		err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
 	}
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
+	ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name)) rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) { x := int32(2) update.Spec.Replicas = &x }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("Checking rc %q has no failure condition set", name)) + ginkgo.By(fmt.Sprintf("Checking rc %q has no failure condition set", name)) generation = rc.Generation conditions = rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { @@ -252,12 +252,12 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { if err == wait.ErrWaitTimeout { err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions) } - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } func testRCAdoptMatchingOrphans(f *framework.Framework) { name := "pod-adoption" - By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) + ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) p := f.PodClient().CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -275,21 +275,21 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { }, }) - By("When a replication controller with a matching selector is created") + ginkgo.By("When a replication controller with a matching selector is created") replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage) rcSt.Spec.Selector = map[string]string{"name": name} rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Then the orphan pod is adopted") + ginkgo.By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the RC if errors.IsNotFound(err) { return true, nil } - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, owner := range p2.OwnerReferences { if *owner.Controller && owner.UID == rc.UID { // pod adopted @@ -299,26 +299,26 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { // pod still not adopted return false, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } func testRCReleaseControlledNotMatching(f *framework.Framework) { name := "pod-release" - By("Given a ReplicationController is created") + ginkgo.By("Given a ReplicationController is created") replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage) rcSt.Spec.Selector = map[string]string{"name": name} rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("When the matched label of one of its pods change") + ginkgo.By("When the matched label of one of its pods change") pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) p := pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, 
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		pod.Labels = map[string]string{"name": "not-matching-name"}
 		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
@@ -330,12 +330,12 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
 		}
 		return true, nil
 	})
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By("Then the pod is released")
+	ginkgo.By("Then the pod is released")
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		for _, owner := range p2.OwnerReferences {
 			if *owner.Controller && owner.UID == rc.UID {
 				// pod still belonging to the replication controller
@@ -345,5 +345,5 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
 		// pod already released
 		return true, nil
 	})
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go
index e9bec52a9d4..fe5c571fa3a 100644
--- a/test/e2e/apps/replica_set.go
+++ b/test/e2e/apps/replica_set.go
@@ -32,8 +32,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"

 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -92,7 +92,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
 		testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
 	})

-	It("should serve a basic image on each replica with a private image", func() {
+	ginkgo.It("should serve a basic image on each replica with a private image", func() {
 		// requires private images
 		framework.SkipUnlessProviderIs("gce", "gke")
 		privateimage := imageutils.GetConfig(imageutils.ServeHostname)
@@ -100,7 +100,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
 		testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage())
 	})

-	It("should surface a failure condition on a common issue like exceeded quota", func() {
+	ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func() {
 		testReplicaSetConditionCheck(f)
 	})
@@ -127,12 +127,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 	newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
 	newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 	_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	// Check that pods for the new RS were created.
 	// TODO: Maybe switch PodsCreated to just check owner references.
 	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
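The rule this series applies is mechanical: delete the two dot-imports and package-qualify every Ginkgo node (Describe, Context, It, By, BeforeEach, AfterEach) and every Gomega call (Expect, Eventually, and the matchers). A minimal self-contained sketch of the target style, using only the two libraries themselves; the e2e framework helpers seen in the hunks above are assumed to exist in the tree and are not reproduced here:

    package example_test

    import (
    	"fmt"
    	"testing"

    	"github.com/onsi/ginkgo"
    	"github.com/onsi/gomega"
    )

    // TestExample wires Gomega's fail handler into Ginkgo and runs the
    // suite, as any Ginkgo suite bootstrap does.
    func TestExample(t *testing.T) {
    	gomega.RegisterFailHandler(ginkgo.Fail)
    	ginkgo.RunSpecs(t, "Named-import style")
    }

    var _ = ginkgo.Describe("named imports", func() {
    	ginkgo.It("qualifies every Ginkgo and Gomega identifier", func() {
    		ginkgo.By(fmt.Sprintf("checking %d replica(s)", 1))
    		var err error
    		gomega.Expect(err).NotTo(gomega.HaveOccurred())
    		gomega.Expect(1 + 1).To(gomega.Equal(2))
    	})
    })

The trade-off is verbosity for clarity: with named imports, `go vet` and readers can see at a glance which identifiers come from the test framework rather than the package under test.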
@@ -151,14 +151,14 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 				err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
 			}
 		}
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
 		running++
 	}

 	// Sanity check
 	if running != replicas {
-		Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
+		gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
 	}

 	// Verify that something is listening.
@@ -181,10 +181,10 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 	namespace := f.Namespace.Name
 	name := "condition-test"

-	By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
+	ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
 	quota := newPodQuota(name, "2")
 	_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 		quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
@@ -198,14 +198,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 	if err == wait.ErrWaitTimeout {
 		err = fmt.Errorf("resource quota %q never synced", name)
 	}
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
+	ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
 	rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
 	rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
+	ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
 	generation := rs.Generation
 	conditions := rs.Status.Conditions
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@@ -226,16 +226,16 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 	if err == wait.ErrWaitTimeout {
 		err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions)
 	}
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
+	ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
 	rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
 		x := int32(2)
 		update.Spec.Replicas = &x
 	})
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
+	ginkgo.By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
 	generation = rs.Generation
 	conditions = rs.Status.Conditions
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@@ -255,12 +255,12 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 	if err == wait.ErrWaitTimeout {
 		err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions)
 	}
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }

 func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 	name := "pod-adoption-release"
-	By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
+	ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
 	p := f.PodClient().CreateSync(&v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -278,21 +278,21 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 		},
 	})

-	By("When a replicaset with a matching selector is created")
+	ginkgo.By("When a replicaset with a matching selector is created")
 	replicas := int32(1)
 	rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImage)
 	rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
 	rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By("Then the orphan pod is adopted")
+	ginkgo.By("Then the orphan pod is adopted")
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
 		// The Pod p should either be adopted or deleted by the ReplicaSet
 		if errors.IsNotFound(err) {
 			return true, nil
 		}
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		for _, owner := range p2.OwnerReferences {
 			if *owner.Controller && owner.UID == rs.UID {
 				// pod adopted
@@ -302,16 +302,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 		// pod still not adopted
 		return false, nil
 	})
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By("When the matched label of one of its pods change")
+	ginkgo.By("When the matched label of one of its pods change")
 	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	p = &pods.Items[0]
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		pod.Labels = map[string]string{"name": "not-matching-name"}
 		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
@@ -323,12 +323,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 		}
 		return true, nil
 	})
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())

-	By("Then the pod is released")
+	ginkgo.By("Then the pod is released")
 	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		for _, owner := range p2.OwnerReferences {
 			if *owner.Controller && owner.UID == rs.UID {
 				// pod still belonging to the replicaset
@@ -338,5 +338,5 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 		// pod already released
 		return true, nil
 	})
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go
index 12c57780d54..b3980c4bb5b 100644
--- a/test/e2e/apps/statefulset.go
+++ b/test/e2e/apps/statefulset.go
@@ -22,8 +22,8 @@ import (
 	"strings"
 	"time"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -56,7 +56,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	var ns string
 	var c clientset.Interface

-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 	})
@@ -71,19 +71,19 @@ var _ = SIGDescribe("StatefulSet", func() {
 		var statefulPodMounts, podMounts []v1.VolumeMount
 		var ss *apps.StatefulSet

-		BeforeEach(func() {
+		ginkgo.BeforeEach(func() {
 			statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
 			podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
 			ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)

-			By("Creating service " + headlessSvcName + " in namespace " + ns)
+			ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
 			headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
 			_, err := c.CoreV1().Services(ns).Create(headlessService)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		})

-		AfterEach(func() {
-			if CurrentGinkgoTestDescription().Failed {
+		ginkgo.AfterEach(func() {
+			if ginkgo.CurrentGinkgoTestDescription().Failed {
 				framework.DumpDebugInfo(c, ns)
 			}
 			framework.Logf("Deleting all statefulset in ns %v", ns)
@@ -92,47 +92,47 @@ var _ = SIGDescribe("StatefulSet", func() {

 		// This can't be Conformance yet because it depends on a default
 		// StorageClass and a dynamic provisioner.
-		It("should provide basic identity", func() {
-			By("Creating statefulset " + ssName + " in namespace " + ns)
+		ginkgo.It("should provide basic identity", func() {
+			ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
 			*(ss.Spec.Replicas) = 3
 			sst := framework.NewStatefulSetTester(c)
 			sst.PauseNewPods(ss)

 			_, err := c.AppsV1().StatefulSets(ns).Create(ss)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			By("Saturating stateful set " + ss.Name)
+			ginkgo.By("Saturating stateful set " + ss.Name)
 			sst.Saturate(ss)

-			By("Verifying statefulset mounted data directory is usable")
+			ginkgo.By("Verifying statefulset mounted data directory is usable")
 			framework.ExpectNoError(sst.CheckMount(ss, "/data"))

-			By("Verifying statefulset provides a stable hostname for each pod")
+			ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
 			framework.ExpectNoError(sst.CheckHostname(ss))

-			By("Verifying statefulset set proper service name")
+			ginkgo.By("Verifying statefulset set proper service name")
 			framework.ExpectNoError(sst.CheckServiceName(ss, headlessSvcName))

 			cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
-			By("Running " + cmd + " in all stateful pods")
+			ginkgo.By("Running " + cmd + " in all stateful pods")
 			framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))

-			By("Restarting statefulset " + ss.Name)
+			ginkgo.By("Restarting statefulset " + ss.Name)
 			sst.Restart(ss)
 			sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

-			By("Verifying statefulset mounted data directory is usable")
+			ginkgo.By("Verifying statefulset mounted data directory is usable")
 			framework.ExpectNoError(sst.CheckMount(ss, "/data"))

 			cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
-			By("Running " + cmd + " in all stateful pods")
+			ginkgo.By("Running " + cmd + " in all stateful pods")
 			framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
 		})

 		// This can't be Conformance yet because it depends on a default
 		// StorageClass and a dynamic provisioner.
-		It("should adopt matching orphans and release non-matching pods", func() {
-			By("Creating statefulset " + ssName + " in namespace " + ns)
+		ginkgo.It("should adopt matching orphans and release non-matching pods", func() {
+			ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
 			*(ss.Spec.Replicas) = 1
 			sst := framework.NewStatefulSetTester(c)
 			sst.PauseNewPods(ss)
@@ -141,29 +141,29 @@ var _ = SIGDescribe("StatefulSet", func() {
 			// Save Kind since it won't be populated in the returned ss.
 			kind := ss.Kind
 			ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			ss.Kind = kind

-			By("Saturating stateful set " + ss.Name)
+			ginkgo.By("Saturating stateful set " + ss.Name)
 			sst.Saturate(ss)
 			pods := sst.GetPodList(ss)
-			Expect(pods.Items).To(HaveLen(int(*ss.Spec.Replicas)))
+			gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas)))

-			By("Checking that stateful set pods are created with ControllerRef")
+			ginkgo.By("Checking that stateful set pods are created with ControllerRef")
 			pod := pods.Items[0]
 			controllerRef := metav1.GetControllerOf(&pod)
-			Expect(controllerRef).ToNot(BeNil())
-			Expect(controllerRef.Kind).To(Equal(ss.Kind))
-			Expect(controllerRef.Name).To(Equal(ss.Name))
-			Expect(controllerRef.UID).To(Equal(ss.UID))
+			gomega.Expect(controllerRef).ToNot(gomega.BeNil())
+			gomega.Expect(controllerRef.Kind).To(gomega.Equal(ss.Kind))
+			gomega.Expect(controllerRef.Name).To(gomega.Equal(ss.Name))
+			gomega.Expect(controllerRef.UID).To(gomega.Equal(ss.UID))

-			By("Orphaning one of the stateful set's pods")
+			ginkgo.By("Orphaning one of the stateful set's pods")
 			f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
 				pod.OwnerReferences = nil
 			})

-			By("Checking that the stateful set readopts the pod")
-			Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
+			ginkgo.By("Checking that the stateful set readopts the pod")
+			gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
 				func(pod *v1.Pod) (bool, error) {
 					controllerRef := metav1.GetControllerOf(pod)
 					if controllerRef == nil {
@@ -174,16 +174,16 @@ var _ = SIGDescribe("StatefulSet", func() {
 					}
 					return true, nil
 				},
-			)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
+			)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)

-			By("Removing the labels from one of the stateful set's pods")
+			ginkgo.By("Removing the labels from one of the stateful set's pods")
 			prevLabels := pod.Labels
 			f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
 				pod.Labels = nil
 			})

-			By("Checking that the stateful set releases the pod")
-			Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
+			ginkgo.By("Checking that the stateful set releases the pod")
+			gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
 				func(pod *v1.Pod) (bool, error) {
 					controllerRef := metav1.GetControllerOf(pod)
 					if controllerRef != nil {
@@ -191,16 +191,16 @@ var _ = SIGDescribe("StatefulSet", func() {
 					}
 					return true, nil
 				},
-			)).To(Succeed(), "wait for pod %q to be released", pod.Name)
+			)).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name)

 			// If we don't do this, the test leaks the Pod and PVC.
- By("Readding labels to the stateful set's pod") + ginkgo.By("Readding labels to the stateful set's pod") f.PodClient().Update(pod.Name, func(pod *v1.Pod) { pod.Labels = prevLabels }) - By("Checking that the stateful set readopts the pod") - Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout, + ginkgo.By("Checking that the stateful set readopts the pod") + gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -211,49 +211,49 @@ var _ = SIGDescribe("StatefulSet", func() { } return true, nil }, - )).To(Succeed(), "wait for pod %q to be readopted", pod.Name) + )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name) }) // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. - It("should not deadlock when a pod's predecessor fails", func() { - By("Creating statefulset " + ssName + " in namespace " + ns) + ginkgo.It("should not deadlock when a pod's predecessor fails", func() { + ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 2 sst := framework.NewStatefulSetTester(c) sst.PauseNewPods(ss) _, err := c.AppsV1().StatefulSets(ns).Create(ss) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunning(1, 0, ss) - By("Resuming stateful pod at index 0.") + ginkgo.By("Resuming stateful pod at index 0.") sst.ResumeNextPod(ss) - By("Waiting for stateful pod at index 1 to enter running.") + ginkgo.By("Waiting for stateful pod at index 1 to enter running.") sst.WaitForRunning(2, 1, ss) // Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not* // create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till // we set the healthy bit. - By("Deleting healthy stateful pod at index 0.") + ginkgo.By("Deleting healthy stateful pod at index 0.") sst.DeleteStatefulPodAtIndex(0, ss) - By("Confirming stateful pod at index 0 is recreated.") + ginkgo.By("Confirming stateful pod at index 0 is recreated.") sst.WaitForRunning(2, 1, ss) - By("Resuming stateful pod at index 1.") + ginkgo.By("Resuming stateful pod at index 1.") sst.ResumeNextPod(ss) - By("Confirming all stateful pods in statefulset are created.") + ginkgo.By("Confirming all stateful pods in statefulset are created.") sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) }) // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. - It("should perform rolling updates and roll backs of template modifications with PVCs", func() { - By("Creating a new StatefulSet with PVCs") + ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func() { + ginkgo.By("Creating a new StatefulSet with PVCs") *(ss.Spec.Replicas) = 3 rollbackTest(c, ns, ss) }) @@ -264,7 +264,7 @@ var _ = SIGDescribe("StatefulSet", func() { Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods one at a time when the Pod template changes. The StatefulSet's status MUST indicate the CurrentRevision and UpdateRevision. If the template is changed to match a prior revision, StatefulSet MUST detect this as a rollback instead of creating a new revision. 
 		*/
 		framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
-			By("Creating a new StatefulSet")
+			ginkgo.By("Creating a new StatefulSet")
 			ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
 			rollbackTest(c, ns, ss)
 		})
@@ -275,7 +275,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress, StatefulSet MUST restore the Pod without violating the Partition. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
 		*/
 		framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
-			By("Creating a new StatefulSet")
+			ginkgo.By("Creating a new StatefulSet")
 			ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
 			sst := framework.NewStatefulSetTester(c)
 			sst.SetHTTPProbe(ss)
@@ -290,16 +290,16 @@ var _ = SIGDescribe("StatefulSet", func() {
 				}(),
 			}
 			ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
 			ss = sst.WaitForStatus(ss)
 			currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
-			Expect(currentRevision).To(Equal(updateRevision),
+			gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
 				fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
 					ss.Namespace, ss.Name, updateRevision, currentRevision))
 			pods := sst.GetPodList(ss)
 			for i := range pods.Items {
-				Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
+				gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
 					fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
@@ -309,28 +309,28 @@ var _ = SIGDescribe("StatefulSet", func() {
 			newImage := NewNginxImage
 			oldImage := ss.Spec.Template.Spec.Containers[0].Image

-			By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
-			Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
+			ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
+			gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
 			ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
 				update.Spec.Template.Spec.Containers[0].Image = newImage
 			})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			By("Creating a new revision")
+			ginkgo.By("Creating a new revision")
 			ss = sst.WaitForStatus(ss)
 			currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-			Expect(currentRevision).NotTo(Equal(updateRevision),
+			gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
 				"Current revision should not equal update revision during rolling update")

-			By("Not applying an update when the partition is greater than the number of replicas")
+			ginkgo.By("Not applying an update when the partition is greater than the number of replicas")
 			for i := range pods.Items {
-				Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
+				gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
 					fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Spec.Containers[0].Image,
 						oldImage))
-				Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
+				gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
 					fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
@@ -338,7 +338,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 						currentRevision))
 			}

-			By("Performing a canary update")
+			ginkgo.By("Performing a canary update")
 			ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
 				Type: apps.RollingUpdateStatefulSetStrategyType,
 				RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
@@ -361,30 +361,30 @@ var _ = SIGDescribe("StatefulSet", func() {
 					}(),
 				}
 			})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
 			for i := range pods.Items {
 				if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
-					Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
 						fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Spec.Containers[0].Image,
 							oldImage))
-					Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
+					gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
 						fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
 							currentRevision))
 				} else {
-					Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
 						fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Spec.Containers[0].Image,
 							newImage))
-					Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
+					gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
 						fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
@@ -393,7 +393,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 				}
 			}

-			By("Restoring Pods to the correct revision when they are deleted")
+			ginkgo.By("Restoring Pods to the correct revision when they are deleted")
 			sst.DeleteStatefulPodAtIndex(0, ss)
 			sst.DeleteStatefulPodAtIndex(2, ss)
 			sst.WaitForRunningAndReady(3, ss)
@@ -401,26 +401,26 @@ var _ = SIGDescribe("StatefulSet", func() {
 			pods = sst.GetPodList(ss)
 			for i := range pods.Items {
 				if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
-					Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
 						fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Spec.Containers[0].Image,
 							oldImage))
-					Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
+					gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
 						fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
 							currentRevision))
 				} else {
-					Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
 						fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Spec.Containers[0].Image,
 							newImage))
-					Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
+					gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
 						fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
@@ -429,7 +429,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 				}
 			}

-			By("Performing a phased rolling update")
+			ginkgo.By("Performing a phased rolling update")
 			for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- {
 				ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
 					update.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
@@ -442,30 +442,30 @@ var _ = SIGDescribe("StatefulSet", func() {
 						}(),
 					}
 				})
-				Expect(err).NotTo(HaveOccurred())
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 				ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
 				for i := range pods.Items {
 					if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
-						Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
+						gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
 							fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
 								pods.Items[i].Namespace,
 								pods.Items[i].Name,
 								pods.Items[i].Spec.Containers[0].Image,
 								oldImage))
-						Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
+						gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
 							fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
 								pods.Items[i].Namespace,
 								pods.Items[i].Name,
 								pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
 								currentRevision))
 					} else {
-						Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
+						gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
 							fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
 								pods.Items[i].Namespace,
 								pods.Items[i].Name,
 								pods.Items[i].Spec.Containers[0].Image,
 								newImage))
-						Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
+						gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
 							fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
 								pods.Items[i].Namespace,
 								pods.Items[i].Name,
@@ -474,7 +474,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 					}
 				}
 			}
-			Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
+			gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
 				fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 					ss.Namespace,
 					ss.Name,
@@ -485,8 +485,8 @@ var _ = SIGDescribe("StatefulSet", func() {

 		// Do not mark this as Conformance.
 		// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
- It("should implement legacy replacement when the update strategy is OnDelete", func() { - By("Creating a new StatefulSet") + ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() { + ginkgo.By("Creating a new StatefulSet") ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) sst := framework.NewStatefulSetTester(c) sst.SetHTTPProbe(ss) @@ -494,16 +494,16 @@ var _ = SIGDescribe("StatefulSet", func() { Type: apps.OnDeleteStatefulSetStrategyType, } ss, err := c.AppsV1().StatefulSets(ns).Create(ss) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision - Expect(currentRevision).To(Equal(updateRevision), + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) pods := sst.GetPodList(ss) for i := range pods.Items { - Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision), + gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, @@ -511,7 +511,7 @@ var _ = SIGDescribe("StatefulSet", func() { currentRevision)) } - By("Restoring Pods to the current revision") + ginkgo.By("Restoring Pods to the current revision") sst.DeleteStatefulPodAtIndex(0, ss) sst.DeleteStatefulPodAtIndex(1, ss) sst.DeleteStatefulPodAtIndex(2, ss) @@ -519,7 +519,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss = sst.GetStatefulSet(ss.Namespace, ss.Name) pods = sst.GetPodList(ss) for i := range pods.Items { - Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision), + gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, @@ -529,20 +529,20 @@ var _ = SIGDescribe("StatefulSet", func() { newImage := NewNginxImage oldImage := ss.Spec.Template.Spec.Containers[0].Image - By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) - Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image") + ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) + gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Creating a new revision") + ginkgo.By("Creating a new revision") ss = sst.WaitForStatus(ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision - Expect(currentRevision).NotTo(Equal(updateRevision), + gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during rolling update") - By("Recreating Pods at the new revision") + ginkgo.By("Recreating Pods at the new revision") sst.DeleteStatefulPodAtIndex(0, ss) 
 			sst.DeleteStatefulPodAtIndex(1, ss)
 			sst.DeleteStatefulPodAtIndex(2, ss)
@@ -550,13 +550,13 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
 			pods = sst.GetPodList(ss)
 			for i := range pods.Items {
-				Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
+				gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
 					fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Spec.Containers[0].Image,
 						newImage))
-				Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
+				gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
 					fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
@@ -572,34 +572,34 @@ var _ = SIGDescribe("StatefulSet", func() {
 		*/
 		framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
 			psLabels := klabels.Set(labels)
-			By("Initializing watcher for selector " + psLabels.String())
+			ginkgo.By("Initializing watcher for selector " + psLabels.String())
 			watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
 				LabelSelector: psLabels.AsSelector().String(),
 			})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			By("Creating stateful set " + ssName + " in namespace " + ns)
+			ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
 			ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
 			sst := framework.NewStatefulSetTester(c)
 			sst.SetHTTPProbe(ss)
 			ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
+			ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
 			sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

-			By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
+			ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
 			sst.BreakHTTPProbe(ss)
 			sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
 			sst.WaitForStatusReadyReplicas(ss, 0)
 			sst.UpdateReplicas(ss, 3)
 			sst.ConfirmStatefulPodCount(1, ss, 10*time.Second, true)

-			By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
+			ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
 			sst.RestoreHTTPProbe(ss)
 			sst.WaitForRunningAndReady(3, ss)

-			By("Verifying that stateful set " + ssName + " was scaled up in order")
+			ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order")
 			expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
 			ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
 			defer cancel()
@@ -614,13 +614,13 @@ var _ = SIGDescribe("StatefulSet", func() {
 				return len(expectedOrder) == 0, nil
 			})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			By("Scale down will halt with unhealthy stateful pod")
+			ginkgo.By("Scale down will halt with unhealthy stateful pod")
 			watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
 				LabelSelector: psLabels.AsSelector().String(),
 			})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

 			sst.BreakHTTPProbe(ss)
 			sst.WaitForStatusReadyReplicas(ss, 0)
@@ -628,11 +628,11 @@ var _ = SIGDescribe("StatefulSet", func() {
 			sst.UpdateReplicas(ss, 0)
 			sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, true)

-			By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
+			ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
 			sst.RestoreHTTPProbe(ss)
 			sst.Scale(ss, 0)

-			By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
+			ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
 			expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
 			ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
 			defer cancel()
@@ -647,7 +647,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 				return len(expectedOrder) == 0, nil
 			})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		})

 		/*
@@ -658,36 +658,36 @@ var _ = SIGDescribe("StatefulSet", func() {
 		framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
 			psLabels := klabels.Set(labels)

-			By("Creating stateful set " + ssName + " in namespace " + ns)
+			ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
 			ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
 			ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
 			sst := framework.NewStatefulSetTester(c)
 			sst.SetHTTPProbe(ss)
 			ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
+			ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
 			sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

-			By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
+			ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
 			sst.BreakHTTPProbe(ss)
 			sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
 			sst.WaitForStatusReadyReplicas(ss, 0)
 			sst.UpdateReplicas(ss, 3)
 			sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false)

-			By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
+			ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
 			sst.RestoreHTTPProbe(ss)
 			sst.WaitForRunningAndReady(3, ss)

-			By("Scale down will not halt with unhealthy stateful pod")
+			ginkgo.By("Scale down will not halt with unhealthy stateful pod")
 			sst.BreakHTTPProbe(ss)
 			sst.WaitForStatusReadyReplicas(ss, 0)
 			sst.WaitForRunningAndNotReady(3, ss)
 			sst.UpdateReplicas(ss, 0)
 			sst.ConfirmStatefulPodCount(0, ss, 10*time.Second, false)

-			By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
+			ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
 			sst.RestoreHTTPProbe(ss)
 			sst.Scale(ss, 0)
 			sst.WaitForStatusReplicas(ss, 0)
@@ -701,11 +701,11 @@ var _ = SIGDescribe("StatefulSet", func() {
statefulset", func() { podName := "test-pod" statefulPodName := ssName + "-0" - By("Looking for a node to schedule stateful set and pod") + ginkgo.By("Looking for a node to schedule stateful set and pod") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) node := nodes.Items[0] - By("Creating pod with conflicting port in namespace " + f.Namespace.Name) + ginkgo.By("Creating pod with conflicting port in namespace " + f.Namespace.Name) conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"} pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -725,7 +725,7 @@ var _ = SIGDescribe("StatefulSet", func() { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) - By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) + ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) ss := framework.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels) statefulPodContainer := &ss.Spec.Template.Spec.Containers[0] statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) @@ -733,13 +733,13 @@ var _ = SIGDescribe("StatefulSet", func() { _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ss) framework.ExpectNoError(err) - By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) + ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) if err := f.WaitForPodRunning(podName); err != nil { framework.Failf("Pod %v did not start running: %v", podName, err) } var initialStatefulPodUID types.UID - By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) + ginkgo.By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) framework.ExpectNoError(err) ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout) @@ -764,13 +764,13 @@ var _ = SIGDescribe("StatefulSet", func() { framework.Failf("Pod %v expected to be re-created at least once", statefulPodName) } - By("Removing pod with conflicting port in namespace " + f.Namespace.Name) + ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name) err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") + ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") // we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry - Eventually(func() error { + gomega.Eventually(func() error { statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{}) if err != nil { return err @@ -781,41 +781,41 @@ var _ = SIGDescribe("StatefulSet", func() { return fmt.Errorf("Pod %v wasn't recreated: %v == %v", statefulPod.Name, statefulPod.UID, initialStatefulPodUID) } return nil - }, framework.StatefulPodTimeout, 2*time.Second).Should(BeNil()) + }, 
framework.StatefulPodTimeout, 2*time.Second).Should(gomega.BeNil()) }) - It("should have a working scale subresource", func() { - By("Creating statefulset " + ssName + " in namespace " + ns) + ginkgo.It("should have a working scale subresource", func() { + ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) sst := framework.NewStatefulSetTester(c) sst.SetHTTPProbe(ss) ss, err := c.AppsV1().StatefulSets(ns).Create(ss) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) - By("getting scale subresource") + ginkgo.By("getting scale subresource") scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } - Expect(scale.Spec.Replicas).To(Equal(int32(1))) - Expect(scale.Status.Replicas).To(Equal(int32(1))) + gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1))) + gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1))) - By("updating a scale subresource") + ginkgo.By("updating a scale subresource") scale.Spec.Replicas = 2 scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } - Expect(scaleResult.Spec.Replicas).To(Equal(int32(2))) + gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2))) - By("verifying the statefulset Spec.Replicas was modified") + ginkgo.By("verifying the statefulset Spec.Replicas was modified") ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) } - Expect(*(ss.Spec.Replicas)).To(Equal(int32(2))) + gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2))) }) }) @@ -823,13 +823,13 @@ var _ = SIGDescribe("StatefulSet", func() { var sst *framework.StatefulSetTester var appTester *clusterAppTester - BeforeEach(func() { + ginkgo.BeforeEach(func() { sst = framework.NewStatefulSetTester(c) appTester = &clusterAppTester{tester: sst, ns: ns} }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DumpDebugInfo(c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) @@ -838,28 +838,28 @@ var _ = SIGDescribe("StatefulSet", func() { // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. - It("should creating a working zookeeper cluster", func() { + ginkgo.It("should creating a working zookeeper cluster", func() { appTester.statefulPod = &zookeeperTester{tester: sst} appTester.run() }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. - It("should creating a working redis cluster", func() { + ginkgo.It("should creating a working redis cluster", func() { appTester.statefulPod = &redisTester{tester: sst} appTester.run() }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. - It("should creating a working mysql cluster", func() { + ginkgo.It("should creating a working mysql cluster", func() { appTester.statefulPod = &mysqlGaleraTester{tester: sst} appTester.run() }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. 
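[Note] The GetScale/UpdateScale round trip exercised in the scale-subresource test above is plain client-go; nothing about it is e2e-specific. A minimal sketch of the same flow against a live cluster, assuming a kubeconfig at a hard-coded path and an existing StatefulSet default/web (both illustrative, not taken from the test):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Build a clientset from a local kubeconfig (assumed path).
        config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
        if err != nil {
            panic(err)
        }
        c := kubernetes.NewForConfigOrDie(config)

        // Read the scale subresource instead of the full StatefulSet object.
        scale, err := c.AppsV1().StatefulSets("default").GetScale("web", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("spec=%d status=%d\n", scale.Spec.Replicas, scale.Status.Replicas)

        // Mutate only the desired replica count and write it back through the
        // same subresource; the controller reconciles everything else.
        scale.Spec.Replicas = 2
        if _, err := c.AppsV1().StatefulSets("default").UpdateScale("web", scale); err != nil {
            panic(err)
        }
    }

Going through the scale subresource touches only the replica count, which is why the test asserts on Scale objects rather than patching the whole spec.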
- It("should creating a working CockroachDB cluster", func() { + ginkgo.It("should creating a working CockroachDB cluster", func() { appTester.statefulPod = &cockroachDBTester{tester: sst} appTester.run() }) @@ -892,10 +892,10 @@ type clusterAppTester struct { } func (c *clusterAppTester) run() { - By("Deploying " + c.statefulPod.name()) + ginkgo.By("Deploying " + c.statefulPod.name()) ss := c.statefulPod.deploy(c.ns) - By("Creating foo:bar in member with index 0") + ginkgo.By("Creating foo:bar in member with index 0") c.statefulPod.write(0, map[string]string{"foo": "bar"}) switch c.statefulPod.(type) { @@ -903,13 +903,13 @@ func (c *clusterAppTester) run() { // Don't restart MySQL cluster since it doesn't handle restarts well default: if restartCluster { - By("Restarting stateful set " + ss.Name) + ginkgo.By("Restarting stateful set " + ss.Name) c.tester.Restart(ss) c.tester.WaitForRunningAndReady(*ss.Spec.Replicas, ss) } } - By("Reading value under foo from member with index 2") + ginkgo.By("Reading value under foo from member with index 2") if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil { framework.Failf("%v", err) } @@ -1085,16 +1085,16 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) { sst := framework.NewStatefulSetTester(c) sst.SetHTTPProbe(ss) ss, err := c.AppsV1().StatefulSets(ns).Create(ss) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision - Expect(currentRevision).To(Equal(updateRevision), + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) pods := sst.GetPodList(ss) for i := range pods.Items { - Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision), + gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, @@ -1103,45 +1103,45 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) { } sst.SortStatefulPods(pods) err = sst.BreakPodHTTPProbe(ss, &pods.Items[1]) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) newImage := NewNginxImage oldImage := ss.Spec.Template.Spec.Containers[0].Image - By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage)) - Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image") + ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage)) + gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Creating a new revision") + ginkgo.By("Creating a new revision") ss = sst.WaitForStatus(ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision - 
Expect(currentRevision).NotTo(Equal(updateRevision), + gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during rolling update") - By("Updating Pods in reverse ordinal order") + ginkgo.By("Updating Pods in reverse ordinal order") pods = sst.GetPodList(ss) sst.SortStatefulPods(pods) err = sst.RestorePodHTTPProbe(ss, &pods.Items[1]) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name) ss, pods = sst.WaitForRollingUpdate(ss) - Expect(ss.Status.CurrentRevision).To(Equal(updateRevision), + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision), fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", ss.Namespace, ss.Name, ss.Status.CurrentRevision, updateRevision)) for i := range pods.Items { - Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage), + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), fmt.Sprintf(" Pod %s/%s has image %s not have new image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, newImage)) - Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision), + gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision), fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s", pods.Items[i].Namespace, pods.Items[i].Name, @@ -1149,30 +1149,30 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) { updateRevision)) } - By("Rolling back to a previous revision") + ginkgo.By("Rolling back to a previous revision") err = sst.BreakPodHTTPProbe(ss, &pods.Items[1]) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) priorRevision := currentRevision currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = oldImage }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ss = sst.WaitForStatus(ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision - Expect(currentRevision).NotTo(Equal(updateRevision), + gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during roll back") - Expect(priorRevision).To(Equal(updateRevision), + gomega.Expect(priorRevision).To(gomega.Equal(updateRevision), "Prior revision should equal update revision during roll back") - By("Rolling back update in reverse ordinal order") + ginkgo.By("Rolling back update in reverse ordinal order") pods = sst.GetPodList(ss) sst.SortStatefulPods(pods) sst.RestorePodHTTPProbe(ss, &pods.Items[1]) ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name) ss, pods = sst.WaitForRollingUpdate(ss) - Expect(ss.Status.CurrentRevision).To(Equal(priorRevision), + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(priorRevision), fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", ss.Namespace, ss.Name, @@ -1180,13 +1180,13 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) { updateRevision)) for i := range pods.Items { - 
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage), + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, oldImage)) - Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision), + gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(priorRevision), fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s", pods.Items[i].Namespace, pods.Items[i].Name, diff --git a/test/e2e/apps/types.go b/test/e2e/apps/types.go index 0b9d9c70d15..e465019d4bd 100644 --- a/test/e2e/apps/types.go +++ b/test/e2e/apps/types.go @@ -28,11 +28,24 @@ const ( ) var ( + // CronJobGroupVersionResourceAlpha unambiguously identifies a resource of cronjob with alpha status CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"} - CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"} - NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus) - KittenImage = imageutils.GetE2EImage(imageutils.Kitten) - NginxImage = imageutils.GetE2EImage(imageutils.Nginx) - NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew) - RedisImage = imageutils.GetE2EImage(imageutils.Redis) + + // CronJobGroupVersionResourceBeta unambiguously identifies a resource of cronjob with beta status + CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"} + + // NautilusImage is the fully qualified URI to the Nautilus image + NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus) + + // KittenImage is the fully qualified URI to the Kitten image + KittenImage = imageutils.GetE2EImage(imageutils.Kitten) + + // NginxImage is the fully qualified URI to the Nginx image + NginxImage = imageutils.GetE2EImage(imageutils.Nginx) + + // NewNginxImage is the fully qualified URI to the NginxNew image + NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew) + + // RedisImage is the fully qualified URI to the Redis image + RedisImage = imageutils.GetE2EImage(imageutils.Redis) ) From 4c713e94d9037c6e26a9567faf5b91032dd2cc17 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Wed, 17 Apr 2019 19:37:35 +0800 Subject: [PATCH 047/209] fix golint failures of pkg/kubectl/apps --- hack/.golint_failures | 1 - pkg/controller/cronjob/controller_test.go | 18 +++++++++--------- pkg/kubectl/apps/kind_visitor.go | 14 -------------- 3 files changed, 9 insertions(+), 24 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 708080a8cb9..3fd6d1260e5 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -123,7 +123,6 @@ pkg/features pkg/kubeapiserver pkg/kubeapiserver/options pkg/kubectl -pkg/kubectl/apps pkg/kubectl/cmd/annotate pkg/kubectl/cmd/apply pkg/kubectl/cmd/attach diff --git a/pkg/controller/cronjob/controller_test.go b/pkg/controller/cronjob/controller_test.go index acb96656f2c..5c0a0b008e8 100644 --- a/pkg/controller/cronjob/controller_test.go +++ b/pkg/controller/cronjob/controller_test.go @@ -153,15 +153,15 @@ func newJob(UID string) batchv1.Job { } var ( - shortDead int64 = 10 - mediumDead int64 = 2 * 60 * 60 - longDead int64 = 1000000 - noDead int64 = -12345 - A batchV1beta1.ConcurrencyPolicy = batchV1beta1.AllowConcurrent - f batchV1beta1.ConcurrencyPolicy = 
batchV1beta1.ForbidConcurrent - R batchV1beta1.ConcurrencyPolicy = batchV1beta1.ReplaceConcurrent - T = true - F = false + shortDead int64 = 10 + mediumDead int64 = 2 * 60 * 60 + longDead int64 = 1000000 + noDead int64 = -12345 + A = batchV1beta1.AllowConcurrent + f = batchV1beta1.ForbidConcurrent + R = batchV1beta1.ReplaceConcurrent + T = true + F = false ) func TestSyncOne_RunOrNot(t *testing.T) { diff --git a/pkg/kubectl/apps/kind_visitor.go b/pkg/kubectl/apps/kind_visitor.go index 9c3ad07fe7e..931c63b1834 100644 --- a/pkg/kubectl/apps/kind_visitor.go +++ b/pkg/kubectl/apps/kind_visitor.go @@ -73,17 +73,3 @@ func (elem GroupKindElement) GroupMatch(groups ...string) bool { } return false } - -// NoOpKindVisitor implements KindVisitor with no-op functions. -type NoOpKindVisitor struct{} - -var _ KindVisitor = &NoOpKindVisitor{} - -func (*NoOpKindVisitor) VisitDaemonSet(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitDeployment(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitJob(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitPod(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitReplicaSet(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitReplicationController(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitStatefulSet(kind GroupKindElement) {} -func (*NoOpKindVisitor) VisitCronJob(kind GroupKindElement) {} From 6630d7c587578864d80ad9cb29b14eca02b8b08a Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Thu, 11 Apr 2019 12:05:31 +0800 Subject: [PATCH 048/209] add feature gates for switching between the legacy inflight limiting --- pkg/features/kube_features.go | 1 + .../src/k8s.io/apiserver/pkg/features/kube_features.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 61f2bb93745..b392650913e 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -519,6 +519,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, genericfeatures.DryRun: {Default: true, PreRelease: utilfeature.Beta}, genericfeatures.ServerSideApply: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.RequestManagement: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go index da99c2e667b..517c940b535 100644 --- a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go +++ b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go @@ -115,6 +115,13 @@ const ( // // Enables support for watch bookmark events. 
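[Note] The RequestManagement wiring above follows the standard feature-gate recipe: declare a Feature constant, register a FeatureSpec with its default and maturity level, then guard code paths with an Enabled check (as the flag validation further down does). A self-contained sketch of that recipe with a hypothetical gate name, not the real RequestManagement plumbing:

    package main

    import (
        "fmt"

        utilfeature "k8s.io/apiserver/pkg/util/feature"
    )

    // MyExperimentalThing is a made-up gate, declared the same way as the
    // gates in kube_features.go: a named constant plus a registered spec.
    const MyExperimentalThing utilfeature.Feature = "MyExperimentalThing"

    func init() {
        // Register the gate with its default value and maturity level.
        if err := utilfeature.DefaultMutableFeatureGate.Add(map[utilfeature.Feature]utilfeature.FeatureSpec{
            MyExperimentalThing: {Default: false, PreRelease: utilfeature.Alpha},
        }); err != nil {
            panic(err)
        }
    }

    func main() {
        // Callers guard new behavior behind an Enabled check; operators can
        // flip it with --feature-gates=MyExperimentalThing=true.
        if utilfeature.DefaultFeatureGate.Enabled(MyExperimentalThing) {
            fmt.Println("new code path")
        } else {
            fmt.Println("legacy code path")
        }
    }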
WatchBookmark utilfeature.Feature = "WatchBookmark" + + // owner: @MikeSpreitzer @yue9944882 + // alpha: v1.15 + // + // + // Enables managing request concurrency with prioritization and fairness at each server + RequestManagement utilfeature.Feature = "RequestManagement" ) func init() { @@ -137,4 +144,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS WinOverlay: {Default: false, PreRelease: utilfeature.Alpha}, WinDSR: {Default: false, PreRelease: utilfeature.Alpha}, WatchBookmark: {Default: false, PreRelease: utilfeature.Alpha}, + RequestManagement: {Default: false, PreRelease: utilfeature.Alpha}, } From 87d09301e59ac65e55dba32614b8e8b3bae2dd8f Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 15 Apr 2019 14:57:55 +0800 Subject: [PATCH 049/209] add new flag for enabling requests mgmt handler --- .../pkg/server/options/server_run_options.go | 35 +++++++++++++++---- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go b/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go index 02639bf93b9..62af780c5ba 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -18,6 +18,7 @@ package options import ( "fmt" + "k8s.io/apiserver/pkg/features" "net" "time" @@ -49,8 +50,9 @@ type ServerRunOptions struct { // decoded in a write request. 0 means no limit. // We intentionally did not add a flag for this option. Users of the // apiserver library can wire it to a flag. - MaxRequestBodyBytes int64 - TargetRAMMB int + MaxRequestBodyBytes int64 + TargetRAMMB int + EnableInfightQuotaHandler bool } func NewServerRunOptions() *ServerRunOptions { @@ -104,11 +106,27 @@ func (s *ServerRunOptions) Validate() []error { if s.TargetRAMMB < 0 { errors = append(errors, fmt.Errorf("--target-ram-mb can not be negative value")) } - if s.MaxRequestsInFlight < 0 { - errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value")) - } - if s.MaxMutatingRequestsInFlight < 0 { - errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value")) + + if s.EnableInfightQuotaHandler { + if !utilfeature.DefaultFeatureGate.Enabled(features.RequestManagement) { + errors = append(errors, fmt.Errorf("--enable-inflight-quota-handler can not be set if feature "+ + "gate RequestManagement is disabled")) + } + if s.MaxMutatingRequestsInFlight != 0 { + errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight=%v "+ + "can not be set if enabled inflight quota handler", s.MaxMutatingRequestsInFlight)) + } + if s.MaxRequestsInFlight != 0 { + errors = append(errors, fmt.Errorf("--max-requests-inflight=%v "+ + "can not be set if enabled inflight quota handler", s.MaxRequestsInFlight)) + } + } else { + if s.MaxRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value")) + } + if s.MaxMutatingRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value")) + } } if s.RequestTimeout.Nanoseconds() < 0 { @@ -174,5 +192,8 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") + fs.BoolVar(&s.EnableInfightQuotaHandler, "enable-inflight-quota-handler", s.EnableInfightQuotaHandler, ""+ + "If true, 
replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") + utilfeature.DefaultMutableFeatureGate.AddFlag(fs) } From c2fc91adc7ca05e66a6b4a03303236c4e9a5325d Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Wed, 17 Apr 2019 14:50:55 +0200 Subject: [PATCH 050/209] Fix rounding-up of Vsphere volume size --- pkg/volume/vsphere_volume/vsphere_volume_util.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/volume/vsphere_volume/vsphere_volume_util.go b/pkg/volume/vsphere_volume/vsphere_volume_util.go index 35dacb5ce9f..98d4ae28ea8 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -94,11 +94,12 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner, selectedZ } capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - // vSphere works with kilobytes, convert to KiB with rounding up - volSizeKiB, err := volumehelpers.RoundUpToKiBInt(capacity) + // vSphere works with KiB, but its minimum allocation unit is 1 MiB + volSizeMiB, err := volumehelpers.RoundUpToMiBInt(capacity) if err != nil { return nil, err } + volSizeKiB := volSizeMiB * 1024 name := volumeutil.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255) volumeOptions := &vclib.VolumeOptions{ CapacityKB: volSizeKiB, From 1c71a2312b8ecc7070a51b8b7f02f1c033146ab5 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Wed, 17 Apr 2019 14:52:10 +0200 Subject: [PATCH 051/209] Warn to stderr when we encounter PathError listing plugins --- pkg/kubectl/cmd/plugin/BUILD | 1 - pkg/kubectl/cmd/plugin/plugin.go | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/kubectl/cmd/plugin/BUILD b/pkg/kubectl/cmd/plugin/BUILD index 6541bc9fcb9..5d9b2cd2b1c 100644 --- a/pkg/kubectl/cmd/plugin/BUILD +++ b/pkg/kubectl/cmd/plugin/BUILD @@ -11,7 +11,6 @@ go_library( "//pkg/kubectl/util/templates:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/plugin/plugin.go b/pkg/kubectl/cmd/plugin/plugin.go index ebe1f09525e..99c55d604f5 100644 --- a/pkg/kubectl/cmd/plugin/plugin.go +++ b/pkg/kubectl/cmd/plugin/plugin.go @@ -28,7 +28,6 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/klog" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/util/templates" @@ -116,8 +115,8 @@ func (o *PluginListOptions) Run() error { for _, dir := range uniquePathsList(o.PluginPaths) { files, err := ioutil.ReadDir(dir) if err != nil { - if _, ok := err.(*os.PathError); ok && strings.Contains(err.Error(), "no such file") { - klog.V(3).Infof("unable to find directory %q in your PATH. Skipping...", dir) + if _, ok := err.(*os.PathError); ok { + fmt.Fprintf(o.ErrOut, "Unable read directory %q from your PATH: %v. 
Skipping...", dir, err) continue } From f50696adda97e01b25cca56029ff663cf208a5e8 Mon Sep 17 00:00:00 2001 From: Joel Smith Date: Thu, 11 Apr 2019 11:21:58 -0600 Subject: [PATCH 052/209] Fix potential test flakes in HPA tests TestEventNotCreated and TestAvoidUncessaryUpdates Also, re-work the code so that the lock is never held while writing to the chan --- .../podautoscaler/horizontal_test.go | 89 +++++++++++-------- .../podautoscaler/legacy_horizontal_test.go | 25 +++--- 2 files changed, 66 insertions(+), 48 deletions(-) diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 524f6e7c8ae..bf78265d7d7 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -330,38 +330,43 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa }) fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) { - tc.Lock() - defer tc.Unlock() + handled, obj, err := func() (handled bool, ret *autoscalingv1.HorizontalPodAutoscaler, err error) { + tc.Lock() + defer tc.Unlock() - obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler) - assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected") - assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected") - assert.Equal(t, tc.expectedDesiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected") - if tc.verifyCPUCurrent { - if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") { - assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected") + obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler) + assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected") + assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected") + assert.Equal(t, tc.expectedDesiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected") + if tc.verifyCPUCurrent { + if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") { + assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected") + } } + var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition + if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil { + return true, nil, err + } + // TODO: it's ok not to sort these becaues statusOk + // contains all the conditions, so we'll never be appending. + // Default to statusOk when missing any specific conditions + if tc.expectedConditions == nil { + tc.expectedConditions = statusOkWithOverrides() + } + // clear the message so that we can easily compare + for i := range actualConditions { + actualConditions[i].Message = "" + actualConditions[i].LastTransitionTime = metav1.Time{} + } + assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected") + tc.statusUpdated = true + // Every time we reconcile HPA object we are updating status. 
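[Note] The flake fix above happens inside a reactor on the fake clientset, which is how these tests intercept every API call the controller makes. For orientation, a minimal sketch of wiring a reactor; the resource, namespace, and printed message are illustrative only, not the HPA test's actual logic:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/client-go/kubernetes/fake"
        core "k8s.io/client-go/testing"
    )

    func main() {
        client := fake.NewSimpleClientset()

        // PrependReactor runs before the default object-tracker reactors,
        // so it can observe or rewrite any matching action.
        client.PrependReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
            obj := action.(core.UpdateAction).GetObject().(*appsv1.StatefulSet)
            fmt.Printf("intercepted update of %s/%s\n", obj.Namespace, obj.Name)
            // handled=false falls through to the tracker's default handling.
            return false, nil, nil
        })

        ss := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}
        if _, err := client.AppsV1().StatefulSets("default").Create(ss); err != nil {
            panic(err)
        }
        if _, err := client.AppsV1().StatefulSets("default").Update(ss); err != nil {
            panic(err)
        }
    }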
+ return true, obj, nil + }() + if obj != nil { + tc.processed <- obj.Name } - var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition - if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil { - return true, nil, err - } - // TODO: it's ok not to sort these becaues statusOk - // contains all the conditions, so we'll never be appending. - // Default to statusOk when missing any specific conditions - if tc.expectedConditions == nil { - tc.expectedConditions = statusOkWithOverrides() - } - // clear the message so that we can easily compare - for i := range actualConditions { - actualConditions[i].Message = "" - actualConditions[i].LastTransitionTime = metav1.Time{} - } - assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected") - tc.statusUpdated = true - // Every time we reconcile HPA object we are updating status. - tc.processed <- obj.Name - return true, obj, nil + return handled, obj, err }) fakeScaleClient := &scalefake.FakeScaleClient{} @@ -701,15 +706,25 @@ func (tc *testCase) runTestWithController(t *testing.T, hpaController *Horizonta go hpaController.Run(stop) tc.Lock() - if tc.verifyEvents { - tc.Unlock() + shouldWait := tc.verifyEvents + tc.Unlock() + + if shouldWait { // We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration). - time.Sleep(2 * time.Second) + timeoutTime := time.Now().Add(2 * time.Second) + for now := time.Now(); timeoutTime.After(now); now = time.Now() { + sleepUntil := timeoutTime.Sub(now) + select { + case <-tc.processed: + // drain the chan of any sent events to keep it from filling before the timeout + case <-time.After(sleepUntil): + // timeout reached, ready to verifyResults + } + } } else { - tc.Unlock() + // Wait for HPA to be processed. + <-tc.processed } - // Wait for HPA to be processed. 
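[Note] The loop above replaces a bare two-second sleep with a bounded drain: keep receiving until the deadline so a late send can never block the producer or spill into the next assertion. The same pattern, reduced to a standalone helper with a hypothetical channel and a shortened timeout:

    package main

    import (
        "fmt"
        "time"
    )

    // drainUntil keeps receiving from ch until the deadline passes, so a
    // sender can never block on a value nobody reads; it returns the count.
    func drainUntil(ch <-chan string, d time.Duration) int {
        drained := 0
        deadline := time.Now().Add(d)
        for now := time.Now(); deadline.After(now); now = time.Now() {
            select {
            case <-ch:
                // Keep the channel empty so senders never block.
                drained++
            case <-time.After(deadline.Sub(now)):
                // Deadline reached with nothing pending; the loop condition
                // fails on the next iteration and we fall out.
            }
        }
        return drained
    }

    func main() {
        ch := make(chan string)
        go func() {
            ch <- "event-1"
            ch <- "event-2"
        }()
        fmt.Println("drained:", drainUntil(ch, 200*time.Millisecond))
    }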
- <-tc.processed tc.verifyResults(t) } @@ -2418,7 +2433,9 @@ func TestAvoidUncessaryUpdates(t *testing.T) { // wait a tick and then mark that we're finished (otherwise, we have no // way to indicate that we're finished, because the function decides not to do anything) time.Sleep(1 * time.Second) + tc.Lock() tc.statusUpdated = true + tc.Unlock() tc.processed <- "test-hpa" }() @@ -2493,8 +2510,6 @@ func TestAvoidUncessaryUpdates(t *testing.T) { return true, objv1, nil }) testClient.PrependReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) { - tc.Lock() - defer tc.Unlock() assert.Fail(t, "should not have attempted to update the HPA when nothing changed") // mark that we've processed this HPA tc.processed <- "" diff --git a/pkg/controller/podautoscaler/legacy_horizontal_test.go b/pkg/controller/podautoscaler/legacy_horizontal_test.go index 8b42fe9a8ed..a0621a36d33 100644 --- a/pkg/controller/podautoscaler/legacy_horizontal_test.go +++ b/pkg/controller/podautoscaler/legacy_horizontal_test.go @@ -332,19 +332,22 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *sca }) fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) { - tc.Lock() - defer tc.Unlock() + obj := func() *autoscalingv1.HorizontalPodAutoscaler { + tc.Lock() + defer tc.Unlock() - obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler) - assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected") - assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected") - assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected") - if tc.verifyCPUCurrent { - if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") { - assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected") + obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler) + assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected") + assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected") + assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected") + if tc.verifyCPUCurrent { + if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") { + assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected") + } } - } - tc.statusUpdated = true + tc.statusUpdated = true + return obj + }() // Every time we reconcile HPA object we are updating status. 
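[Note] The legacy test gets the same treatment as horizontal_test.go: all mutex-guarded work moves into a closure whose return value is captured, and the channel send happens only after the lock is released. Distilled to a toy state struct (the names are invented, not the test's types):

    package main

    import (
        "fmt"
        "sync"
    )

    type recorder struct {
        sync.Mutex
        updates   int
        processed chan string
    }

    // observe does its guarded bookkeeping inside a closure; the closure's
    // return value is captured so the channel send runs only after the
    // deferred Unlock. Sending while holding the mutex can deadlock when the
    // receiver also needs the lock.
    func (r *recorder) observe(name string) {
        n := func() string {
            r.Lock()
            defer r.Unlock()
            r.updates++
            return name
        }()
        r.processed <- n
    }

    func main() {
        r := &recorder{processed: make(chan string, 1)}
        r.observe("test-hpa")
        fmt.Println("processed:", <-r.processed, "updates:", r.updates)
    }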
tc.processed <- obj.Name return true, obj, nil From 8762bc39a7bb0d0ad447fcea9bc01f42240c8617 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 17 Apr 2019 11:03:34 -0400 Subject: [PATCH 053/209] Remove deprecated centos/local support Change-Id: I4a451ec0125c05e66202daf781a4e5a1d895efb4 --- cluster/centos/.gitignore | 14 - cluster/centos/OWNERS | 4 - cluster/centos/build.sh | 137 ------- cluster/centos/config-build.sh | 52 --- cluster/centos/config-default.sh | 143 ------- cluster/centos/config-test.sh | 19 - cluster/centos/deployAddons.sh | 66 --- cluster/centos/make-ca-cert.sh | 89 ---- cluster/centos/master/scripts/apiserver.sh | 122 ------ .../master/scripts/controller-manager.sh | 61 --- cluster/centos/master/scripts/etcd.sh | 86 ---- cluster/centos/master/scripts/flannel.sh | 72 ---- cluster/centos/master/scripts/post-etcd.sh | 23 -- cluster/centos/master/scripts/scheduler.sh | 64 --- cluster/centos/node/bin/mk-docker-opts.sh | 113 ----- cluster/centos/node/bin/remove-docker0.sh | 27 -- cluster/centos/node/scripts/docker.sh | 48 --- cluster/centos/node/scripts/flannel.sh | 74 ---- cluster/centos/node/scripts/kubelet.sh | 98 ----- cluster/centos/node/scripts/proxy.sh | 56 --- cluster/centos/util.sh | 388 ------------------ cluster/kube-up.sh | 21 - cluster/local/util.sh | 33 -- 23 files changed, 1810 deletions(-) delete mode 100644 cluster/centos/.gitignore delete mode 100644 cluster/centos/OWNERS delete mode 100755 cluster/centos/build.sh delete mode 100755 cluster/centos/config-build.sh delete mode 100755 cluster/centos/config-default.sh delete mode 100644 cluster/centos/config-test.sh delete mode 100755 cluster/centos/deployAddons.sh delete mode 100755 cluster/centos/make-ca-cert.sh delete mode 100755 cluster/centos/master/scripts/apiserver.sh delete mode 100755 cluster/centos/master/scripts/controller-manager.sh delete mode 100755 cluster/centos/master/scripts/etcd.sh delete mode 100644 cluster/centos/master/scripts/flannel.sh delete mode 100644 cluster/centos/master/scripts/post-etcd.sh delete mode 100755 cluster/centos/master/scripts/scheduler.sh delete mode 100755 cluster/centos/node/bin/mk-docker-opts.sh delete mode 100755 cluster/centos/node/bin/remove-docker0.sh delete mode 100755 cluster/centos/node/scripts/docker.sh delete mode 100755 cluster/centos/node/scripts/flannel.sh delete mode 100755 cluster/centos/node/scripts/kubelet.sh delete mode 100755 cluster/centos/node/scripts/proxy.sh delete mode 100755 cluster/centos/util.sh delete mode 100755 cluster/local/util.sh diff --git a/cluster/centos/.gitignore b/cluster/centos/.gitignore deleted file mode 100644 index 8452b51087b..00000000000 --- a/cluster/centos/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -binaries -ca-cert -etcd-cert - -master/bin/etcd -master/bin/etcdctl -master/bin/kube* - -node/bin/docker -node/bin/etcd -node/bin/etcdctl -node/bin/flanneld -node/bin/kube* -local-test.sh diff --git a/cluster/centos/OWNERS b/cluster/centos/OWNERS deleted file mode 100644 index 3a91eff1417..00000000000 --- a/cluster/centos/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - zouyee diff --git a/cluster/centos/build.sh b/cluster/centos/build.sh deleted file mode 100755 index 1f8c0795df9..00000000000 --- a/cluster/centos/build.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Download the flannel, etcd, docker, bridge-utils and K8s binaries automatically -# and store into binaries directory. -# Run as sudoers only - -# author @kevin-wangzefeng - -set -o errexit -set -o nounset -set -o pipefail - -readonly ROOT=$(dirname "${BASH_SOURCE[0]}") -source "${ROOT}/config-build.sh" - -# ensure $RELEASES_DIR is an absolute file path -mkdir -p "${RELEASES_DIR}" -RELEASES_DIR=$(cd "${RELEASES_DIR}"; pwd) - -# get absolute file path of binaries -BINARY_DIR=$(cd "${ROOT}"; pwd)/binaries - -function clean-up() { - rm -rf "${RELEASES_DIR}" - rm -rf "${BINARY_DIR}" -} - -function download-releases() { - rm -rf "${RELEASES_DIR}" - mkdir -p "${RELEASES_DIR}" - - echo "Download flannel release v${FLANNEL_VERSION} ..." - curl -L "${FLANNEL_DOWNLOAD_URL}" -o "${RELEASES_DIR}/flannel.tar.gz" - - echo "Download etcd release v${ETCD_VERSION} ..." - curl -L "${ETCD_DOWNLOAD_URL}" -o "${RELEASES_DIR}/etcd.tar.gz" - - echo "Download kubernetes release v${K8S_VERSION} ..." - curl -L "${K8S_CLIENT_DOWNLOAD_URL}" -o "${RELEASES_DIR}/kubernetes-client-linux-amd64.tar.gz" - curl -L "${K8S_SERVER_DOWNLOAD_URL}" -o "${RELEASES_DIR}/kubernetes-server-linux-amd64.tar.gz" - - echo "Download docker release v${DOCKER_VERSION} ..." - curl -L "${DOCKER_DOWNLOAD_URL}" -o "${RELEASES_DIR}/docker.tar.gz" -} - -function unpack-releases() { - rm -rf "${BINARY_DIR}" - mkdir -p "${BINARY_DIR}/master/bin" - mkdir -p "${BINARY_DIR}/node/bin" - - # flannel - if [[ -f "${RELEASES_DIR}/flannel.tar.gz" ]] ; then - tar xzf "${RELEASES_DIR}/flannel.tar.gz" -C "${RELEASES_DIR}" - cp "${RELEASES_DIR}/flanneld" "${BINARY_DIR}/master/bin" - cp "${RELEASES_DIR}/flanneld" "${BINARY_DIR}/node/bin" - fi - - # etcd - if [[ -f "${RELEASES_DIR}/etcd.tar.gz" ]] ; then - tar xzf "${RELEASES_DIR}/etcd.tar.gz" -C "${RELEASES_DIR}" - ETCD="etcd-v${ETCD_VERSION}-linux-amd64" - cp "${RELEASES_DIR}/${ETCD}/etcd" \ - "${RELEASES_DIR}/${ETCD}/etcdctl" "${BINARY_DIR}/master/bin" - cp "${RELEASES_DIR}/${ETCD}/etcd" \ - "${RELEASES_DIR}/${ETCD}/etcdctl" "${BINARY_DIR}/node/bin" - fi - - # k8s - if [[ -f "${RELEASES_DIR}/kubernetes-client-linux-amd64.tar.gz" ]] ; then - tar xzf "${RELEASES_DIR}/kubernetes-client-linux-amd64.tar.gz" -C "${RELEASES_DIR}" - cp "${RELEASES_DIR}/kubernetes/client/bin/kubectl" "${BINARY_DIR}" - fi - - if [[ -f "${RELEASES_DIR}/kubernetes-server-linux-amd64.tar.gz" ]] ; then - tar xzf "${RELEASES_DIR}/kubernetes-server-linux-amd64.tar.gz" -C "${RELEASES_DIR}" - cp "${RELEASES_DIR}/kubernetes/server/bin/kube-apiserver" \ - "${RELEASES_DIR}/kubernetes/server/bin/kube-controller-manager" \ - "${RELEASES_DIR}/kubernetes/server/bin/kube-scheduler" "${BINARY_DIR}/master/bin" - cp "${RELEASES_DIR}/kubernetes/server/bin/kubelet" \ - "${RELEASES_DIR}/kubernetes/server/bin/kube-proxy" "${BINARY_DIR}/node/bin" - fi - - # docker - if [[ -f "${RELEASES_DIR}/docker.tar.gz" ]]; then - tar xzf "${RELEASES_DIR}/docker.tar.gz" -C "${RELEASES_DIR}" - - cp "${RELEASES_DIR}/docker/docker*" "${BINARY_DIR}/node/bin" - fi - - chmod -R +x "${BINARY_DIR}" - echo "Done! 
All binaries are stored in ${BINARY_DIR}" -} - -function parse-opt() { - local opt=${1-} - - case $opt in - download) - download-releases - ;; - unpack) - unpack-releases - ;; - clean) - clean-up - ;; - all) - download-releases - unpack-releases - ;; - *) - echo "Usage: " - echo " build.sh " - echo "Commands:" - echo " clean Clean up downloaded releases and unpacked binaries." - echo " download Download releases to \"${RELEASES_DIR}\"." - echo " unpack Unpack releases downloaded in \"${RELEASES_DIR}\", and copy binaries to \"${BINARY_DIR}\"." - echo " all Download releases and unpack them." - ;; - esac -} - -parse-opt "${@}" diff --git a/cluster/centos/config-build.sh b/cluster/centos/config-build.sh deleted file mode 100755 index 9ed75ae7974..00000000000 --- a/cluster/centos/config-build.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for the Binaries downloading and unpacking. - -# Directory to store release packages that will be downloaded. -RELEASES_DIR=${RELEASES_DIR:-/tmp/downloads} - -# Define docker version to use. -DOCKER_VERSION=${DOCKER_VERSION:-"1.12.1"} - -# Define flannel version to use. -FLANNEL_VERSION=${FLANNEL_VERSION:-"0.6.1"} - -# Define etcd version to use. -ETCD_VERSION=${ETCD_VERSION:-"3.0.9"} - -# Define k8s version to use. -K8S_VERSION=${K8S_VERSION:-"1.3.7"} - -# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell -DOCKER_DOWNLOAD_URL=\ -"https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz" - -# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell -FLANNEL_DOWNLOAD_URL=\ -"https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-v${FLANNEL_VERSION}-linux-amd64.tar.gz" - -# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell -ETCD_DOWNLOAD_URL=\ -"https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz" - -# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell -K8S_CLIENT_DOWNLOAD_URL=\ -"https://dl.k8s.io/v${K8S_VERSION}/kubernetes-client-linux-amd64.tar.gz" - -# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell -K8S_SERVER_DOWNLOAD_URL=\ -"https://dl.k8s.io/v${K8S_VERSION}/kubernetes-server-linux-amd64.tar.gz" diff --git a/cluster/centos/config-default.sh b/cluster/centos/config-default.sh deleted file mode 100755 index a08ff2fed92..00000000000 --- a/cluster/centos/config-default.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -readonly root=$(dirname "${BASH_SOURCE[0]}") - -## Contains configuration values for the CentOS cluster -# The user should have sudo privilege -export MASTER=${MASTER:-"centos@172.10.0.11"} -export MASTER_IP=${MASTER#*@} - -# Define all your master nodes, -# And separated with blank space like . -# The user should have sudo privilege -export MASTERS="${MASTERS:-$MASTER}" - -# length-of -# Get the length of specific arg0, could be a space-separate string or array. -function length-of() { - local len=0 - # shellcheck disable=SC2034 # Unused variables left for readability - for part in $1; do - ((++len)) - done - echo $len -} -# Number of nodes in your cluster. -export NUM_MASTERS="${NUM_MASTERS:-$(length-of "$MASTERS")}" - -# Get default master advertise address: first master node. -function default-advertise-address() { - # get the first master node - local masters_array=("${MASTERS}") - local master=${masters_array[0]} - echo "${master#*@}" -} - -# Define advertise address of masters, could be a load balancer address. -# If not provided, the default is ip of first master node. -export MASTER_ADVERTISE_ADDRESS="${MASTER_ADVERTISE_ADDRESS:-$(default-advertise-address)}" -export MASTER_ADVERTISE_IP="${MASTER_ADVERTISE_IP:-$(getent hosts "${MASTER_ADVERTISE_ADDRESS}" | awk '{print $1; exit}')}" - -# Define all your minion nodes, -# And separated with blank space like . -# The user should have sudo privilege -export NODES="${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}" - -# Number of nodes in your cluster. -export NUM_NODES="${NUM_NODES:-$(length-of "$NODES")}" - -# Should be removed when NUM_NODES is deprecated in validate-cluster.sh -export NUM_NODES="${NUM_NODES}" - -# By default, the cluster will use the etcd installed on master. -function concat-etcd-servers() { - local etcd_servers="" - for master in ${MASTERS}; do - local master_ip=${master#*@} - local prefix="" - if [ -n "$etcd_servers" ]; then - prefix="${etcd_servers}," - fi - etcd_servers="${prefix}https://${master_ip}:2379" - done - - echo "$etcd_servers" -} -ETCD_SERVERS="$(concat-etcd-servers)" -export ETCD_SERVERS - -# By default, etcd cluster will use runtime configuration -# https://coreos.com/etcd/docs/latest/v2/runtime-configuration.html -# Get etc initial cluster and store in ETCD_INITIAL_CLUSTER -function concat-etcd-initial-cluster() { - local etcd_initial_cluster="" - local num_infra=0 - for master in ${MASTERS}; do - local master_ip="${master#*@}" - if [ -n "$etcd_initial_cluster" ]; then - etcd_initial_cluster+="," - fi - etcd_initial_cluster+="infra${num_infra}=https://${master_ip}:2380" - ((++num_infra)) - done - - echo "$etcd_initial_cluster" -} -ETCD_INITIAL_CLUSTER="$(concat-etcd-initial-cluster)" -export ETCD_INITIAL_CLUSTER - -CERT_DIR="${CERT_DIR:-${root}/ca-cert}" -mkdir -p "${CERT_DIR}" -# CERT_DIR path must be absolute. -CERT_DIR="$(cd "${CERT_DIR}" && pwd)" -export CERT_DIR - -# define the IP range used for service cluster IPs. -# according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here. 
-export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-"192.168.3.0/24"} - -# Optional: Install cluster DNS. -ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -export ENABLE_CLUSTER_DNS -# DNS_SERVER_IP must be a IP in SERVICE_CLUSTER_IP_RANGE -DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.100"} -DNS_DOMAIN=${DNS_DOMAIN:-"cluster.local"} - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" -export ENABLE_CLUSTER_UI - -# define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE -export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"} - -# Admission Controllers to invoke prior to persisting objects in cluster. -# MutatingAdmissionWebhook should be the last controller that modifies the -# request object, otherwise users will be confused if the mutating webhooks' -# modification is overwritten. -# If we included ResourceQuota, we should keep it at the end of the list to -# prevent incrementing quota usage prematurely. -export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"} - -# Extra options to set on the Docker command line. -# This is useful for setting --insecure-registry for local registries. -export DOCKER_OPTS=${DOCKER_OPTS:-""} - - -# Timeouts for process checking on master and minion -export PROCESS_CHECK_TIMEOUT=${PROCESS_CHECK_TIMEOUT:-180} # seconds. - -unset -f default-advertise-address concat-etcd-servers length-of concat-etcd-initial-cluster diff --git a/cluster/centos/config-test.sh b/cluster/centos/config-test.sh deleted file mode 100644 index 45dd75c0da3..00000000000 --- a/cluster/centos/config-test.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## for CentOS/Fedora/RHEL cluster in test mode -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. -source "${KUBE_ROOT}/cluster/centos/config-default.sh" diff --git a/cluster/centos/deployAddons.sh b/cluster/centos/deployAddons.sh deleted file mode 100755 index fce75750f18..00000000000 --- a/cluster/centos/deployAddons.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
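[Note] Among the deleted helpers, config-default.sh built ETCD_SERVERS and ETCD_INITIAL_CLUSTER by looping over the master list (https://ip:2379 endpoints and infraN=https://ip:2380 peers, comma-joined). Purely for comparison, the same two joins expressed in Go; the sample addresses are made up:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Sample master IPs standing in for the ${MASTERS} list.
        masters := []string{"172.10.0.11", "172.10.0.14"}

        // ETCD_SERVERS: client endpoints, https://ip:2379, comma-joined.
        servers := make([]string, 0, len(masters))
        // ETCD_INITIAL_CLUSTER: peer entries, infraN=https://ip:2380.
        initial := make([]string, 0, len(masters))
        for i, ip := range masters {
            servers = append(servers, fmt.Sprintf("https://%s:2379", ip))
            initial = append(initial, fmt.Sprintf("infra%d=https://%s:2380", i, ip))
        }

        fmt.Println("ETCD_SERVERS=" + strings.Join(servers, ","))
        fmt.Println("ETCD_INITIAL_CLUSTER=" + strings.Join(initial, ","))
    }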
- -# deploy the add-on services after the cluster is available - -set -e - -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. -source "${KUBE_ROOT}/cluster/centos/config-default.sh" -KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" -export KUBECTL_PATH="${KUBE_ROOT}/cluster/centos/binaries/kubectl" -export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/centos/config-default.sh} - -function deploy_dns { - echo "Deploying DNS on Kubernetes" - cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.sed" kube-dns.yaml - sed -i -e "s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g" kube-dns.yaml - sed -i -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" kube-dns.yaml - - KUBEDNS=$("${KUBECTL} get services --namespace=kube-system | grep kube-dns | cat") - - if [ ! "$KUBEDNS" ]; then - # use kubectl to create kube-dns addon - ${KUBECTL} --namespace=kube-system create -f kube-dns.yaml - - echo "Kube-dns addon is successfully deployed." - else - echo "Kube-dns addon is already deployed. Skipping." - fi - - echo -} - -function deploy_dashboard { - echo "Deploying Kubernetes Dashboard" - - ${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml" - ${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml" - ${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml" - ${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml" - ${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml" - - echo -} - - -if [ "${ENABLE_CLUSTER_DNS}" == true ]; then - deploy_dns -fi - -if [ "${ENABLE_CLUSTER_UI}" == true ]; then - deploy_dashboard -fi diff --git a/cluster/centos/make-ca-cert.sh b/cluster/centos/make-ca-cert.sh deleted file mode 100755 index 8206e45f637..00000000000 --- a/cluster/centos/make-ca-cert.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -DEBUG="${DEBUG:-false}" - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -cert_ip=$1 -extra_sans=${2:-} -cert_dir=${CERT_DIR:-/srv/kubernetes} -cert_group=${CERT_GROUP:-kube-cert} - -mkdir -p "$cert_dir" - -use_cn=false - -sans="IP:${cert_ip}" -if [[ -n "${extra_sans}" ]]; then - sans="${sans},${extra_sans}" -fi - -tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX) -trap 'rm -rf "${tmpdir}"' EXIT -cd "${tmpdir}" - -# TODO: For now, this is a patched tool that makes subject-alt-name work, when -# the fix is upstream move back to the upstream easyrsa. 
This is cached in GCS -# but is originally taken from: -# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz -# -# To update, do the following: -# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz -# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz -# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz -# -# Due to GCS caching of public objects, it may take time for this to be widely -# distributed. -# -# Use ~/kube/easy-rsa.tar.gz if it exists, so that it can be -# pre-pushed in cases where an outgoing connection is not allowed. -if [ -f ~/kube/easy-rsa.tar.gz ]; then - ln -s ~/kube/easy-rsa.tar.gz . -else - curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1 -fi -tar xzf easy-rsa.tar.gz > /dev/null 2>&1 - -cd easy-rsa-master/easyrsa3 -./easyrsa init-pki > /dev/null 2>&1 -./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1 -if [ $use_cn = "true" ]; then - ./easyrsa build-server-full "${cert_ip}" nopass > /dev/null 2>&1 - cp -p "pki/issued/${cert_ip}.crt" "${cert_dir}/server.cert" > /dev/null 2>&1 - cp -p "pki/private/${cert_ip}.key" "${cert_dir}/server.key" > /dev/null 2>&1 -else - ./easyrsa --subject-alt-name="${sans}" build-server-full kubernetes-master nopass > /dev/null 2>&1 - cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1 - cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1 -fi -# Make a superuser client cert with subject "O=system:masters, CN=kubecfg" -./easyrsa --dn-mode=org \ - --req-cn=kubecfg --req-org=system:masters \ - --req-c= --req-st= --req-city= --req-email= --req-ou= \ - build-client-full kubecfg nopass > /dev/null 2>&1 -cp -p pki/ca.crt "${cert_dir}/ca.crt" -cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" -cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" -# Make server certs accessible to apiserver. -chgrp "${cert_group}" "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" -chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" diff --git a/cluster/centos/master/scripts/apiserver.sh b/cluster/centos/master/scripts/apiserver.sh deleted file mode 100755 index be62549ce47..00000000000 --- a/cluster/centos/master/scripts/apiserver.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -MASTER_ADDRESS=${1:-"8.8.8.18"} -ETCD_SERVERS=${2:-"https://8.8.8.18:2379"} -SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/24"} -ADMISSION_CONTROL=${4:-""} - -cat </opt/kubernetes/cfg/kube-apiserver -# --logtostderr=true: log to standard error instead of files -KUBE_LOGTOSTDERR="--logtostderr=true" - -# --v=0: log level for V logs -KUBE_LOG_LEVEL="--v=4" - -# --etcd-servers=[]: List of etcd servers to watch (http://ip:port), -# comma separated. 
Mutually exclusive with -etcd-config -KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}" - -# --etcd-cafile="": SSL Certificate Authority file used to secure etcd communication. -KUBE_ETCD_CAFILE="--etcd-cafile=/srv/kubernetes/etcd/ca.pem" - -# --etcd-certfile="": SSL certification file used to secure etcd communication. -KUBE_ETCD_CERTFILE="--etcd-certfile=/srv/kubernetes/etcd/client.pem" - -# --etcd-keyfile="": key file used to secure etcd communication. -KUBE_ETCD_KEYFILE="--etcd-keyfile=/srv/kubernetes/etcd/client-key.pem" - -# --insecure-bind-address=127.0.0.1: The IP address on which to serve the --insecure-port. -KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0" - -# --insecure-port=8080: The port on which to serve unsecured, unauthenticated access. -KUBE_API_PORT="--insecure-port=8080" - -# --kubelet-port=10250: Kubelet port -NODE_PORT="--kubelet-port=10250" - -# --advertise-address=: The IP address on which to advertise -# the apiserver to members of the cluster. -KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}" - -# --allow-privileged=false: If true, allow privileged containers. -KUBE_ALLOW_PRIV="--allow-privileged=false" - -# --service-cluster-ip-range=: A CIDR notation IP range from which to assign service cluster IPs. -# This must not overlap with any IP ranges assigned to nodes for pods. -KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" - -# --admission-control="AlwaysAdmit": Ordered list of plug-ins -# to do admission control of resources into cluster. -# Comma-delimited list of: -# LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists, -# NamespaceLifecycle, NamespaceAutoProvision, AlwaysAdmit, -# ServiceAccount, DefaultStorageClass, DefaultTolerationSeconds, ResourceQuota -# Mark Deprecated. Use --enable-admission-plugins or --disable-admission-plugins instead since v1.10. -# It will be removed in a future version. -KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}" - -# --client-ca-file="": If set, any request presenting a client certificate signed -# by one of the authorities in the client-ca-file is authenticated with an identity -# corresponding to the CommonName of the client certificate. -KUBE_API_CLIENT_CA_FILE="--client-ca-file=/srv/kubernetes/ca.crt" - -# --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, -# concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file -# and --tls-private-key-file are not provided, a self-signed certificate and key are -# generated for the public address and saved to /var/run/kubernetes. -KUBE_API_TLS_CERT_FILE="--tls-cert-file=/srv/kubernetes/server.cert" - -# --tls-private-key-file="": File containing x509 private key matching --tls-cert-file. 
-KUBE_API_TLS_PRIVATE_KEY_FILE="--tls-private-key-file=/srv/kubernetes/server.key"
-EOF
-
-KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\
- \${KUBE_LOG_LEVEL} \\
- \${KUBE_ETCD_SERVERS} \\
- \${KUBE_ETCD_CAFILE} \\
- \${KUBE_ETCD_CERTFILE} \\
- \${KUBE_ETCD_KEYFILE} \\
- \${KUBE_API_ADDRESS} \\
- \${KUBE_API_PORT} \\
- \${NODE_PORT} \\
- \${KUBE_ADVERTISE_ADDR} \\
- \${KUBE_ALLOW_PRIV} \\
- \${KUBE_SERVICE_ADDRESSES} \\
- \${KUBE_ADMISSION_CONTROL} \\
- \${KUBE_API_CLIENT_CA_FILE} \\
- \${KUBE_API_TLS_CERT_FILE} \\
- \${KUBE_API_TLS_PRIVATE_KEY_FILE}"
-
-
-cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
-[Unit]
-Description=Kubernetes API Server
-Documentation=https://github.com/kubernetes/kubernetes
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
-ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable kube-apiserver
-systemctl restart kube-apiserver
diff --git a/cluster/centos/master/scripts/controller-manager.sh b/cluster/centos/master/scripts/controller-manager.sh
deleted file mode 100755
index 684825a3e46..00000000000
--- a/cluster/centos/master/scripts/controller-manager.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-MASTER_ADDRESS=${1:-"8.8.8.18"}
-
-cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
-KUBE_LOGTOSTDERR="--logtostderr=true"
-KUBE_LOG_LEVEL="--v=4"
-KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
-
-# --root-ca-file="": If set, this root certificate authority will be included in
-# service account's token secret. This must be a valid PEM-encoded CA bundle.
-KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt"
-
-# --service-account-private-key-file="": Filename containing a PEM-encoded private
-# RSA key used to sign service account tokens.
-KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key"
-
-# --leader-elect: Start a leader election client and gain leadership before
-# executing the main loop. Enable this when running replicated components for high availability.
-KUBE_LEADER_ELECT="--leader-elect"
-EOF
-
-KUBE_CONTROLLER_MANAGER_OPTS=" \${KUBE_LOGTOSTDERR} \\
- \${KUBE_LOG_LEVEL} \\
- \${KUBE_MASTER} \\
- \${KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE} \\
- \${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE}\\
- \${KUBE_LEADER_ELECT}"
-
-cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
-[Unit]
-Description=Kubernetes Controller Manager
-Documentation=https://github.com/kubernetes/kubernetes
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
-ExecStart=/opt/kubernetes/bin/kube-controller-manager ${KUBE_CONTROLLER_MANAGER_OPTS}
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable kube-controller-manager
-systemctl restart kube-controller-manager
diff --git a/cluster/centos/master/scripts/etcd.sh b/cluster/centos/master/scripts/etcd.sh
deleted file mode 100755
index aa73b57b490..00000000000
--- a/cluster/centos/master/scripts/etcd.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-## Create etcd.conf, etcd.service, and start etcd service.
-
-
-etcd_data_dir=/var/lib/etcd
-mkdir -p ${etcd_data_dir}
-
-ETCD_NAME=${1:-"default"}
-ETCD_LISTEN_IP=${2:-"0.0.0.0"}
-ETCD_INITIAL_CLUSTER=${3:-}
-
-cat <<EOF >/opt/kubernetes/cfg/etcd.conf
-# [member]
-ETCD_NAME="${ETCD_NAME}"
-ETCD_DATA_DIR="${etcd_data_dir}/default.etcd"
-#ETCD_SNAPSHOT_COUNTER="10000"
-#ETCD_HEARTBEAT_INTERVAL="100"
-#ETCD_ELECTION_TIMEOUT="1000"
-ETCD_LISTEN_PEER_URLS="https://${ETCD_LISTEN_IP}:2380"
-ETCD_LISTEN_CLIENT_URLS="https://${ETCD_LISTEN_IP}:2379,https://127.0.0.1:2379"
-#ETCD_MAX_SNAPSHOTS="5"
-#ETCD_MAX_WALS="5"
-#ETCD_CORS=""
-#
-#[cluster]
-ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_LISTEN_IP}:2380"
-# if you use different ETCD_NAME (e.g. test),
-# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
-ETCD_INITIAL_CLUSTER="${ETCD_INITIAL_CLUSTER}"
-ETCD_INITIAL_CLUSTER_STATE="new"
-ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
-ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_LISTEN_IP}:2379"
-#ETCD_DISCOVERY=""
-#ETCD_DISCOVERY_SRV=""
-#ETCD_DISCOVERY_FALLBACK="proxy"
-#ETCD_DISCOVERY_PROXY=""
-#
-#[proxy]
-#ETCD_PROXY="off"
-#
-#[security]
-CLIENT_CERT_AUTH="true"
-ETCD_CA_FILE="/srv/kubernetes/etcd/ca.pem"
-ETCD_CERT_FILE="/srv/kubernetes/etcd/server-${ETCD_NAME}.pem"
-ETCD_KEY_FILE="/srv/kubernetes/etcd/server-${ETCD_NAME}-key.pem"
-PEER_CLIENT_CERT_AUTH="true"
-ETCD_PEER_CA_FILE="/srv/kubernetes/etcd/ca.pem"
-ETCD_PEER_CERT_FILE="/srv/kubernetes/etcd/peer-${ETCD_NAME}.pem"
-ETCD_PEER_KEY_FILE="/srv/kubernetes/etcd/peer-${ETCD_NAME}-key.pem"
-EOF
-
-cat <<EOF >//usr/lib/systemd/system/etcd.service
-[Unit]
-Description=Etcd Server
-After=network.target
-
-[Service]
-Type=simple
-WorkingDirectory=${etcd_data_dir}
-EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
-# set GOMAXPROCS to number of processors
-ExecStart=/bin/bash -c "GOMAXPROCS=\$(nproc) /opt/kubernetes/bin/etcd"
-Type=notify
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable etcd
-systemctl restart etcd
diff --git a/cluster/centos/master/scripts/flannel.sh b/cluster/centos/master/scripts/flannel.sh
deleted file mode 100644
index 83a7de5b58a..00000000000
--- a/cluster/centos/master/scripts/flannel.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-ETCD_SERVERS=${1:-"http://8.8.8.18:4001"}
-FLANNEL_NET=${2:-"172.16.0.0/16"}
-
-CA_FILE="/srv/kubernetes/etcd/ca.pem"
-CERT_FILE="/srv/kubernetes/etcd/client.pem"
-KEY_FILE="/srv/kubernetes/etcd/client-key.pem"
-
-cat <<EOF >/opt/kubernetes/cfg/flannel
-FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
-FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
-FLANNEL_ETCD_CAFILE="--etcd-cafile=${CA_FILE}"
-FLANNEL_ETCD_CERTFILE="--etcd-certfile=${CERT_FILE}"
-FLANNEL_ETCD_KEYFILE="--etcd-keyfile=${KEY_FILE}"
-EOF
-
-cat <<EOF >/usr/lib/systemd/system/flannel.service
-[Unit]
-Description=Flanneld overlay address etcd agent
-After=network.target
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/flannel
-ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} \${FLANNEL_ETCD_CAFILE} \${FLANNEL_ETCD_CERTFILE} \${FLANNEL_ETCD_KEYFILE}
-
-Type=notify
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-# Store FLANNEL_NET to etcd.
-attempt=0
-while true; do
-  if /opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
-    --no-sync -C "${ETCD_SERVERS}" \
-    get /coreos.com/network/config >/dev/null 2>&1; then
-    break
-  else
-    if (( attempt > 600 )); then
-      echo "timeout for waiting network config" > ~/kube/err.log
-      exit 2
-    fi
-
-    /opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
-      --no-sync -C "${ETCD_SERVERS}" \
-      mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1
-    attempt=$((attempt+1))
-    sleep 3
-  fi
-done
-wait
-
-systemctl enable flannel
-systemctl daemon-reload
-systemctl restart flannel
diff --git a/cluster/centos/master/scripts/post-etcd.sh b/cluster/centos/master/scripts/post-etcd.sh
deleted file mode 100644
index 6bdfd53094e..00000000000
--- a/cluster/centos/master/scripts/post-etcd.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-## Set initial-cluster-state to existing, and restart etcd service.
-
-sed -i 's/ETCD_INITIAL_CLUSTER_STATE="new"/ETCD_INITIAL_CLUSTER_STATE="existing"/' /opt/kubernetes/cfg/etcd.conf
-
-systemctl daemon-reload
-systemctl enable etcd
-systemctl restart etcd
diff --git a/cluster/centos/master/scripts/scheduler.sh b/cluster/centos/master/scripts/scheduler.sh
deleted file mode 100755
index 22d66650190..00000000000
--- a/cluster/centos/master/scripts/scheduler.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-MASTER_ADDRESS=${1:-"8.8.8.18"}
-
-cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
-###
-# kubernetes scheduler config
-
-# --logtostderr=true: log to standard error instead of files
-KUBE_LOGTOSTDERR="--logtostderr=true"
-
-# --v=0: log level for V logs
-KUBE_LOG_LEVEL="--v=4"
-
-# --master: The address of the Kubernetes API server (overrides any value in kubeconfig).
-KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
-
-# --leader-elect: Start a leader election client and gain leadership before
-# executing the main loop. Enable this when running replicated components for high availability.
-KUBE_LEADER_ELECT="--leader-elect"
-
-# Add your own!
-KUBE_SCHEDULER_ARGS=""
-
-EOF
-
-KUBE_SCHEDULER_OPTS=" \${KUBE_LOGTOSTDERR} \\
- \${KUBE_LOG_LEVEL} \\
- \${KUBE_MASTER} \\
- \${KUBE_LEADER_ELECT} \\
- \$KUBE_SCHEDULER_ARGS"
-
-cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
-[Unit]
-Description=Kubernetes Scheduler
-Documentation=https://github.com/kubernetes/kubernetes
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
-ExecStart=/opt/kubernetes/bin/kube-scheduler ${KUBE_SCHEDULER_OPTS}
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable kube-scheduler
-systemctl restart kube-scheduler
diff --git a/cluster/centos/node/bin/mk-docker-opts.sh b/cluster/centos/node/bin/mk-docker-opts.sh
deleted file mode 100755
index 22a459f5134..00000000000
--- a/cluster/centos/node/bin/mk-docker-opts.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Generate Docker daemon options based on flannel env file.
-
-# exit on any error
-set -e
-
-usage() {
-  echo "$0 [-f FLANNEL-ENV-FILE] [-d DOCKER-ENV-FILE] [-i] [-c] [-m] [-k COMBINED-KEY]
-
-Generate Docker daemon options based on flannel env file
-OPTIONS:
-  -f  Path to flannel env file. Defaults to /run/flannel/subnet.env
-  -d  Path to Docker env file to write to. Defaults to /run/docker_opts.env
-  -i  Output each Docker option as individual var. e.g. DOCKER_OPT_MTU=1500
-  -c  Output combined Docker options into DOCKER_OPTS var
-  -k  Set the combined options key to this value (default DOCKER_OPTS=)
-  -m  Do not output --ip-masq (useful for older Docker version)
-" >/dev/stderr
-  exit 1
-}
-
-flannel_env="/run/flannel/subnet.env"
-docker_env="/run/docker_opts.env"
-combined_opts_key="DOCKER_OPTS"
-indiv_opts=false
-combined_opts=false
-ipmasq=true
-val=""
-
-while getopts "f:d:icmk:" opt; do
-  case $opt in
-    f)
-      flannel_env=$OPTARG
-      ;;
-    d)
-      docker_env=$OPTARG
-      ;;
-    i)
-      indiv_opts=true
-      ;;
-    c)
-      combined_opts=true
-      ;;
-    m)
-      ipmasq=false
-      ;;
-    k)
-      combined_opts_key=$OPTARG
-      ;;
-    \?)
- usage - ;; - esac -done - -if [[ $indiv_opts = false ]] && [[ $combined_opts = false ]]; then - indiv_opts=true - combined_opts=true -fi - -if [[ -f "${flannel_env}" ]]; then - source "${flannel_env}" -fi - -if [[ -n "$FLANNEL_SUBNET" ]]; then - # shellcheck disable=SC2034 # Variable name referenced in OPT_LOOP below - DOCKER_OPT_BIP="--bip=$FLANNEL_SUBNET" -fi - -if [[ -n "$FLANNEL_MTU" ]]; then - # shellcheck disable=SC2034 # Variable name referenced in OPT_LOOP below - DOCKER_OPT_MTU="--mtu=$FLANNEL_MTU" -fi - -if [[ "$FLANNEL_IPMASQ" = true ]] && [[ $ipmasq = true ]]; then - # shellcheck disable=SC2034 # Variable name referenced in OPT_LOOP below - DOCKER_OPT_IPMASQ="--ip-masq=false" -fi - -eval docker_opts="\$${combined_opts_key}" -docker_opts+=" " - -echo -n "" >"${docker_env}" - -# OPT_LOOP -for opt in $(compgen -v DOCKER_OPT_); do - eval val=\$"${opt}" - - if [[ "$indiv_opts" = true ]]; then - echo "$opt=\"$val\"" >>"${docker_env}" - fi - - docker_opts+="$val " -done - -if [[ "$combined_opts" = true ]]; then - echo "${combined_opts_key}=\"${docker_opts}\"" >>"${docker_env}" -fi diff --git a/cluster/centos/node/bin/remove-docker0.sh b/cluster/centos/node/bin/remove-docker0.sh deleted file mode 100755 index 8c862fbce31..00000000000 --- a/cluster/centos/node/bin/remove-docker0.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Delete default docker bridge, so that docker can start with flannel network. - -# exit on any error -set -e - -rc=0 -ip link show docker0 >/dev/null 2>&1 || rc="$?" -if [[ "$rc" -eq "0" ]]; then - ip link set dev docker0 down - ip link delete docker0 -fi diff --git a/cluster/centos/node/scripts/docker.sh b/cluster/centos/node/scripts/docker.sh deleted file mode 100755 index 8b878b24c9b..00000000000 --- a/cluster/centos/node/scripts/docker.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-
-DOCKER_OPTS=${1:-""}
-
-DOCKER_CONFIG=/opt/kubernetes/cfg/docker
-
-cat <<EOF >$DOCKER_CONFIG
-DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s overlay --selinux-enabled=false ${DOCKER_OPTS}"
-EOF
-
-cat <<EOF >/usr/lib/systemd/system/docker.service
-[Unit]
-Description=Docker Application Container Engine
-Documentation=http://docs.docker.com
-After=network.target flannel.service
-Requires=flannel.service
-
-[Service]
-Type=notify
-EnvironmentFile=-/run/flannel/docker
-EnvironmentFile=-/opt/kubernetes/cfg/docker
-WorkingDirectory=/opt/kubernetes/bin
-ExecStart=/opt/kubernetes/bin/dockerd \$DOCKER_OPT_BIP \$DOCKER_OPT_MTU \$DOCKER_OPTS
-LimitNOFILE=1048576
-LimitNPROC=1048576
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable docker
-systemctl restart docker
diff --git a/cluster/centos/node/scripts/flannel.sh b/cluster/centos/node/scripts/flannel.sh
deleted file mode 100755
index 41ec84c5375..00000000000
--- a/cluster/centos/node/scripts/flannel.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-ETCD_SERVERS=${1:-"https://8.8.8.18:2379"}
-FLANNEL_NET=${2:-"172.16.0.0/16"}
-
-CA_FILE="/srv/kubernetes/etcd/ca.pem"
-CERT_FILE="/srv/kubernetes/etcd/client.pem"
-KEY_FILE="/srv/kubernetes/etcd/client-key.pem"
-
-cat <<EOF >/opt/kubernetes/cfg/flannel
-FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
-FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
-FLANNEL_ETCD_CAFILE="--etcd-cafile=${CA_FILE}"
-FLANNEL_ETCD_CERTFILE="--etcd-certfile=${CERT_FILE}"
-FLANNEL_ETCD_KEYFILE="--etcd-keyfile=${KEY_FILE}"
-EOF
-
-cat <<EOF >/usr/lib/systemd/system/flannel.service
-[Unit]
-Description=Flanneld overlay address etcd agent
-After=network.target
-Before=docker.service
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/flannel
-ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
-ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} \${FLANNEL_ETCD_CAFILE} \${FLANNEL_ETCD_CERTFILE} \${FLANNEL_ETCD_KEYFILE}
-ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
-
-Type=notify
-
-[Install]
-WantedBy=multi-user.target
-RequiredBy=docker.service
-EOF
-
-# Store FLANNEL_NET to etcd.
-attempt=0
-while true; do
-  if /opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
-    --no-sync -C "${ETCD_SERVERS}" \
-    get /coreos.com/network/config >/dev/null 2>&1; then
-    break
-  else
-    if (( attempt > 600 )); then
-      echo "timeout for waiting network config" > ~/kube/err.log
-      exit 2
-    fi
-
-    /opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
-      --no-sync -C "${ETCD_SERVERS}" \
-      mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1
-    attempt=$((attempt+1))
-    sleep 3
-  fi
-done
-wait
-
-systemctl daemon-reload
diff --git a/cluster/centos/node/scripts/kubelet.sh b/cluster/centos/node/scripts/kubelet.sh
deleted file mode 100755
index 88d6b149efe..00000000000
--- a/cluster/centos/node/scripts/kubelet.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-MASTER_ADDRESS=${1:-"8.8.8.18"}
-NODE_ADDRESS=${2:-"8.8.8.20"}
-DNS_SERVER_IP=${3:-"192.168.3.100"}
-DNS_DOMAIN=${4:-"cluster.local"}
-KUBECONFIG_DIR=${KUBECONFIG_DIR:-/opt/kubernetes/cfg}
-
-# Generate a kubeconfig file
-cat <<EOF > "${KUBECONFIG_DIR}/kubelet.kubeconfig"
-apiVersion: v1
-kind: Config
-clusters:
-  - cluster:
-      server: http://${MASTER_ADDRESS}:8080/
-    name: local
-contexts:
-  - context:
-      cluster: local
-    name: local
-current-context: local
-EOF
-
-cat <<EOF >/opt/kubernetes/cfg/kubelet
-# --logtostderr=true: log to standard error instead of files
-KUBE_LOGTOSTDERR="--logtostderr=true"
-
-# --v=0: log level for V logs
-KUBE_LOG_LEVEL="--v=4"
-
-# --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)
-NODE_ADDRESS="--address=${NODE_ADDRESS}"
-
-# --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag.
-NODE_PORT="--port=10250"
-
-# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
-NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
-
-# Path to a kubeconfig file, specifying how to connect to the API server.
-KUBELET_KUBECONFIG="--kubeconfig=${KUBECONFIG_DIR}/kubelet.kubeconfig"
-
-# --allow-privileged=false: If true, allow containers to request privileged mode. [default=false]
-KUBE_ALLOW_PRIV="--allow-privileged=false"
-
-# DNS info
-KUBELET__DNS_IP="--cluster-dns=${DNS_SERVER_IP}"
-KUBELET_DNS_DOMAIN="--cluster-domain=${DNS_DOMAIN}"
-
-# Add your own!
-KUBELET_ARGS=""
-EOF
-
-KUBELET_OPTS=" \${KUBE_LOGTOSTDERR} \\
- \${KUBE_LOG_LEVEL} \\
- \${NODE_ADDRESS} \\
- \${NODE_PORT} \\
- \${NODE_HOSTNAME} \\
- \${KUBELET_KUBECONFIG} \\
- \${KUBE_ALLOW_PRIV} \\
- \${KUBELET__DNS_IP} \\
- \${KUBELET_DNS_DOMAIN} \\
- \$KUBELET_ARGS"
-
-cat <<EOF >/usr/lib/systemd/system/kubelet.service
-[Unit]
-Description=Kubernetes Kubelet
-After=docker.service
-Requires=docker.service
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/kubelet
-ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
-Restart=on-failure
-KillMode=process
-RestartSec=15s
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable kubelet
-systemctl restart kubelet
diff --git a/cluster/centos/node/scripts/proxy.sh b/cluster/centos/node/scripts/proxy.sh
deleted file mode 100755
index 455084866ac..00000000000
--- a/cluster/centos/node/scripts/proxy.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-MASTER_ADDRESS=${1:-"8.8.8.18"}
-NODE_ADDRESS=${2:-"8.8.8.20"}
-
-cat <<EOF >/opt/kubernetes/cfg/kube-proxy
-# --logtostderr=true: log to standard error instead of files
-KUBE_LOGTOSTDERR="--logtostderr=true"
-
-# --v=0: log level for V logs
-KUBE_LOG_LEVEL="--v=4"
-
-# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
-NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
-
-# --master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
-KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
-EOF
-
-KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
- \${KUBE_LOG_LEVEL} \\
- \${NODE_HOSTNAME} \\
- \${KUBE_MASTER}"
-
-cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
-[Unit]
-Description=Kubernetes Proxy
-After=network.target
-
-[Service]
-EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
-ExecStart=/opt/kubernetes/bin/kube-proxy ${KUBE_PROXY_OPTS}
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl daemon-reload
-systemctl enable kube-proxy
-systemctl restart kube-proxy
diff --git a/cluster/centos/util.sh b/cluster/centos/util.sh
deleted file mode 100755
index d8cae8cf17a..00000000000
--- a/cluster/centos/util.sh
+++ /dev/null
@@ -1,388 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
- -# exit on any error -set -e - -SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C" - -# Use the config file specified in $KUBE_CONFIG_FILE, or default to -# config-default.sh. -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. -readonly ROOT=$(dirname "${BASH_SOURCE[0]}") -source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}" -source "$KUBE_ROOT/cluster/common.sh" - -# shellcheck disable=SC2034 # Can't tell if this is still needed or not -KUBECTL_PATH=${KUBE_ROOT}/cluster/centos/binaries/kubectl - -# Directory to be used for master and node provisioning. -KUBE_TEMP="${HOME}/kube_temp" - - -# Get master IP addresses and store in KUBE_MASTER_IP_ADDRESSES[] -# Must ensure that the following ENV vars are set: -# MASTERS -function detect-masters() { - KUBE_MASTER_IP_ADDRESSES=() - for master in ${MASTERS}; do - KUBE_MASTER_IP_ADDRESSES+=("${master#*@}") - done - echo "KUBE_MASTERS: ${MASTERS}" 1>&2 - echo "KUBE_MASTER_IP_ADDRESSES: [${KUBE_MASTER_IP_ADDRESSES[*]}]" 1>&2 -} - -# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[] -function detect-nodes() { - KUBE_NODE_IP_ADDRESSES=() - for node in ${NODES}; do - KUBE_NODE_IP_ADDRESSES+=("${node#*@}") - done - echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2 -} - -# Verify prereqs on host machine -function verify-prereqs() { - local rc - rc=0 - ssh-add -L 1> /dev/null 2> /dev/null || rc="$?" - # "Could not open a connection to your authentication agent." - if [[ "${rc}" -eq 2 ]]; then - eval "$(ssh-agent)" > /dev/null - trap-add "kill ${SSH_AGENT_PID}" EXIT - fi - rc=0 - ssh-add -L 1> /dev/null 2> /dev/null || rc="$?" - # "The agent has no identities." - if [[ "${rc}" -eq 1 ]]; then - # Try adding one of the default identities, with or without passphrase. - ssh-add || true - fi - rc=0 - # Expect at least one identity to be available. - if ! ssh-add -L 1> /dev/null 2> /dev/null; then - echo "Could not find or add an SSH identity." - echo "Please start ssh-agent, add your identity, and retry." - exit 1 - fi -} - -# Install handler for signal trap -function trap-add { - local handler="$1" - local signal="${2-EXIT}" - local cur - - cur="$(eval "sh -c 'echo \$3' -- $(trap -p "${signal}")")" - if [[ -n "${cur}" ]]; then - handler="${cur}; ${handler}" - fi - - # shellcheck disable=SC2064 # Early expansion is intentional here. - trap "${handler}" "${signal}" -} - -# Validate a kubernetes cluster -function validate-cluster() { - # by default call the generic validate-cluster.sh script, customizable by - # any cluster provider if this does not fit. - set +e - if ! 
"${KUBE_ROOT}/cluster/validate-cluster.sh"; then - for master in ${MASTERS}; do - troubleshoot-master "${master}" - done - for node in ${NODES}; do - troubleshoot-node "${node}" - done - exit 1 - fi - set -e -} - -# Instantiate a kubernetes cluster -function kube-up() { - make-ca-cert - - local num_infra=0 - for master in ${MASTERS}; do - provision-master "${master}" "infra${num_infra}" - ((++num_infra)) - done - - for master in ${MASTERS}; do - post-provision-master "${master}" - done - - for node in ${NODES}; do - provision-node "${node}" - done - - detect-masters - - # set CONTEXT and KUBE_SERVER values for create-kubeconfig() and get-password() - export CONTEXT="centos" - export KUBE_SERVER="http://${MASTER_ADVERTISE_ADDRESS}:8080" - source "${KUBE_ROOT}/cluster/common.sh" - - # set kubernetes user and password - get-password - create-kubeconfig -} - -# Delete a kubernetes cluster -function kube-down() { - for master in ${MASTERS}; do - tear-down-master "${master}" - done - - for node in ${NODES}; do - tear-down-node "${node}" - done -} - -function troubleshoot-master() { - # Troubleshooting on master if all required daemons are active. - echo "[INFO] Troubleshooting on master $1" - local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler") - local daemon - local daemon_status - printf "%-24s %-10s \n" "PROCESS" "STATUS" - for daemon in "${required_daemon[@]}"; do - local rc=0 - kube-ssh "${1}" "sudo systemctl is-active ${daemon}" >/dev/null 2>&1 || rc="$?" - if [[ "${rc}" -ne "0" ]]; then - daemon_status="inactive" - else - daemon_status="active" - fi - printf "%-24s %s\n" "${daemon}" ${daemon_status} - done - printf "\n" -} - -function troubleshoot-node() { - # Troubleshooting on node if all required daemons are active. - echo "[INFO] Troubleshooting on node ${1}" - local -a required_daemon=("kube-proxy" "kubelet" "docker" "flannel") - local daemon - local daemon_status - printf "%-24s %-10s \n" "PROCESS" "STATUS" - for daemon in "${required_daemon[@]}"; do - local rc=0 - kube-ssh "${1}" "sudo systemctl is-active ${daemon}" >/dev/null 2>&1 || rc="$?" 
- if [[ "${rc}" -ne "0" ]]; then - daemon_status="inactive" - else - daemon_status="active" - fi - printf "%-24s %s\n" "${daemon}" ${daemon_status} - done - printf "\n" -} - -# Clean up on master -function tear-down-master() { -echo "[INFO] tear-down-master on $1" - for service_name in etcd kube-apiserver kube-controller-manager kube-scheduler ; do - service_file="/usr/lib/systemd/system/${service_name}.service" - kube-ssh "$1" " \ - if [[ -f $service_file ]]; then \ - sudo systemctl stop $service_name; \ - sudo systemctl disable $service_name; \ - sudo rm -f $service_file; \ - fi" - done - kube-ssh "${1}" "sudo rm -rf /opt/kubernetes" - kube-ssh "${1}" "sudo rm -rf /srv/kubernetes" - kube-ssh "${1}" "sudo rm -rf ${KUBE_TEMP}" - kube-ssh "${1}" "sudo rm -rf /var/lib/etcd" -} - -# Clean up on node -function tear-down-node() { -echo "[INFO] tear-down-node on $1" - for service_name in kube-proxy kubelet docker flannel ; do - service_file="/usr/lib/systemd/system/${service_name}.service" - kube-ssh "$1" " \ - if [[ -f $service_file ]]; then \ - sudo systemctl stop $service_name; \ - sudo systemctl disable $service_name; \ - sudo rm -f $service_file; \ - fi" - done - kube-ssh "$1" "sudo rm -rf /run/flannel" - kube-ssh "$1" "sudo rm -rf /opt/kubernetes" - kube-ssh "$1" "sudo rm -rf /srv/kubernetes" - kube-ssh "$1" "sudo rm -rf ${KUBE_TEMP}" -} - -# Generate the CA certificates for k8s components -function make-ca-cert() { - echo "[INFO] make-ca-cert" - bash "${ROOT}/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local" -} - -# Provision master -# -# Assumed vars: -# $1 (master) -# $2 (etcd_name) -# KUBE_TEMP -# ETCD_SERVERS -# ETCD_INITIAL_CLUSTER -# SERVICE_CLUSTER_IP_RANGE -# MASTER_ADVERTISE_ADDRESS -function provision-master() { - echo "[INFO] Provision master on $1" - local master="$1" - local master_ip="${master#*@}" - local etcd_name="$2" - ensure-setup-dir "${master}" - ensure-etcd-cert "${etcd_name}" "${master_ip}" - - kube-scp "${master}" "${ROOT}/ca-cert ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}" - kube-scp "${master}" "${ROOT}/etcd-cert/ca.pem \ - ${ROOT}/etcd-cert/client.pem \ - ${ROOT}/etcd-cert/client-key.pem \ - ${ROOT}/etcd-cert/server-${etcd_name}.pem \ - ${ROOT}/etcd-cert/server-${etcd_name}-key.pem \ - ${ROOT}/etcd-cert/peer-${etcd_name}.pem \ - ${ROOT}/etcd-cert/peer-${etcd_name}-key.pem" "${KUBE_TEMP}/etcd-cert" - kube-ssh "${master}" " \ - sudo rm -rf /opt/kubernetes/bin; \ - sudo cp -r ${KUBE_TEMP}/master/bin /opt/kubernetes; \ - sudo mkdir -p /srv/kubernetes/; sudo cp -f ${KUBE_TEMP}/ca-cert/* /srv/kubernetes/; \ - sudo mkdir -p /srv/kubernetes/etcd; sudo cp -f ${KUBE_TEMP}/etcd-cert/* /srv/kubernetes/etcd/; \ - sudo chmod -R +x /opt/kubernetes/bin; \ - sudo ln -sf /opt/kubernetes/bin/* /usr/local/bin/; \ - sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh ${etcd_name} ${master_ip} ${ETCD_INITIAL_CLUSTER}; \ - sudo bash ${KUBE_TEMP}/master/scripts/apiserver.sh ${master_ip} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${ADMISSION_CONTROL}; \ - sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${MASTER_ADVERTISE_ADDRESS}; \ - sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${MASTER_ADVERTISE_ADDRESS}" -} - -# Post-provision master, run after all masters were provisioned -# -# Assumed vars: -# $1 (master) -# KUBE_TEMP -# ETCD_SERVERS -# FLANNEL_NET -function 
post-provision-master() { - echo "[INFO] Post provision master on $1" - local master=$1 - kube-ssh "${master}" " \ - sudo bash ${KUBE_TEMP}/master/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \ - sudo bash ${KUBE_TEMP}/master/scripts/post-etcd.sh" -} - -# Provision node -# -# Assumed vars: -# $1 (node) -# KUBE_TEMP -# ETCD_SERVERS -# FLANNEL_NET -# MASTER_ADVERTISE_ADDRESS -# DOCKER_OPTS -# DNS_SERVER_IP -# DNS_DOMAIN -function provision-node() { - echo "[INFO] Provision node on $1" - local node=$1 - local node_ip=${node#*@} - local dns_ip=${DNS_SERVER_IP#*@} - # shellcheck disable=SC2153 # DNS_DOMAIN sourced from external file - local dns_domain=${DNS_DOMAIN#*@} - ensure-setup-dir "${node}" - - kube-scp "${node}" "${ROOT}/binaries/node ${ROOT}/node ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}" - kube-scp "${node}" "${ROOT}/etcd-cert/ca.pem \ - ${ROOT}/etcd-cert/client.pem \ - ${ROOT}/etcd-cert/client-key.pem" "${KUBE_TEMP}/etcd-cert" - kube-ssh "${node}" " \ - rm -rf /opt/kubernetes/bin; \ - sudo cp -r ${KUBE_TEMP}/node/bin /opt/kubernetes; \ - sudo chmod -R +x /opt/kubernetes/bin; \ - sudo mkdir -p /srv/kubernetes/etcd; sudo cp -f ${KUBE_TEMP}/etcd-cert/* /srv/kubernetes/etcd/; \ - sudo ln -s /opt/kubernetes/bin/* /usr/local/bin/; \ - sudo mkdir -p /srv/kubernetes/etcd; sudo cp -f ${KUBE_TEMP}/etcd-cert/* /srv/kubernetes/etcd/; \ - sudo bash ${KUBE_TEMP}/node/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \ - sudo bash ${KUBE_TEMP}/node/scripts/docker.sh \"${DOCKER_OPTS}\"; \ - sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${MASTER_ADVERTISE_ADDRESS} ${node_ip} ${dns_ip} ${dns_domain}; \ - sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${MASTER_ADVERTISE_ADDRESS}" -} - -# Create dirs that'll be used during setup on target machine. -# -# Assumed vars: -# KUBE_TEMP -function ensure-setup-dir() { - kube-ssh "${1}" "mkdir -p ${KUBE_TEMP}; \ - mkdir -p ${KUBE_TEMP}/etcd-cert; \ - sudo mkdir -p /opt/kubernetes/bin; \ - sudo mkdir -p /opt/kubernetes/cfg" -} - -# Generate certificates for etcd cluster -# -# Assumed vars: -# $1 (etcd member name) -# $2 (master ip) -function ensure-etcd-cert() { - local etcd_name="$1" - local master_ip="$2" - local cert_dir="${ROOT}/etcd-cert" - - if [[ ! -r "${cert_dir}/client.pem" || ! -r "${cert_dir}/client-key.pem" ]]; then - generate-etcd-cert "${cert_dir}" "${master_ip}" "client" "client" - fi - - generate-etcd-cert "${cert_dir}" "${master_ip}" "server" "server-${etcd_name}" - generate-etcd-cert "${cert_dir}" "${master_ip}" "peer" "peer-${etcd_name}" -} - -# Run command over ssh -function kube-ssh() { - local host="$1" - shift - ssh "${SSH_OPTS}" -t "${host}" "$@" >/dev/null 2>&1 -} - -# Copy file recursively over ssh -function kube-scp() { - local host="$1" - local src=("$2") - local dst="$3" - scp -r "${SSH_OPTS}" "${src[*]}" "${host}:${dst}" -} - -# Ensure that we have a password created for validating to the master. Will -# read from kubeconfig if available. 
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  load-or-gen-kube-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER="admin"
-    KUBE_PASSWORD=$(python -c 'import string,random; '\
-      'print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
-  fi
-}
diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh
index 857fe8e4479..3daf2063f0f 100755
--- a/cluster/kube-up.sh
+++ b/cluster/kube-up.sh
@@ -32,27 +32,6 @@ fi
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
-DEPRECATED_PROVIDERS=(
-  "centos"
-  "local"
-)
-
-for provider in "${DEPRECATED_PROVIDERS[@]}"; do
-  if [[ "${KUBERNETES_PROVIDER}" == "${provider}" ]]; then
-    cat <<EOF 1>&2
-
-!!! DEPRECATION NOTICE !!!
-
-The '${provider}' kube-up provider is deprecated and will be removed in a future
-release of kubernetes. Deprecated providers will be removed within 2 releases.
-
-See https://github.com/kubernetes/kubernetes/issues/49213 for more info.
-
-EOF
-    break
-  fi
-done
-
 if [ -z "${ZONE-}" ]; then
   echo "... Starting cluster using provider: ${KUBERNETES_PROVIDER}" >&2
 else
diff --git a/cluster/local/util.sh b/cluster/local/util.sh
deleted file mode 100755
index da262fdf249..00000000000
--- a/cluster/local/util.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Perform preparations required to run e2e tests
-function prepare-e2e() {
-  echo "Local doesn't need special preparations for e2e tests" 1>&2
-}
-
-# Detect the IP for the master
-#
-# Vars set:
-#   KUBE_MASTER
-#   KUBE_MASTER_IP
-# Vars exported:
-#   KUBE_MASTER_URL
-function detect-master {
-  KUBE_MASTER=localhost
-  KUBE_MASTER_IP=127.0.0.1
-  export KUBE_MASTER_URL="http://${KUBE_MASTER_IP}:8080"
-  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
-}
From 81cc384a9afbb4c226b18e51f4d73ccdfc0a0bb6 Mon Sep 17 00:00:00 2001
From: Peter Hornyack
Date: Tue, 16 Apr 2019 18:13:54 -0700
Subject: [PATCH 054/209] Pin GCE Windows node image to 1809 v20190312.

This is to work around https://github.com/kubernetes/kubernetes/issues/76666.
---
 cluster/gce/util.sh | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index 2175a89b627..2eea8085dc2 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -2031,7 +2031,9 @@ function create-node-template() {
   if [[ "${os}" == 'linux' ]]; then
     node_image_flags="--image-project ${NODE_IMAGE_PROJECT} --image ${NODE_IMAGE}"
   elif [[ "${os}" == 'windows' ]]; then
-    node_image_flags="--image-project ${WINDOWS_NODE_IMAGE_PROJECT} --image-family ${WINDOWS_NODE_IMAGE_FAMILY}"
+    # TODO(pjh): revert back to using WINDOWS_NODE_IMAGE_FAMILY instead of
+    # pinning to the v20190312 image once #76666 is resolved.
+ node_image_flags="--image-project ${WINDOWS_NODE_IMAGE_PROJECT} --image=windows-server-1809-dc-core-for-containers-v20190312" else echo "Unknown OS ${os}" >&2 exit 1 From b3237ed0b70b31a42b07d89f21344c82e3e91611 Mon Sep 17 00:00:00 2001 From: Sean Sullivan Date: Tue, 16 Apr 2019 16:13:05 -0700 Subject: [PATCH 055/209] Removed unused code in humanreadable.go --- pkg/printers/humanreadable.go | 163 ------------------ pkg/printers/internalversion/printers.go | 12 +- pkg/printers/internalversion/printers_test.go | 2 +- 3 files changed, 12 insertions(+), 165 deletions(-) diff --git a/pkg/printers/humanreadable.go b/pkg/printers/humanreadable.go index 65759e6ddb5..6032b756740 100644 --- a/pkg/printers/humanreadable.go +++ b/pkg/printers/humanreadable.go @@ -17,7 +17,6 @@ limitations under the License. package printers import ( - "bytes" "fmt" "io" "reflect" @@ -30,7 +29,6 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -88,15 +86,6 @@ func (a *HumanReadablePrinter) With(fns ...func(PrintHandler)) *HumanReadablePri return a } -// EnsurePrintHeaders sets the HumanReadablePrinter option "NoHeaders" to false -// and removes the .lastType that was printed, which forces headers to be -// printed in cases where multiple lists of the same resource are printed -// consecutively, but are separated by non-printer related information. -func (h *HumanReadablePrinter) EnsurePrintHeaders() { - h.options.NoHeaders = false - h.lastType = nil -} - // TableHandler adds a print handler with a given set of columns to HumanReadablePrinter instance. // See ValidateRowPrintHandlerFunc for required method signature. func (h *HumanReadablePrinter) TableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { @@ -163,51 +152,6 @@ func ValidateRowPrintHandlerFunc(printFunc reflect.Value) error { return nil } -// ValidatePrintHandlerFunc validates print handler signature. -// printFunc is the function that will be called to print an object. -// It must be of the following type: -// func printFunc(object ObjectType, w io.Writer, options PrintOptions) error -// where ObjectType is the type of the object that will be printed. -// DEPRECATED: will be replaced with ValidateRowPrintHandlerFunc -func ValidatePrintHandlerFunc(printFunc reflect.Value) error { - if printFunc.Kind() != reflect.Func { - return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) - } - funcType := printFunc.Type() - if funcType.NumIn() != 3 || funcType.NumOut() != 1 { - return fmt.Errorf("invalid print handler." + - "Must accept 3 parameters and return 1 value.") - } - if funcType.In(1) != reflect.TypeOf((*io.Writer)(nil)).Elem() || - funcType.In(2) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || - funcType.Out(0) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("invalid print handler. 
The expected signature is: "+ - "func handler(obj %v, w io.Writer, options PrintOptions) error", funcType.In(0)) - } - return nil -} - -func (h *HumanReadablePrinter) HandledResources() []string { - keys := make([]string, 0) - - for k := range h.handlerMap { - // k.String looks like "*api.PodList" and we want just "pod" - api := strings.Split(k.String(), ".") - resource := api[len(api)-1] - if strings.HasSuffix(resource, "List") { - continue - } - resource = strings.ToLower(resource) - keys = append(keys, resource) - } - return keys -} - -func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { - _, err := fmt.Fprintf(w, "Unknown object: %s", string(data)) - return err -} - func printHeader(columnNames []string, w io.Writer) error { if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { return err @@ -283,15 +227,6 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er return fmt.Errorf("error: unknown type %#v", obj) } -func hasCondition(conditions []metav1beta1.TableRowCondition, t metav1beta1.RowConditionType) bool { - for _, condition := range conditions { - if condition.Type == t { - return condition.Status == metav1beta1.ConditionTrue - } - } - return false -} - // PrintTable prints a table to the provided output respecting the filtering rules for options // for wide columns and filtered rows. It filters out rows that are Completed. You should call // DecorateTable if you receive a table from a remote server before calling PrintTable. @@ -565,65 +500,6 @@ func printRows(output io.Writer, rows []metav1beta1.TableRow, options PrintOptio } } -// TODO: this method assumes the meta/v1 server API, so should be refactored out of this package -func printUnstructured(unstructured runtime.Unstructured, w io.Writer, additionalFields []string, options PrintOptions) error { - metadata, err := meta.Accessor(unstructured) - if err != nil { - return err - } - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", metadata.GetNamespace()); err != nil { - return err - } - } - - content := unstructured.UnstructuredContent() - kind := "" - if objKind, ok := content["kind"]; ok { - if str, ok := objKind.(string); ok { - kind = str - } - } - if objAPIVersion, ok := content["apiVersion"]; ok { - if str, ok := objAPIVersion.(string); ok { - version, err := schema.ParseGroupVersion(str) - if err != nil { - return err - } - kind = kind + "." + version.Version + "." 
+ version.Group - } - } - - name := FormatResourceName(options.Kind, metadata.GetName(), options.WithKind) - - if _, err := fmt.Fprintf(w, "%s\t%s", name, kind); err != nil { - return err - } - for _, field := range additionalFields { - if value, ok := content[field]; ok { - var formattedValue string - switch typedValue := value.(type) { - case []interface{}: - formattedValue = fmt.Sprintf("%d item(s)", len(typedValue)) - default: - formattedValue = fmt.Sprintf("%v", value) - } - if _, err := fmt.Fprintf(w, "\t%s", formattedValue); err != nil { - return err - } - } - } - if _, err := fmt.Fprint(w, AppendLabels(metadata.GetLabels(), options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, metadata.GetLabels())); err != nil { - return err - } - - return nil -} - func formatLabelHeaders(columnLabels []string) []string { formHead := make([]string, len(columnLabels)) for i, l := range columnLabels { @@ -664,42 +540,3 @@ func appendLabelCells(values []interface{}, itemLabels map[string]string, opts P } return values } - -// FormatResourceName receives a resource kind, name, and boolean specifying -// whether or not to update the current name to "kind/name" -func FormatResourceName(kind schema.GroupKind, name string, withKind bool) string { - if !withKind || kind.Empty() { - return name - } - - return strings.ToLower(kind.String()) + "/" + name -} - -func AppendLabels(itemLabels map[string]string, columnLabels []string) string { - var buffer bytes.Buffer - - for _, cl := range columnLabels { - buffer.WriteString(fmt.Sprint("\t")) - if il, ok := itemLabels[cl]; ok { - buffer.WriteString(fmt.Sprint(il)) - } else { - buffer.WriteString("") - } - } - - return buffer.String() -} - -// Append all labels to a single column. We need this even when show-labels flag* is -// false, since this adds newline delimiter to the end of each row. 
-func AppendAllLabels(showLabels bool, itemLabels map[string]string) string { - var buffer bytes.Buffer - - if showLabels { - buffer.WriteString(fmt.Sprint("\t")) - buffer.WriteString(labels.FormatLabels(itemLabels)) - } - buffer.WriteString("\n") - - return buffer.String() -} diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 50d56860de0..637ba4420a1 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -1902,7 +1902,7 @@ func printControllerRevision(obj *apps.ControllerRevision, options printers.Prin return nil, err } gvk := gv.WithKind(controllerRef.Kind) - controllerName = printers.FormatResourceName(gvk.GroupKind(), controllerRef.Name, withKind) + controllerName = formatResourceName(gvk.GroupKind(), controllerRef.Name, withKind) } revision := obj.Revision age := translateTimestampSince(obj.CreationTimestamp) @@ -1922,6 +1922,16 @@ func printControllerRevisionList(list *apps.ControllerRevisionList, options prin return rows, nil } +// formatResourceName receives a resource kind, name, and boolean specifying +// whether or not to update the current name to "kind/name" +func formatResourceName(kind schema.GroupKind, name string, withKind bool) string { + if !withKind || kind.Empty() { + return name + } + + return strings.ToLower(kind.String()) + "/" + name +} + func printResourceQuota(resourceQuota *api.ResourceQuota, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { row := metav1beta1.TableRow{ Object: runtime.RawExtension{Object: resourceQuota}, diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index e294ec7eee2..82a0d56a383 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -278,7 +278,7 @@ func TestFormatResourceName(t *testing.T) { {schema.GroupKind{Group: "group", Kind: "Kind"}, "name", "kind.group/name"}, } for _, tt := range tests { - if got := printers.FormatResourceName(tt.kind, tt.name, true); got != tt.want { + if got := formatResourceName(tt.kind, tt.name, true); got != tt.want { t.Errorf("formatResourceName(%q, %q) = %q, want %q", tt.kind, tt.name, got, tt.want) } } From 55eb5b93c5e4a12aca627c3f5c808b40f50c6586 Mon Sep 17 00:00:00 2001 From: saad-ali Date: Wed, 17 Apr 2019 13:44:25 -0700 Subject: [PATCH 056/209] API reviewers for SIG Storage --- OWNERS_ALIASES | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 9705b3f4b00..fe330d3cd9f 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -381,9 +381,11 @@ aliases: - bsalamat - k82cn - # sig-storage-api-reviewers: - # - - # - +sig-storage-api-reviewers: + - saad-ali + - msau42 + - jsafrane + # sig-windows-api-reviewers: # - From 028df0451db7c982066930099fc701839d6e9f7e Mon Sep 17 00:00:00 2001 From: John Schnake Date: Tue, 9 Apr 2019 11:52:36 -0500 Subject: [PATCH 057/209] Refactor of e2e/framework/authorizer_util.go - moves these helper functions into e2e/framework/auth - removes logging from helper functions - in some cases explicitly returns errors that were implicitly ignored/logged. In the situations where they should be ignored, we explicitly check that the condition is met before ignoring it. 
- fixes references of these methods to use the right package and return values --- test/e2e/BUILD | 1 + test/e2e/auth/BUILD | 1 + test/e2e/auth/audit.go | 9 +- test/e2e/auth/audit_dynamic.go | 3 +- test/e2e/auth/pod_security_policy.go | 15 ++-- test/e2e/examples.go | 6 +- test/e2e/framework/BUILD | 6 +- test/e2e/framework/auth/BUILD | 34 +++++++ .../{authorizer_util.go => auth/helpers.go} | 88 ++++++++++++++----- test/e2e/framework/psp_util.go | 10 ++- test/e2e/kubectl/BUILD | 1 + test/e2e/kubectl/kubectl.go | 8 +- test/e2e/network/BUILD | 1 + test/e2e/network/ingress.go | 8 +- test/e2e/storage/BUILD | 1 + test/e2e/storage/drivers/BUILD | 1 + test/e2e/storage/drivers/in_tree.go | 6 +- test/e2e/storage/volume_provisioning.go | 11 ++- 18 files changed, 156 insertions(+), 54 deletions(-) create mode 100644 test/e2e/framework/auth/BUILD rename test/e2e/framework/{authorizer_util.go => auth/helpers.go} (59%) diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 3c088ebad05..73b6ad0d065 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -64,6 +64,7 @@ go_library( "//staging/src/k8s.io/component-base/logs:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/providers/aws:go_default_library", diff --git a/test/e2e/auth/BUILD b/test/e2e/auth/BUILD index 30ae4a39ebf..0354f9ae46e 100644 --- a/test/e2e/auth/BUILD +++ b/test/e2e/auth/BUILD @@ -53,6 +53,7 @@ go_library( "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index a263ebf7b37..d7fece8c28e 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -31,14 +31,15 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" auditinternal "k8s.io/apiserver/pkg/apis/audit" - "k8s.io/apiserver/pkg/apis/audit/v1" + auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch" . "github.com/onsi/ginkgo" ) @@ -652,7 +653,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { // test authorizer annotations, RBAC is required. 
It("should audit API calls to get a pod with unauthorized user.", func() { - if !framework.IsRBACEnabled(f) { + if !auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) { framework.Skipf("RBAC not enabled.") } @@ -735,7 +736,7 @@ func expectEvents(f *framework.Framework, expectedEvents []utils.AuditEvent) { return false, err } defer stream.Close() - missingReport, err := utils.CheckAuditLines(stream, expectedEvents, v1.SchemeGroupVersion) + missingReport, err := utils.CheckAuditLines(stream, expectedEvents, auditv1.SchemeGroupVersion) if err != nil { framework.Logf("Failed to observe audit events: %v", err) } else if len(missingReport.MissingEvents) > 0 { diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index e3145fe8c99..db311c43dd6 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -35,6 +35,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -346,7 +347,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { }, } - if framework.IsRBACEnabled(f) { + if auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) { testCases = append(testCases, annotationTestCases...) } expectedEvents := []utils.AuditEvent{} diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 0423e2bad3a..d47cd846fcf 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -19,7 +19,7 @@ package auth import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -33,6 +33,7 @@ import ( psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" imageutils "k8s.io/kubernetes/test/utils/image" utilpointer "k8s.io/utils/pointer" @@ -54,7 +55,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { if !framework.IsPodSecurityPolicyEnabled(f) { framework.Skipf("PodSecurityPolicy not enabled") } - if !framework.IsRBACEnabled(f) { + if !auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) { framework.Skipf("RBAC not enabled") } ns = f.Namespace.Name @@ -70,8 +71,9 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { framework.ExpectNoError(err) By("Binding the edit role to the default SA") - framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns, + err = auth.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"}) + framework.ExpectNoError(err) }) It("should forbid pod creation when no PSP is available", func() { @@ -202,7 +204,6 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) { sysadmin.Spec.Containers[0].SecurityContext.RunAsUser = &uid tester(sysadmin) }) - } // createAndBindPSP creates a PSP in the policy API group. @@ -231,12 +232,14 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policy.PodSecurityPol framework.ExpectNoError(err, "Failed to create PSP role") // Bind the role to the namespace. 
- framework.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{ + err = auth.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{ Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default", }) - framework.ExpectNoError(framework.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), + framework.ExpectNoError(err) + + framework.ExpectNoError(auth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), serviceaccount.MakeUsername(ns, "default"), ns, "use", name, schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true)) diff --git a/test/e2e/examples.go b/test/e2e/examples.go index ec50cc4288f..950691b4dbe 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -30,6 +30,7 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" "k8s.io/kubernetes/test/e2e/framework/testfiles" . "github.com/onsi/ginkgo" @@ -51,10 +52,11 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { // this test wants powerful permissions. Since the namespace names are unique, we can leave this // lying around so we don't have to race any caches - framework.BindClusterRoleInNamespace(c.RbacV1beta1(), "edit", f.Namespace.Name, + err := auth.BindClusterRoleInNamespace(c.RbacV1beta1(), "edit", f.Namespace.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) + framework.ExpectNoError(err) - err := framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(), + err = auth.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), f.Namespace.Name, "create", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 84e11ce9ada..c142c1420b7 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -5,7 +5,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "authorizer_util.go", "cleanup.go", "create.go", "deployment_util.go", @@ -68,7 +67,6 @@ go_library( "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", - "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", @@ -103,9 +101,7 @@ go_library( "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/restmapper:go_default_library", "//staging/src/k8s.io/client-go/scale:go_default_library", @@ -116,6 +112,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", 
"//staging/src/k8s.io/component-base/cli/flag:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/testfiles:go_default_library", @@ -148,6 +145,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//test/e2e/framework/auth:all-srcs", "//test/e2e/framework/config:all-srcs", "//test/e2e/framework/ginkgowrapper:all-srcs", "//test/e2e/framework/gpu:all-srcs", diff --git a/test/e2e/framework/auth/BUILD b/test/e2e/framework/auth/BUILD new file mode 100644 index 00000000000..f459c20f3eb --- /dev/null +++ b/test/e2e/framework/auth/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/auth", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/authorizer_util.go b/test/e2e/framework/auth/helpers.go similarity index 59% rename from test/e2e/framework/authorizer_util.go rename to test/e2e/framework/auth/helpers.go index 0bc7b678e21..5b3c9ac8d29 100644 --- a/test/e2e/framework/authorizer_util.go +++ b/test/e2e/framework/auth/helpers.go @@ -14,13 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package auth import ( - "k8s.io/klog" + "fmt" "sync" "time" + "github.com/onsi/ginkgo" + "github.com/pkg/errors" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -36,6 +38,12 @@ const ( policyCachePollTimeout = 5 * time.Second ) +type bindingsGetter interface { + v1beta1rbac.RoleBindingsGetter + v1beta1rbac.ClusterRoleBindingsGetter + v1beta1rbac.ClusterRolesGetter +} + // WaitForAuthorizationUpdate checks if the given user can perform the named verb and action. // If policyCachePollTimeout is reached without the expected condition matching, an error is returned func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error { @@ -57,12 +65,15 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews User: user, }, } + err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) { response, err := c.SubjectAccessReviews().Create(review) // GKE doesn't enable the SAR endpoint. 
Without this endpoint, we cannot determine if the policy engine // has adjusted as expected. In this case, simply wait one second and hope it's up to date + // TODO: Should have a check for the provider here but that introduces too tight of + // coupling with the `framework` package. See: https://github.com/kubernetes/kubernetes/issues/76726 if apierrors.IsNotFound(err) { - klog.Info("SubjectAccessReview endpoint is missing") + logf("SubjectAccessReview endpoint is missing") time.Sleep(1 * time.Second) return true, nil } @@ -77,8 +88,13 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews return err } -// BindClusterRole binds the cluster role at the cluster scope -func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) { +// BindClusterRole binds the cluster role at the cluster scope. If RBAC is not enabled, nil +// is returned with no action. +func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) error { + if !IsRBACEnabled(c) { + return nil + } + // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches _, err := c.ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -92,23 +108,30 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st Subjects: subjects, }) - // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled. if err != nil { - klog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects) + return errors.Wrapf(err, "binding clusterrole/%s for %q for %v", clusterRole, ns, subjects) } + + return nil } -// BindClusterRoleInNamespace binds the cluster role at the namespace scope -func BindClusterRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) { - bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...) +// BindClusterRoleInNamespace binds the cluster role at the namespace scope. If RBAC is not enabled, nil +// is returned with no action. +func BindClusterRoleInNamespace(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) error { + return bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...) } -// BindRoleInNamespace binds the role at the namespace scope -func BindRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) { - bindInNamespace(c, "Role", role, ns, subjects...) +// BindRoleInNamespace binds the role at the namespace scope. If RBAC is not enabled, nil +// is returned with no action. +func BindRoleInNamespace(c bindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) error { + return bindInNamespace(c, "Role", role, ns, subjects...) } -func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) { +func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) error { + if !IsRBACEnabled(c) { + return nil + } + // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches _, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -122,10 +145,11 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string Subjects: subjects, }) - // if we failed, don't fail the entire test because it may still work. 
RBAC may simply be disabled. if err != nil { - klog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects) + return errors.Wrapf(err, "binding %s/%s into %q for %v", roleType, role, ns, subjects) } + + return nil } var ( @@ -134,19 +158,41 @@ var ( ) // IsRBACEnabled returns true if RBAC is enabled. Otherwise false. -func IsRBACEnabled(f *Framework) bool { +func IsRBACEnabled(crGetter v1beta1rbac.ClusterRolesGetter) bool { isRBACEnabledOnce.Do(func() { - crs, err := f.ClientSet.RbacV1().ClusterRoles().List(metav1.ListOptions{}) + crs, err := crGetter.ClusterRoles().List(metav1.ListOptions{}) if err != nil { - Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err) + logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err) isRBACEnabled = false } else if crs == nil || len(crs.Items) == 0 { - Logf("No ClusterRoles found; assuming RBAC is disabled.") + logf("No ClusterRoles found; assuming RBAC is disabled.") isRBACEnabled = false } else { - Logf("Found ClusterRoles; assuming RBAC is enabled.") + logf("Found ClusterRoles; assuming RBAC is enabled.") isRBACEnabled = true } }) + return isRBACEnabled } + +// logf logs INFO lines to the GinkgoWriter. +// TODO: Log functions like these should be put into their own package, +// see: https://github.com/kubernetes/kubernetes/issues/76728 +func logf(format string, args ...interface{}) { + log("INFO", format, args...) +} + +// log prints formatted log messages to the global GinkgoWriter. +// TODO: Log functions like these should be put into their own package, +// see: https://github.com/kubernetes/kubernetes/issues/76728 +func log(level string, format string, args ...interface{}) { + fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) +} + +// nowStamp returns the current time formatted for placement in the logs (time.StampMilli). +// TODO: If only used for logging, this should be put into a logging package, +// see: https://github.com/kubernetes/kubernetes/issues/76728 +func nowStamp() string { + return time.Now().Format(time.StampMilli) +} diff --git a/test/e2e/framework/psp_util.go b/test/e2e/framework/psp_util.go index 7558f836567..cb06f91e19c 100644 --- a/test/e2e/framework/psp_util.go +++ b/test/e2e/framework/psp_util.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp" + "k8s.io/kubernetes/test/e2e/framework/auth" "github.com/onsi/ginkgo" ) @@ -118,7 +119,7 @@ func createPrivilegedPSPBinding(f *Framework, namespace string) { ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) } - if IsRBACEnabled(f) { + if auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) { // Create the Role to bind it to the namespace. 
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, @@ -135,10 +136,10 @@ func createPrivilegedPSPBinding(f *Framework, namespace string) { } }) - if IsRBACEnabled(f) { + if auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) { ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s", podSecurityPolicyPrivileged, namespace)) - BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(), + err := auth.BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(), podSecurityPolicyPrivileged, namespace, rbacv1beta1.Subject{ @@ -146,7 +147,8 @@ func createPrivilegedPSPBinding(f *Framework, namespace string) { Namespace: namespace, Name: "default", }) - ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), + ExpectNoError(err) + ExpectNoError(auth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged, schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true)) } diff --git a/test/e2e/kubectl/BUILD b/test/e2e/kubectl/BUILD index e42e36df51b..b776650ed39 100644 --- a/test/e2e/kubectl/BUILD +++ b/test/e2e/kubectl/BUILD @@ -31,6 +31,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/scheduling:go_default_library", diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index a8910f3dc34..149491f3de8 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -42,7 +42,7 @@ import ( "github.com/elazarl/goproxy" "sigs.k8s.io/yaml" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -58,6 +58,7 @@ import ( "k8s.io/kubernetes/pkg/controller" commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" jobutil "k8s.io/kubernetes/test/e2e/framework/job" "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/scheduling" @@ -606,10 +607,11 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should handle in-cluster config", func() { ginkgo.By("adding rbac permissions") // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace - framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "view", f.Namespace.Name, + err := auth.BindClusterRole(f.ClientSet.RbacV1beta1(), "view", f.Namespace.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) + framework.ExpectNoError(err) - err := framework.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), + err = auth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD index 4137c71e581..fd215ea1860 100644 --- a/test/e2e/network/BUILD +++ b/test/e2e/network/BUILD @@ -58,6 +58,7 @@ go_library( 
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/ingress:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/network/scale:go_default_library", diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index de97a30190e..6ae9f8e8f50 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -26,7 +26,7 @@ import ( compute "google.golang.org/api/compute/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" "k8s.io/kubernetes/test/e2e/framework/ingress" "k8s.io/kubernetes/test/e2e/framework/providers/gce" @@ -62,10 +63,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // this test wants powerful permissions. Since the namespace names are unique, we can leave this // lying around so we don't have to race any caches - framework.BindClusterRole(jig.Client.RbacV1beta1(), "cluster-admin", f.Namespace.Name, + err := auth.BindClusterRole(jig.Client.RbacV1beta1(), "cluster-admin", f.Namespace.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) + framework.ExpectNoError(err) - err := framework.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1beta1(), + err = auth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1beta1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), "", "create", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 3b44b4838c7..902c884f195 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -66,6 +66,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/testfiles:go_default_library", diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD index 7236b560d0d..395075527f8 100644 --- a/test/e2e/storage/drivers/BUILD +++ b/test/e2e/storage/drivers/BUILD @@ -22,6 +22,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/e2e/storage/testpatterns:go_default_library", "//test/e2e/storage/testsuites:go_default_library", diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index e4502aeb801..e397921f0e4 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -54,6 +54,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" 
"k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -153,10 +154,11 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf // TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner // is not enough. We should create new clusterrole for testing. - framework.BindClusterRole(cs.RbacV1beta1(), "cluster-admin", ns.Name, + err := auth.BindClusterRole(cs.RbacV1beta1(), "cluster-admin", ns.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) + framework.ExpectNoError(err) - err := framework.WaitForAuthorizationUpdate(cs.AuthorizationV1beta1(), + err = auth.WaitForAuthorizationUpdate(cs.AuthorizationV1beta1(), serviceaccount.MakeUsername(ns.Name, "default"), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization: %v", err) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 609e9c4b172..9f04b514b2d 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -46,6 +46,7 @@ import ( volumehelpers "k8s.io/cloud-provider/volume/helpers" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/auth" "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -710,10 +711,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Name: serviceAccountName, } - framework.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns, subject) + err := auth.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns, subject) + framework.ExpectNoError(err) roleName := "leader-locking-nfs-provisioner" - _, err := f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{ + _, err = f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, }, @@ -725,9 +727,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) framework.ExpectNoError(err, "Failed to create leader-locking role") - framework.BindRoleInNamespace(c.RbacV1beta1(), roleName, ns, subject) + err = auth.BindRoleInNamespace(c.RbacV1beta1(), roleName, ns, subject) + framework.ExpectNoError(err) - err = framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(), + err = auth.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(), serviceaccount.MakeUsername(ns, serviceAccountName), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization") From 7e01702a8827529b48f131ca287cf498391ff88b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 16 Apr 2019 09:56:38 -0400 Subject: [PATCH 058/209] Move "-s -w" flags to GOLDFLAGS as an overridable default. If GOLDFLAGS is set, whether it is empty or not, we should honor it. 
Only if GOLDFLAGS is entirely unset do we fall back to "-s -w".

See the Parameter Expansion sections in the URLs below:
https://stackoverflow.com/questions/3601515/how-to-check-if-a-variable-is-set-in-bash/16753536
http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_02

Change-Id: I826c34efc63c77f0e3e9677fff30a3eb2219a377
---
 hack/.shellcheck_failures | 1 +
 hack/lib/golang.sh        | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 400203d0fe4..e8cc7a4fed8 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -27,6 +27,7 @@
 ./hack/cherry_pick_pull.sh
 ./hack/ginkgo-e2e.sh
 ./hack/grab-profiles.sh
+./hack/lib/golang.sh
 ./hack/lib/init.sh
 ./hack/lib/swagger.sh
 ./hack/lib/test.sh
diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
index 36f9f437e84..47133ad74e3 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -693,7 +693,7 @@ kube::golang::build_binaries() {
     host_platform=$(kube::golang::host_platform)

     local goflags goldflags goasmflags gogcflags
-    goldflags="${GOLDFLAGS:-} -s -w $(kube::version::ldflags)"
+    goldflags="${GOLDFLAGS=-s -w} $(kube::version::ldflags)"
     goasmflags="-trimpath=${KUBE_ROOT}"
     gogcflags="${GOGCFLAGS:-} -trimpath=${KUBE_ROOT}"


From ef6f88d992c3c5e001a899a0d3f88638eeff7281 Mon Sep 17 00:00:00 2001
From: Pengfei Ni
Date: Wed, 17 Apr 2019 11:42:29 +0800
Subject: [PATCH 059/209] Fix Azure SLB support for multiple backend pools

Azure VMs and VMSS VMs support multiple backend pools for the same SLB,
but not for different LBs.
---
 .../providers/azure/azure_standard.go  | 19 +++--
 .../providers/azure/azure_vmss.go      | 19 +++--
 .../providers/azure/azure_wrap.go      | 26 +++++++
 .../providers/azure/azure_wrap_test.go | 75 +++++++++++++++++++
 4 files changed, 123 insertions(+), 16 deletions(-)

diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go
index 4746e0fe1d2..c2b22e1e6df 100644
--- a/pkg/cloudprovider/providers/azure/azure_standard.go
+++ b/pkg/cloudprovider/providers/azure/azure_standard.go
@@ -676,17 +676,20 @@ func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.
 	// sets, the same network interface couldn't be added to more than one load balancer of
 	// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
 	// about this.
+ newBackendPoolsIDs := make([]string, 0, len(newBackendPools)) for _, pool := range newBackendPools { - backendPool := *pool.ID - matches := backendPoolIDRE.FindStringSubmatch(backendPool) - if len(matches) == 2 { - lbName := matches[1] - if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName) - return nil - } + if pool.ID != nil { + newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID) } } + isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs) + if err != nil { + return err + } + if !isSameLB { + klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + return nil + } } newBackendPools = append(newBackendPools, diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index a2ef73b3a97..b0e20b8ce09 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -785,17 +785,20 @@ func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID st // the same network interface couldn't be added to more than one load balancer of // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain // about this. + newBackendPoolsIDs := make([]string, 0, len(newBackendPools)) for _, pool := range newBackendPools { - backendPool := *pool.ID - matches := backendPoolIDRE.FindStringSubmatch(backendPool) - if len(matches) == 2 { - lbName := matches[1] - if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - klog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName) - return nil - } + if pool.ID != nil { + newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID) } } + isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs) + if err != nil { + return err + } + if !isSameLB { + klog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmSetName, oldLBName) + return nil + } } newBackendPools = append(newBackendPools, diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 93d1095b6f8..f35ee755385 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -336,3 +336,29 @@ func convertResourceGroupNameToLower(resourceID string) (string, error) { resourceGroup := matches[1] return strings.Replace(resourceID, resourceGroup, strings.ToLower(resourceGroup), 1), nil } + +// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools. +// Since both public and internal LBs are supported, lbName and lbName-internal are treated as same. +// If not same, the lbName for existingBackendPools would also be returned. 
+func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) {
+	matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID)
+	if len(matches) != 2 {
+		return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID)
+	}
+
+	newLBName := matches[1]
+	newLBNameTrimmed := strings.TrimSuffix(newLBName, InternalLoadBalancerNameSuffix)
+	for _, backendPool := range existingBackendPools {
+		matches := backendPoolIDRE.FindStringSubmatch(backendPool)
+		if len(matches) != 2 {
+			return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool)
+		}
+
+		lbName := matches[1]
+		if !strings.EqualFold(strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix), newLBNameTrimmed) {
+			return false, lbName, nil
+		}
+	}
+
+	return true, "", nil
+}
diff --git a/pkg/cloudprovider/providers/azure/azure_wrap_test.go b/pkg/cloudprovider/providers/azure/azure_wrap_test.go
index 84367b547c3..417f46cee85 100644
--- a/pkg/cloudprovider/providers/azure/azure_wrap_test.go
+++ b/pkg/cloudprovider/providers/azure/azure_wrap_test.go
@@ -198,3 +198,78 @@ func TestConvertResourceGroupNameToLower(t *testing.T) {
 		assert.Equal(t, test.expected, real, test.desc)
 	}
 }
+
+func TestIsBackendPoolOnSameLB(t *testing.T) {
+	tests := []struct {
+		backendPoolID        string
+		existingBackendPools []string
+		expected             bool
+		expectedLBName       string
+		expectError          bool
+	}{
+		{
+			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
+			existingBackendPools: []string{
+				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
+			},
+			expected:       true,
+			expectedLBName: "",
+		},
+		{
+			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool1",
+			existingBackendPools: []string{
+				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
+			},
+			expected:       true,
+			expectedLBName: "",
+		},
+		{
+			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
+			existingBackendPools: []string{
+				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool2",
+			},
+			expected:       true,
+			expectedLBName: "",
+		},
+		{
+			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
+			existingBackendPools: []string{
+				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2/backendAddressPools/pool2",
+			},
+			expected:       false,
+			expectedLBName: "lb2",
+		},
+		{
+			backendPoolID: "wrong-backendpool-id",
+			existingBackendPools: []string{
+				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
+			},
+			expectError: true,
+		},
+		{
+			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
+			existingBackendPools: []string{
+				"wrong-existing-backendpool-id",
+			},
+			expectError: true,
+		},
+		{
+			backendPoolID: "wrong-backendpool-id",
+			existingBackendPools: []string{
+				"wrong-existing-backendpool-id",
+			},
+			expectError: true,
+		},
+	}
+
+	for _, test := range tests {
+		isSameLB, lbName, err := isBackendPoolOnSameLB(test.backendPoolID, test.existingBackendPools)
+		if test.expectError {
+			assert.Error(t,
err) + continue + } + + assert.Equal(t, test.expected, isSameLB) + assert.Equal(t, test.expectedLBName, lbName) + } +} From e3dcd3c0a9e4720e5426d9102b3eccf601165ca0 Mon Sep 17 00:00:00 2001 From: PingWang Date: Sat, 13 Apr 2019 11:45:06 +0800 Subject: [PATCH 060/209] Expect directory permissions to be 0700 or less Signed-off-by: PingWang update the permissions to 0660 Signed-off-by: PingWang revert to 0750 Signed-off-by: PingWang update 0750 to 0700 Signed-off-by: PingWang --- cmd/kubeadm/app/phases/upgrade/postupgrade.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index ca1e51ad593..e49e37bfe57 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -231,7 +231,7 @@ func GetKubeletDir(dryRun bool) (string, error) { // backupAPIServerCertAndKey backups the old cert and key of kube-apiserver to a specified directory. func backupAPIServerCertAndKey(certAndKeyDir string) error { subDir := filepath.Join(certAndKeyDir, "expired") - if err := os.Mkdir(subDir, 0766); err != nil { + if err := os.Mkdir(subDir, 0700); err != nil { return errors.Wrapf(err, "failed to created backup directory %s", subDir) } From bd7f7fefaec2ab5565693ed8b08a561736d1fe9e Mon Sep 17 00:00:00 2001 From: SataQiu Date: Thu, 18 Apr 2019 20:39:04 +0800 Subject: [PATCH 061/209] fix shellcheck failures of hack/update-generated-runtime-dockerized.sh --- hack/.shellcheck_failures | 1 - hack/update-generated-runtime-dockerized.sh | 14 ++++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 400203d0fe4..a4aa73da9e5 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -43,7 +43,6 @@ ./hack/pin-dependency.sh ./hack/test-integration.sh ./hack/test-update-storage-objects.sh -./hack/update-generated-runtime-dockerized.sh ./hack/update-vendor.sh ./hack/verify-api-groups.sh ./hack/verify-boilerplate.sh diff --git a/hack/update-generated-runtime-dockerized.sh b/hack/update-generated-runtime-dockerized.sh index 771b476661b..8e17dade6ba 100755 --- a/hack/update-generated-runtime-dockerized.sh +++ b/hack/update-generated-runtime-dockerized.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. KUBE_REMOTE_RUNTIME_ROOT="${KUBE_ROOT}/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2" source "${KUBE_ROOT}/hack/lib/init.sh" @@ -40,22 +40,24 @@ if [[ -z "$(which protoc)" || "$(protoc --version)" != "libprotoc 3."* ]]; then fi function cleanup { - rm -f ${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go.bak + rm -f "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go.bak" + rm -f "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go.tmp" } trap cleanup EXIT -gogopath=$(dirname $(kube::util::find-binary "protoc-gen-gogo")) +gogopath=$(dirname "$(kube::util::find-binary "protoc-gen-gogo")") PATH="${gogopath}:${PATH}" \ protoc \ --proto_path="${KUBE_REMOTE_RUNTIME_ROOT}" \ --proto_path="${KUBE_ROOT}/vendor" \ - --gogo_out=plugins=grpc:${KUBE_REMOTE_RUNTIME_ROOT} ${KUBE_REMOTE_RUNTIME_ROOT}/api.proto + --gogo_out=plugins=grpc:"${KUBE_REMOTE_RUNTIME_ROOT}" "${KUBE_REMOTE_RUNTIME_ROOT}/api.proto" # Update boilerplate for the generated file. 
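The hunk continuing below replaces echo "$(cat header file)" > file — which relies on the command substitution being fully expanded before the redirection truncates the file, and on echo not mangling the contents — with an explicit write-to-temp-then-rename. A rough Go rendering of the same idiom, with hypothetical file names:

package main

import "os"

// prependHeader writes header plus the current contents of path into a
// temporary sibling file, then renames it over the original, so a failure
// partway through cannot leave path truncated.
func prependHeader(path, header string) error {
	contents, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, append([]byte(header), contents...), 0644); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	// api.pb.go is just the example target taken from the script below.
	if err := prependHeader("api.pb.go", "// boilerplate header\n"); err != nil {
		os.Exit(1)
	}
}
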
-echo "$(cat hack/boilerplate/boilerplate.generatego.txt ${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go)" > ${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go +cat hack/boilerplate/boilerplate.generatego.txt "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go" > "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go.tmp" +mv "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go.tmp" "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go" # Run gofmt to clean up the generated code. kube::golang::verify_go_version -gofmt -l -s -w ${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go +gofmt -l -s -w "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go" From 02f282187b1f9539177ebfe782d2c9145bba8ab0 Mon Sep 17 00:00:00 2001 From: Matt Matejczyk Date: Thu, 18 Apr 2019 17:05:33 +0200 Subject: [PATCH 062/209] Create the "internal" firewall rule for kubemark master. This is equivalent to the "internal" firewall rule that is created for the regular masters. The main reason for doing it is to allow prometheus scraping metrics from various kubemark master components, e.g. kubelet. Ref. https://github.com/kubernetes/perf-tests/issues/503 --- test/kubemark/gce/util.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/kubemark/gce/util.sh b/test/kubemark/gce/util.sh index d4ba2c00861..760db164d52 100644 --- a/test/kubemark/gce/util.sh +++ b/test/kubemark/gce/util.sh @@ -102,6 +102,13 @@ function create-master-instance-with-resources { --target-tags "${MASTER_TAG}" \ --allow "tcp:443" & + run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-internal" \ + --project "${PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "10.0.0.0/8" \ + --target-tags "${MASTER_TAG}" \ + --allow "tcp:1-2379,tcp:2382-65535,udp:1-65535,icmp" & + wait } @@ -136,6 +143,10 @@ function delete-master-instance-and-resources { --project "${PROJECT}" \ --quiet || true + gcloud compute firewall-rules delete "${MASTER_NAME}-internal" \ + --project "${PROJECT}" \ + --quiet || true + if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then gcloud compute instances delete "${EVENT_STORE_NAME}" \ "${GCLOUD_COMMON_ARGS[@]}" || true From 3f552264ca28a4738c77ebed8414d0d2dc8e7063 Mon Sep 17 00:00:00 2001 From: Vince Prignano Date: Thu, 11 Apr 2019 15:57:45 -0700 Subject: [PATCH 063/209] Update klog to 0.3.0 Signed-off-by: Vince Prignano --- cmd/kubelet/app/options/globalflags.go | 25 +++------- go.mod | 4 +- go.sum | 4 +- staging/src/k8s.io/api/go.sum | 4 +- .../src/k8s.io/apiextensions-apiserver/go.mod | 2 +- .../src/k8s.io/apiextensions-apiserver/go.sum | 4 +- staging/src/k8s.io/apimachinery/go.mod | 2 +- staging/src/k8s.io/apimachinery/go.sum | 4 +- staging/src/k8s.io/apiserver/go.mod | 2 +- staging/src/k8s.io/apiserver/go.sum | 4 +- staging/src/k8s.io/cli-runtime/go.sum | 4 +- staging/src/k8s.io/client-go/go.mod | 2 +- staging/src/k8s.io/client-go/go.sum | 4 +- staging/src/k8s.io/cloud-provider/go.mod | 2 +- staging/src/k8s.io/cloud-provider/go.sum | 4 +- staging/src/k8s.io/cluster-bootstrap/go.sum | 4 +- staging/src/k8s.io/code-generator/go.mod | 2 +- staging/src/k8s.io/code-generator/go.sum | 4 +- .../component-base/cli/globalflag/BUILD | 1 + .../cli/globalflag/globalflags.go | 25 +++++----- .../cli/globalflag/globalflags_test.go | 3 +- staging/src/k8s.io/component-base/go.mod | 2 +- staging/src/k8s.io/component-base/go.sum | 4 +- .../src/k8s.io/component-base/logs/logs.go | 2 - staging/src/k8s.io/csi-translation-lib/go.sum | 4 +- staging/src/k8s.io/kube-aggregator/go.mod | 2 +- staging/src/k8s.io/kube-aggregator/go.sum | 4 +- .../src/k8s.io/kube-controller-manager/go.sum | 4 +- 
staging/src/k8s.io/kube-proxy/go.sum | 4 +- staging/src/k8s.io/kube-scheduler/go.sum | 4 +- staging/src/k8s.io/kubelet/go.sum | 4 +- staging/src/k8s.io/metrics/go.sum | 4 +- staging/src/k8s.io/node-api/go.sum | 4 +- staging/src/k8s.io/sample-apiserver/go.mod | 2 +- staging/src/k8s.io/sample-apiserver/go.sum | 4 +- staging/src/k8s.io/sample-cli-plugin/go.sum | 4 +- staging/src/k8s.io/sample-controller/go.mod | 2 +- staging/src/k8s.io/sample-controller/go.sum | 4 +- vendor/k8s.io/klog/README.md | 22 ++++++++- vendor/k8s.io/klog/SECURITY_CONTACTS | 4 +- vendor/k8s.io/klog/klog.go | 46 ++++++++++++++++--- vendor/k8s.io/klog/klog_file.go | 2 +- vendor/modules.txt | 2 +- 43 files changed, 140 insertions(+), 104 deletions(-) diff --git a/cmd/kubelet/app/options/globalflags.go b/cmd/kubelet/app/options/globalflags.go index 2938aec20b5..e8853e92701 100644 --- a/cmd/kubelet/app/options/globalflags.go +++ b/cmd/kubelet/app/options/globalflags.go @@ -26,10 +26,10 @@ import ( // libs that provide registration functions "k8s.io/component-base/logs" + "k8s.io/klog" "k8s.io/kubernetes/pkg/version/verflag" // ensure libs have a chance to globally register their flags - _ "k8s.io/klog" _ "k8s.io/kubernetes/pkg/credentialprovider/azure" _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" ) @@ -38,7 +38,7 @@ import ( // against the global flagsets from "flag" and "github.com/spf13/pflag". // We do this in order to prevent unwanted flags from leaking into the Kubelet's flagset. func AddGlobalFlags(fs *pflag.FlagSet) { - addGlogFlags(fs) + addKlogFlags(fs) addCadvisorFlags(fs) addCredentialProviderFlags(fs) verflag.AddFlags(fs) @@ -91,20 +91,9 @@ func addCredentialProviderFlags(fs *pflag.FlagSet) { fs.AddFlagSet(local) } -// addGlogFlags adds flags from k8s.io/klog -func addGlogFlags(fs *pflag.FlagSet) { - // lookup flags in global flag set and re-register the values with our flagset - global := flag.CommandLine - local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) - - register(global, local, "logtostderr") - register(global, local, "alsologtostderr") - register(global, local, "v") - register(global, local, "stderrthreshold") - register(global, local, "vmodule") - register(global, local, "log_backtrace_at") - register(global, local, "log_dir") - register(global, local, "log_file") - - fs.AddFlagSet(local) +// addKlogFlags adds flags from k8s.io/klog +func addKlogFlags(fs *pflag.FlagSet) { + local := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + klog.InitFlags(local) + fs.AddGoFlagSet(local) } diff --git a/go.mod b/go.mod index dd20ed7f9bb..0cb73395f27 100644 --- a/go.mod +++ b/go.mod @@ -184,7 +184,7 @@ require ( k8s.io/csi-translation-lib v0.0.0 k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af k8s.io/heapster v1.2.0-beta.1 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/kube-aggregator v0.0.0 k8s.io/kube-controller-manager v0.0.0 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 @@ -451,7 +451,7 @@ replace ( k8s.io/csi-translation-lib => ./staging/src/k8s.io/csi-translation-lib k8s.io/gengo => k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1 - k8s.io/klog => k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog => k8s.io/klog v0.3.0 k8s.io/kube-aggregator => ./staging/src/k8s.io/kube-aggregator k8s.io/kube-controller-manager => ./staging/src/k8s.io/kube-controller-manager k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 diff --git a/go.sum b/go.sum index ba3a1420764..b73c0e88726 100644 --- 
a/go.sum +++ b/go.sum @@ -470,8 +470,8 @@ k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8 k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/heapster v1.2.0-beta.1 h1:lUsE/AHOMHpi3MLlBEkaU8Esxm5QhdyCrv1o7ot0s84= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3 h1:WD6cPA3q7qxZe6Fwir0XjjGwGMaWbHlHUcjCcOzuRG0= diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index d27d9409e3e..867b39b2fd6 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -46,8 +46,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index 22711f8a4f1..e661f0d799d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -31,7 +31,7 @@ require ( k8s.io/client-go v0.0.0 k8s.io/code-generator v0.0.0 k8s.io/component-base v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 k8s.io/utils v0.0.0-20190221042446-c2654d5206da sigs.k8s.io/yaml v1.1.0 diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 57b2d5773b9..4d86f18bf22 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -217,8 +217,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog 
v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index bc774897197..0f33940f7f6 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -30,7 +30,7 @@ require ( golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db // indirect gopkg.in/inf.v0 v0.9.0 gopkg.in/yaml.v2 v2.2.1 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 sigs.k8s.io/yaml v1.1.0 ) diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index 0c791855a14..a090719bf51 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -61,8 +61,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index bfeb6245432..69b9c4d8625 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -66,7 +66,7 @@ require ( k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 k8s.io/component-base v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 k8s.io/utils v0.0.0-20190221042446-c2654d5206da sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2 diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 5f6c802e820..584c6f9c493 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -188,8 +188,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod 
h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index 3f5bb0ea74e..915e446a947 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -99,8 +99,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod index 252d62d8bbb..737331bf424 100644 --- a/staging/src/k8s.io/client-go/go.mod +++ b/staging/src/k8s.io/client-go/go.mod @@ -29,7 +29,7 @@ require ( google.golang.org/appengine v1.5.0 // indirect k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/utils v0.0.0-20190221042446-c2654d5206da sigs.k8s.io/yaml v1.1.0 ) diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum index 9c8b749ca8b..e5587543331 100644 --- a/staging/src/k8s.io/client-go/go.sum +++ b/staging/src/k8s.io/client-go/go.sum @@ -83,8 +83,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod index 5be82242375..8ba55cc8b01 100644 --- 
a/staging/src/k8s.io/cloud-provider/go.mod +++ b/staging/src/k8s.io/cloud-provider/go.mod @@ -9,7 +9,7 @@ require ( k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 k8s.io/client-go v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/utils v0.0.0-20190221042446-c2654d5206da ) diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum index 33336a1244d..f01a2ed4624 100644 --- a/staging/src/k8s.io/cloud-provider/go.sum +++ b/staging/src/k8s.io/cloud-provider/go.sum @@ -128,8 +128,8 @@ gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index c5c617c5e23..75ea41f0387 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -44,8 +44,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod index 931888c794b..6d5e45a7518 100644 --- a/staging/src/k8s.io/code-generator/go.mod +++ b/staging/src/k8s.io/code-generator/go.mod @@ -11,7 +11,7 @@ require ( gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e // indirect k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 ) replace ( diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum index aef70274f7c..c2d8a90dfe9 100644 --- a/staging/src/k8s.io/code-generator/go.sum +++ b/staging/src/k8s.io/code-generator/go.sum @@ -18,8 +18,8 @@ gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e 
h1:jRyg0XfpwWlhEV8mDfdNGB gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/staging/src/k8s.io/component-base/cli/globalflag/BUILD b/staging/src/k8s.io/component-base/cli/globalflag/BUILD index 1026eb654c0..22749e8e7d7 100644 --- a/staging/src/k8s.io/component-base/cli/globalflag/BUILD +++ b/staging/src/k8s.io/component-base/cli/globalflag/BUILD @@ -9,6 +9,7 @@ go_library( deps = [ "//staging/src/k8s.io/component-base/logs:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/component-base/cli/globalflag/globalflags.go b/staging/src/k8s.io/component-base/cli/globalflag/globalflags.go index 455153f680c..78e4b56f186 100644 --- a/staging/src/k8s.io/component-base/cli/globalflag/globalflags.go +++ b/staging/src/k8s.io/component-base/cli/globalflag/globalflags.go @@ -19,35 +19,32 @@ package globalflag import ( "flag" "fmt" + "os" "strings" "github.com/spf13/pflag" - "k8s.io/component-base/logs" + "k8s.io/klog" ) // AddGlobalFlags explicitly registers flags that libraries (klog, verflag, etc.) register // against the global flagsets from "flag" and "k8s.io/klog". // We do this in order to prevent unwanted flags from leaking into the component's flagset. func AddGlobalFlags(fs *pflag.FlagSet, name string) { - addGlogFlags(fs) + addKlogFlags(fs) logs.AddFlags(fs) fs.BoolP("help", "h", false, fmt.Sprintf("help for %s", name)) } -// addGlogFlags explicitly registers flags that klog libraries(k8s.io/klog) register. 
-func addGlogFlags(fs *pflag.FlagSet) { - // lookup flags of klog libraries in global flag set and re-register the values with our flagset - Register(fs, "logtostderr") - Register(fs, "alsologtostderr") - Register(fs, "v") - Register(fs, "skip_headers") - Register(fs, "stderrthreshold") - Register(fs, "vmodule") - Register(fs, "log_backtrace_at") - Register(fs, "log_dir") - Register(fs, "log_file") +// addKlogFlags adds flags from k8s.io/klog +func addKlogFlags(fs *pflag.FlagSet) { + local := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + klog.InitFlags(local) + local.VisitAll(func(fl *flag.Flag) { + fl.Name = normalize(fl.Name) + fs.AddGoFlag(fl) + }) } // normalize replaces underscores with hyphens diff --git a/staging/src/k8s.io/component-base/cli/globalflag/globalflags_test.go b/staging/src/k8s.io/component-base/cli/globalflag/globalflags_test.go index c3ee85304ea..28451f400f8 100644 --- a/staging/src/k8s.io/component-base/cli/globalflag/globalflags_test.go +++ b/staging/src/k8s.io/component-base/cli/globalflag/globalflags_test.go @@ -24,7 +24,6 @@ import ( "testing" "github.com/spf13/pflag" - cliflag "k8s.io/component-base/cli/flag" ) @@ -58,7 +57,7 @@ func TestAddGlobalFlags(t *testing.T) { }{ { // Happy case - expectedFlag: []string{"alsologtostderr", "help", "log-backtrace-at", "log-dir", "log-file", "log-flush-frequency", "logtostderr", "skip-headers", "stderrthreshold", "v", "vmodule"}, + expectedFlag: []string{"alsologtostderr", "help", "log-backtrace-at", "log-dir", "log-file", "log-file-max-size", "log-flush-frequency", "logtostderr", "skip-headers", "skip-log-headers", "stderrthreshold", "v", "vmodule"}, matchExpected: false, }, { diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index a8c11392756..afee884b36b 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -7,7 +7,7 @@ go 1.12 require ( github.com/spf13/pflag v1.0.1 k8s.io/apimachinery v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/utils v0.0.0-20190221042446-c2654d5206da ) diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index 2b27de2a7a1..698c02811f1 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -44,8 +44,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= diff --git a/staging/src/k8s.io/component-base/logs/logs.go b/staging/src/k8s.io/component-base/logs/logs.go index 3ffe9eeb29b..4c1adf86a6f 100644 --- a/staging/src/k8s.io/component-base/logs/logs.go +++ 
b/staging/src/k8s.io/component-base/logs/logs.go @@ -31,10 +31,8 @@ const logFlushFreqFlagName = "log-flush-frequency" var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum number of seconds between log flushes") -// TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. func init() { klog.InitFlags(flag.CommandLine) - flag.Set("logtostderr", "true") } // AddFlags registers this package's flags on arbitrary FlagSets, such that they point to the diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index 5999f63d724..0366df4205c 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -112,8 +112,8 @@ gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index af721de73ce..1dba0512edf 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -21,7 +21,7 @@ require ( k8s.io/client-go v0.0.0 k8s.io/code-generator v0.0.0 k8s.io/component-base v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 k8s.io/utils v0.0.0-20190221042446-c2654d5206da ) diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index e2f00b2af0b..38622c7a306 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -201,8 +201,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da 
h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 2b27de2a7a1..698c02811f1 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -44,8 +44,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 2b27de2a7a1..698c02811f1 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -44,8 +44,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 2b27de2a7a1..698c02811f1 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -44,8 +44,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod 
h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index c5c617c5e23..75ea41f0387 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -44,8 +44,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index c8a03c7f23a..728912aa647 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -84,8 +84,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/node-api/go.sum b/staging/src/k8s.io/node-api/go.sum index 9a0445d4918..a4fc75aed8d 100644 --- a/staging/src/k8s.io/node-api/go.sum +++ b/staging/src/k8s.io/node-api/go.sum @@ -86,8 +86,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index e1bce6bde67..cc552ff4fd9 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -13,7 +13,7 @@ require ( k8s.io/client-go v0.0.0 k8s.io/code-generator v0.0.0 k8s.io/component-base v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 ) replace ( diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index ac75fa431a7..bd09efb0b47 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -198,8 +198,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index 3f5bb0ea74e..915e446a947 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -99,8 +99,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod index 2a09dbc1ec3..cbd8819926d 100644 --- a/staging/src/k8s.io/sample-controller/go.mod +++ b/staging/src/k8s.io/sample-controller/go.mod @@ -9,7 +9,7 @@ require ( k8s.io/apimachinery v0.0.0 
k8s.io/client-go v0.0.0 k8s.io/code-generator v0.0.0 - k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 + k8s.io/klog v0.3.0 ) replace ( diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index d2ebb18ecd2..74d2d5160d5 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -87,8 +87,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index 6cb6d168375..bee306f398f 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -1,7 +1,27 @@ klog ==== -klog is a permanant fork of https://github.com/golang/glog. original README from glog is below +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. + +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ ---- diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS index 520ddb52575..6128a586995 100644 --- a/vendor/k8s.io/klog/SECURITY_CONTACTS +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -1,10 +1,10 @@ # Defined below are the security contacts for this repo. # -# They are the contact point for the Product Security Team to reach out +# They are the contact point for the Product Security Committee to reach out # to for triaging and handling of incoming issues.
# # The below names agree to abide by the -# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) # and will be removed and replaced if they violate that agreement. # # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 398cd1c5dbf..887ea62dff7 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -78,6 +78,7 @@ import ( "fmt" "io" stdLog "log" + "math" "os" "path/filepath" "runtime" @@ -410,10 +411,14 @@ func InitFlags(flagset *flag.FlagSet) { } flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory") flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, + "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") flagset.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -471,8 +476,15 @@ type loggingT struct { // with the log-dir option. logFile string + // When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it exceeds that size, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + // If true, do not add the prefix headers, useful when used with SetOutput skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -861,17 +873,32 @@ func (l *loggingT) exit(err error) { type syncBuffer struct { logger *loggingT *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } func (sb *syncBuffer) Sync() error { return sb.file.Sync() } +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. +func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience.
+ return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when it reaches a fixed size. + return MaxSize +} + func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { if err := sb.rotateFile(time.Now(), false); err != nil { sb.logger.exit(err) } @@ -886,7 +913,7 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) { // rotateFile closes the syncBuffer's file and starts a new one. // The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for apending instead of truncated. +// If startup is true, existing files are opened for appending instead of truncated. func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { if sb.file != nil { sb.Flush() @@ -901,6 +928,10 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + if sb.logger.skipLogHeaders { + return nil + } + // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) @@ -925,8 +956,9 @@ func (l *loggingT) createFiles(sev severity) error { // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ - logger: l, - sev: s, + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), } if err := sb.rotateFile(now, true); err != nil { return err diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go index 1c4897f4fdc..e4010ad4df0 100644 --- a/vendor/k8s.io/klog/klog_file.go +++ b/vendor/k8s.io/klog/klog_file.go @@ -98,7 +98,7 @@ var onceLogDirs sync.Once // successfully, create also attempts to update the symlink for that tag, ignoring // errors. // The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for apending instead of truncated. +// If startup is true, existing files are opened for appending instead of truncated.
func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { if logging.logFile != "" { f, err := openOrCreate(logging.logFile, startup) diff --git a/vendor/modules.txt b/vendor/modules.txt index 9039db03063..c25e60016d9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1571,7 +1571,7 @@ k8s.io/gengo/parser k8s.io/gengo/types # k8s.io/heapster v1.2.0-beta.1 => k8s.io/heapster v1.2.0-beta.1 k8s.io/heapster/metrics/api/v1/types -# k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 => k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 +# k8s.io/klog v0.3.0 => k8s.io/klog v0.3.0 k8s.io/klog # k8s.io/kube-aggregator v0.0.0 => ./staging/src/k8s.io/kube-aggregator k8s.io/kube-aggregator/pkg/apis/apiregistration From b771be9b4428d06184a8356e8d5b61dd44cf18b0 Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 18 Apr 2019 11:50:18 -0400 Subject: [PATCH 064/209] add apimachinery api-reviewers --- OWNERS_ALIASES | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index fe330d3cd9f..1ca190d7e32 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -345,9 +345,11 @@ aliases: # api-reviewers targeted by sig area # see https://git.k8s.io/community/sig-architecture/api-review-process.md#training-reviews - # sig-api-machinery-api-reviewers: - # - - # - + sig-api-machinery-api-reviewers: + - caesarxuchao + - deads2k + - jpbetz + - sttts # sig-apps-api-reviewers: # - From 44860218379d0880578575629ca4481801b85fa9 Mon Sep 17 00:00:00 2001 From: caiweidong Date: Fri, 19 Apr 2019 00:18:54 +0800 Subject: [PATCH 065/209] Avoid useless call --- pkg/scheduler/algorithm/predicates/predicates.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 9527550825d..c886b8fb259 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -1298,11 +1298,11 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P topologyMaps := newTopologyPairsMaps() for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { - namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { return nil, err } + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { pair := topologyPair{key: term.TopologyKey, value: topologyValue} From 0062a7d8de995fe59bfe7739ad20cea1bdde38e3 Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Thu, 18 Apr 2019 09:36:03 -0700 Subject: [PATCH 066/209] Store parsed CIDRs at initialization of Proxier --- pkg/proxy/ipvs/proxier.go | 23 +++++++++++++++++------ pkg/proxy/ipvs/proxier_test.go | 8 ++++---- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 7a78c61fe81..e01c5a27d48 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -194,7 +194,7 @@ type Proxier struct { syncPeriod time.Duration minSyncPeriod time.Duration // Values are CIDR's to exclude when cleaning up IPVS rules.
- excludeCIDRs []string + excludeCIDRs []*net.IPNet // Set to true to set sysctls arp_ignore and arp_announce strictARP bool iptables utiliptables.Interface @@ -274,6 +274,19 @@ func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) { // Proxier implements ProxyProvider var _ proxy.ProxyProvider = &Proxier{} +// ParseExcludedCIDRs parses the input strings and returns net.IPNet +// The validation has been done earlier so the error condition will never happen under normal conditions +func ParseExcludedCIDRs(excludeCIDRStrs []string) []*net.IPNet { + var cidrExclusions []*net.IPNet + for _, excludedCIDR := range excludeCIDRStrs { + _, n, err := net.ParseCIDR(excludedCIDR) + if err == nil { + cidrExclusions = append(cidrExclusions, n) + } + } + return cidrExclusions +} + // NewProxier returns a new Proxier given an iptables and ipvs Interface instance. // Because of the iptables and ipvs logic, it is assumed that there is only a single Proxier active on a machine. // An error will be returned if it fails to update or acquire the initial lock. @@ -286,7 +299,7 @@ func NewProxier(ipt utiliptables.Interface, exec utilexec.Interface, syncPeriod time.Duration, minSyncPeriod time.Duration, - excludeCIDRs []string, + excludeCIDRStrs []string, strictARP bool, masqueradeAll bool, masqueradeBit int, @@ -397,7 +410,7 @@ func NewProxier(ipt utiliptables.Interface, endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder), syncPeriod: syncPeriod, minSyncPeriod: minSyncPeriod, - excludeCIDRs: excludeCIDRs, + excludeCIDRs: ParseExcludedCIDRs(excludeCIDRStrs), iptables: ipt, masqueradeAll: masqueradeAll, masqueradeMark: masqueradeMark, @@ -1715,9 +1728,7 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre func (proxier *Proxier) isIPInExcludeCIDRs(ip net.IP) bool { // make sure it does not fall within an excluded CIDR range. for _, excludedCIDR := range proxier.excludeCIDRs { - // Any validation of this CIDR already should have occurred. - _, n, _ := net.ParseCIDR(excludedCIDR) - if n.Contains(ip) { + if excludedCIDR.Contains(ip) { return true } } diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index e3246032b81..646d84eaf61 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -125,7 +125,7 @@ func (fakeSysctl *FakeSysctl) SetSysctl(sysctl string, newVal int) error { return nil } -func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP, excludeCIDRs []string) *Proxier { +func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP, excludeCIDRs []*net.IPNet) *Proxier { fcmd := fakeexec.FakeCmd{ CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{ func() ([]byte, error) { return []byte("dummy device have been created"), nil }, @@ -2823,7 +2823,7 @@ func TestCleanLegacyService(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"3.3.3.0/24", "4.4.4.0/24"}) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, ParseExcludedCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"})) // All ipvs services that were processed in the latest sync loop. 
activeServices := map[string]bool{"ipvs0": true, "ipvs1": true} @@ -2930,7 +2930,7 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) { ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) gtm := NewGracefulTerminationManager(ipvs) - fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"4.4.4.4/32"}) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, ParseExcludedCIDRs([]string{"4.4.4.4/32"})) fp.gracefuldeleteManager = gtm vs := &utilipvs.VirtualServer{ @@ -2984,7 +2984,7 @@ func TestCleanLegacyService6(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"3000::/64", "4000::/64"}) + fp := NewFakeProxier(ipt, ipvs, ipset, nil, ParseExcludedCIDRs([]string{"3000::/64", "4000::/64"})) fp.nodeIP = net.ParseIP("::1") // All ipvs services that were processed in the latest sync loop. From 31f751c9f67716dd7a0c48f4f12e8d2e66ff97ee Mon Sep 17 00:00:00 2001 From: caiweidong Date: Fri, 19 Apr 2019 01:08:08 +0800 Subject: [PATCH 067/209] Kubectl describe xxx supports PriorityClassName --- pkg/kubectl/describe/versioned/describe.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/kubectl/describe/versioned/describe.go b/pkg/kubectl/describe/versioned/describe.go index b75051c0e23..86b9e6a3f5b 100644 --- a/pkg/kubectl/describe/versioned/describe.go +++ b/pkg/kubectl/describe/versioned/describe.go @@ -735,6 +735,9 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { } else { w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod)) } + if len(pod.Spec.PriorityClassName) > 0 { + w.Write(LEVEL_0, "Priority Class Name:\t%s\n", pod.Spec.PriorityClassName) + } printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector) printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations) if events != nil { @@ -1964,6 +1967,9 @@ func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) { } describeContainers("Containers", template.Spec.Containers, nil, nil, w, " ") describeVolumes(template.Spec.Volumes, w, " ") + if len(template.Spec.PriorityClassName) > 0 { + w.Write(LEVEL_1, "Priority Class Name:\t%s\n", template.Spec.PriorityClassName) + } } // ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. From aa604942090ba316571f5888f0a44eea67e62ec6 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Fri, 19 Apr 2019 04:06:06 +0800 Subject: [PATCH 068/209] fix golint failures of pkg/apis/authorization --- hack/.golint_failures | 1 - pkg/apis/authorization/register.go | 4 +++- pkg/apis/authorization/types.go | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 8ab29fc3b7a..f2b12903977 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -14,7 +14,6 @@ pkg/apis/apps/v1beta2 pkg/apis/apps/validation pkg/apis/auditregistration/v1alpha1 pkg/apis/authentication/v1 -pkg/apis/authorization pkg/apis/authorization/v1 pkg/apis/authorization/validation pkg/apis/autoscaling diff --git a/pkg/apis/authorization/register.go b/pkg/apis/authorization/register.go index 7ebf0322a53..3f91937ca26 100644 --- a/pkg/apis/authorization/register.go +++ b/pkg/apis/authorization/register.go @@ -38,8 +38,10 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme ) func addKnownTypes(scheme *runtime.Scheme) error { diff --git a/pkg/apis/authorization/types.go b/pkg/apis/authorization/types.go index e798ec2123e..fafd1800d2d 100644 --- a/pkg/apis/authorization/types.go +++ b/pkg/apis/authorization/types.go @@ -138,7 +138,7 @@ type SelfSubjectAccessReviewSpec struct { NonResourceAttributes *NonResourceAttributes } -// SubjectAccessReviewStatus +// SubjectAccessReviewStatus represents the current state of a SubjectAccessReview. type SubjectAccessReviewStatus struct { // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool @@ -177,6 +177,7 @@ type SelfSubjectRulesReview struct { Status SubjectRulesReviewStatus } +// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview. type SelfSubjectRulesReviewSpec struct { // Namespace to evaluate rules for. Required. Namespace string From 113ab741e6d8012814436c4844fc8e5c104131be Mon Sep 17 00:00:00 2001 From: Jake Sanders Date: Thu, 18 Apr 2019 19:51:37 +0000 Subject: [PATCH 069/209] add option to set the value of the apiserver's insecure port --- cluster/gce/gci/configure-helper.sh | 4 ++++ cluster/gce/manifests/kube-apiserver.manifest | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index c916f1f4f3b..655f493785e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1593,6 +1593,10 @@ function start-kube-apiserver { params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}" fi params+=" --secure-port=443" + if [[ "${ENABLE_APISERVER_INSECURE_PORT:-true}" != "true" ]]; then + # Default is :8080 + params+=" --insecure-port=0" + fi params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}" params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}" params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname" diff --git a/cluster/gce/manifests/kube-apiserver.manifest b/cluster/gce/manifests/kube-apiserver.manifest index d045c844c47..acbdcee0a55 100644 --- a/cluster/gce/manifests/kube-apiserver.manifest +++ b/cluster/gce/manifests/kube-apiserver.manifest @@ -32,8 +32,9 @@ {{container_env}} "livenessProbe": { "httpGet": { + "scheme": "HTTPS", "host": "127.0.0.1", - "port": 8080, + "port": {{secure_port}}, "path": "/healthz?exclude=etcd" }, "initialDelaySeconds": {{liveness_probe_initial_delay}}, @@ -41,8 +42,9 @@ }, "readinessProbe": { "httpGet": { + "scheme": "HTTPS", "host": "127.0.0.1", - "port": 8080, + "port": {{secure_port}}, "path": "/healthz" }, "periodSeconds": 1, From 0501fecad008555774084f0bed370a963096afe9 Mon Sep 17 00:00:00 2001 From: Sean Sullivan Date: Wed, 17 Apr 2019 21:03:52 -0700 Subject: [PATCH 070/209] Split humanreadable.go into tablegenerator.go and tableprinter.go --- pkg/printers/BUILD | 7 +- pkg/printers/tablegenerator.go | 190 +++++++++++++++++ .../{humanreadable.go => tableprinter.go} | 195 ++---------------- ...nreadable_test.go => tableprinter_test.go} | 14 +- 4 files changed, 221 insertions(+), 185 deletions(-) create mode 100644 pkg/printers/tablegenerator.go rename pkg/printers/{humanreadable.go => tableprinter.go} (62%) rename pkg/printers/{humanreadable_test.go => tableprinter_test.go} (92%) diff --git a/pkg/printers/BUILD b/pkg/printers/BUILD index
dea653133f2..bd7fd432b1b 100644 --- a/pkg/printers/BUILD +++ b/pkg/printers/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = [ - "humanreadable.go", "interface.go", + "tablegenerator.go", + "tableprinter.go", "tabwriter.go", ], importpath = "k8s.io/kubernetes/pkg/printers", @@ -45,10 +46,10 @@ filegroup( go_test( name = "go_default_test", - srcs = ["humanreadable_test.go"], + srcs = ["tableprinter_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/printers/tablegenerator.go b/pkg/printers/tablegenerator.go new file mode 100644 index 00000000000..bbfd7c54696 --- /dev/null +++ b/pkg/printers/tablegenerator.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +type TableGenerator interface { + GenerateTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) +} + +type PrintHandler interface { + TableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error + DefaultTableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error +} + +type handlerEntry struct { + columnDefinitions []metav1beta1.TableColumnDefinition + printFunc reflect.Value + args []reflect.Value +} + +// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide +// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers +// will only be printed if the object type changes. This makes it useful for printing items +// received from watches. +type HumanReadablePrinter struct { + handlerMap map[reflect.Type]*handlerEntry + defaultHandler *handlerEntry + options PrintOptions + lastType interface{} + lastColumns []metav1beta1.TableColumnDefinition +} + +var _ TableGenerator = &HumanReadablePrinter{} +var _ PrintHandler = &HumanReadablePrinter{} + +// NewTableGenerator creates a HumanReadablePrinter suitable for calling GenerateTable(). +func NewTableGenerator() *HumanReadablePrinter { + return &HumanReadablePrinter{ + handlerMap: make(map[reflect.Type]*handlerEntry), + } +} + +func (a *HumanReadablePrinter) With(fns ...func(PrintHandler)) *HumanReadablePrinter { + for _, fn := range fns { + fn(a) + } + return a +} + +// GenerateTable returns a table for the provided object, using the printer registered for that type. 
It returns +// a table that includes all of the information requested by options, but will not remove rows or columns. The +// caller is responsible for applying rules related to filtering rows or columns. +func (h *HumanReadablePrinter) GenerateTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) { + t := reflect.TypeOf(obj) + handler, ok := h.handlerMap[t] + if !ok { + return nil, fmt.Errorf("no table handler registered for this type %v", t) + } + + args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} + results := handler.printFunc.Call(args) + if !results[1].IsNil() { + return nil, results[1].Interface().(error) + } + + var columns []metav1beta1.TableColumnDefinition + if !options.NoHeaders { + columns = handler.columnDefinitions + if !options.Wide { + columns = make([]metav1beta1.TableColumnDefinition, 0, len(handler.columnDefinitions)) + for i := range handler.columnDefinitions { + if handler.columnDefinitions[i].Priority != 0 { + continue + } + columns = append(columns, handler.columnDefinitions[i]) + } + } + } + table := &metav1beta1.Table{ + ListMeta: metav1.ListMeta{ + ResourceVersion: "", + }, + ColumnDefinitions: columns, + Rows: results[0].Interface().([]metav1beta1.TableRow), + } + if m, err := meta.ListAccessor(obj); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + table.Continue = m.GetContinue() + } else { + if m, err := meta.CommonAccessor(obj); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + } + } + if err := decorateTable(table, options); err != nil { + return nil, err + } + return table, nil +} + +// TableHandler adds a print handler with a given set of columns to HumanReadablePrinter instance. +// See ValidateRowPrintHandlerFunc for required method signature. +func (h *HumanReadablePrinter) TableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { + printFuncValue := reflect.ValueOf(printFunc) + if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) + return err + } + entry := &handlerEntry{ + columnDefinitions: columnDefinitions, + printFunc: printFuncValue, + } + + objType := printFuncValue.Type().In(0) + if _, ok := h.handlerMap[objType]; ok { + err := fmt.Errorf("registered duplicate printer for %v", objType) + utilruntime.HandleError(err) + return err + } + h.handlerMap[objType] = entry + return nil +} + +// DefaultTableHandler registers a set of columns and a print func that is given a chance to process +// any object without an explicit handler. Only the most recently set print handler is used. +// See ValidateRowPrintHandlerFunc for required method signature. +func (h *HumanReadablePrinter) DefaultTableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { + printFuncValue := reflect.ValueOf(printFunc) + if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) + return err + } + entry := &handlerEntry{ + columnDefinitions: columnDefinitions, + printFunc: printFuncValue, + } + + h.defaultHandler = entry + return nil +} + +// ValidateRowPrintHandlerFunc validates print handler signature. +// printFunc is the function that will be called to print an object. 
+// It must be of the following type: +// func printFunc(object ObjectType, options PrintOptions) ([]metav1beta1.TableRow, error) +// where ObjectType is the type of the object that will be printed, and the first +// return value is an array of rows, with each row containing a number of cells that +// match the number of columns defined for that printer function. +func ValidateRowPrintHandlerFunc(printFunc reflect.Value) error { + if printFunc.Kind() != reflect.Func { + return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) + } + funcType := printFunc.Type() + if funcType.NumIn() != 2 || funcType.NumOut() != 2 { + return fmt.Errorf("invalid print handler." + + "Must accept 2 parameters and return 2 values.") + } + if funcType.In(1) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || + funcType.Out(0) != reflect.TypeOf((*[]metav1beta1.TableRow)(nil)).Elem() || + funcType.Out(1) != reflect.TypeOf((*error)(nil)).Elem() { + return fmt.Errorf("invalid print handler. The expected signature is: "+ + "func handler(obj %v, options PrintOptions) ([]metav1beta1.TableRow, error)", funcType.In(0)) + } + return nil +} diff --git a/pkg/printers/humanreadable.go b/pkg/printers/tableprinter.go similarity index 62% rename from pkg/printers/humanreadable.go rename to pkg/printers/tableprinter.go index eb2c2f784ba..20c3e542a57 100644 --- a/pkg/printers/humanreadable.go +++ b/pkg/printers/tableprinter.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,47 +23,20 @@ import ( "strings" "github.com/liggitt/tabwriter" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) -type TableGenerator interface { - GenerateTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) -} - -type PrintHandler interface { - TableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error - DefaultTableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error -} +var _ ResourcePrinter = &HumanReadablePrinter{} var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. -type handlerEntry struct { - columnDefinitions []metav1beta1.TableColumnDefinition - printFunc reflect.Value - args []reflect.Value -} - -// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide -// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers -// will only be printed if the object type changes. This makes it useful for printing items -// received from watches. -type HumanReadablePrinter struct { - handlerMap map[reflect.Type]*handlerEntry - defaultHandler *handlerEntry - options PrintOptions - lastType interface{} - lastColumns []metav1beta1.TableColumnDefinition -} - -var _ PrintHandler = &HumanReadablePrinter{} - -// NewHumanReadablePrinter creates a HumanReadablePrinter. +// NewHumanReadablePrinter creates a printer suitable for calling PrintObj(). +// TODO(seans3): Change return type to ResourcePrinter interface once we no longer need +// to construct the "handlerMap".
func NewHumanReadablePrinter(options PrintOptions) *HumanReadablePrinter { printer := &HumanReadablePrinter{ handlerMap: make(map[reflect.Type]*handlerEntry), @@ -72,93 +45,6 @@ func NewHumanReadablePrinter(options PrintOptions) *HumanReadablePrinter { return printer } -// NewTableGenerator creates a HumanReadablePrinter suitable for calling GenerateTable(). -func NewTableGenerator() *HumanReadablePrinter { - return &HumanReadablePrinter{ - handlerMap: make(map[reflect.Type]*handlerEntry), - } -} - -func (a *HumanReadablePrinter) With(fns ...func(PrintHandler)) *HumanReadablePrinter { - for _, fn := range fns { - fn(a) - } - return a -} - -// TableHandler adds a print handler with a given set of columns to HumanReadablePrinter instance. -// See ValidateRowPrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) TableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { - printFuncValue := reflect.ValueOf(printFunc) - if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) - return err - } - entry := &handlerEntry{ - columnDefinitions: columnDefinitions, - printFunc: printFuncValue, - } - - objType := printFuncValue.Type().In(0) - if _, ok := h.handlerMap[objType]; ok { - err := fmt.Errorf("registered duplicate printer for %v", objType) - utilruntime.HandleError(err) - return err - } - h.handlerMap[objType] = entry - return nil -} - -// DefaultTableHandler registers a set of columns and a print func that is given a chance to process -// any object without an explicit handler. Only the most recently set print handler is used. -// See ValidateRowPrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) DefaultTableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { - printFuncValue := reflect.ValueOf(printFunc) - if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) - return err - } - entry := &handlerEntry{ - columnDefinitions: columnDefinitions, - printFunc: printFuncValue, - } - - h.defaultHandler = entry - return nil -} - -// ValidateRowPrintHandlerFunc validates print handler signature. -// printFunc is the function that will be called to print an object. -// It must be of the following type: -// func printFunc(object ObjectType, options PrintOptions) ([]metav1beta1.TableRow, error) -// where ObjectType is the type of the object that will be printed, and the first -// return value is an array of rows, with each row containing a number of cells that -// match the number of columns defined for that printer function. -func ValidateRowPrintHandlerFunc(printFunc reflect.Value) error { - if printFunc.Kind() != reflect.Func { - return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) - } - funcType := printFunc.Type() - if funcType.NumIn() != 2 || funcType.NumOut() != 2 { - return fmt.Errorf("invalid print handler." + - "Must accept 2 parameters and return 2 value.") - } - if funcType.In(1) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || - funcType.Out(0) != reflect.TypeOf((*[]metav1beta1.TableRow)(nil)).Elem() || - funcType.Out(1) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("invalid print handler. 
The expected signature is: "+ - "func handler(obj %v, options PrintOptions) ([]metav1beta1.TableRow, error)", funcType.In(0)) - } - return nil -} - -func printHeader(columnNames []string, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { - return err - } - return nil -} - // PrintObj prints the obj in a human-friendly format according to the type of the obj. func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { w, found := output.(*tabwriter.Writer) @@ -168,6 +54,7 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er defer w.Flush() } + // Case 1: Parameter "obj" is a table from server; print it. // display tables following the rules of options if table, ok := obj.(*metav1beta1.Table); ok { // Do not print headers if this table has no column definitions, or they are the same as the last ones we printed @@ -186,12 +73,14 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er h.lastColumns = table.ColumnDefinitions } - if err := DecorateTable(table, localOptions); err != nil { + if err := decorateTable(table, localOptions); err != nil { return err } return PrintTable(table, output, localOptions) } + // Case 2: Parameter "obj" is not a table; search for a handler to print it. + // TODO(seans3): Remove this case in 1.16, since table should be returned from server-side printing. // print with a registered handler t := reflect.TypeOf(obj) if handler := h.handlerMap[t]; handler != nil { @@ -208,6 +97,7 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er return nil } + // Case 3: Could not find print handler for "obj"; use the default print handler. // print with the default handler if set, and use the columns from the last time if h.defaultHandler != nil { includeHeaders := h.lastType != h.defaultHandler && !h.options.NoHeaders @@ -229,7 +119,7 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er // PrintTable prints a table to the provided output respecting the filtering rules for options // for wide columns and filtered rows. It filters out rows that are Completed. You should call -// DecorateTable if you receive a table from a remote server before calling PrintTable. +// decorateTable if you receive a table from a remote server before calling PrintTable. func PrintTable(table *metav1beta1.Table, output io.Writer, options PrintOptions) error { if !options.NoHeaders { // avoid printing headers if we have no rows to display @@ -277,11 +167,11 @@ func PrintTable(table *metav1beta1.Table, output io.Writer, options PrintOptions return nil } -// DecorateTable takes a table and attempts to add label columns and the +// decorateTable takes a table and attempts to add label columns and the // namespace column. It will fill empty columns with nil (if the object // does not expose metadata). It returns an error if the table cannot // be decorated. -func DecorateTable(table *metav1beta1.Table, options PrintOptions) error { +func decorateTable(table *metav1beta1.Table, options PrintOptions) error { width := len(table.ColumnDefinitions) + len(options.ColumnLabels) if options.WithNamespace { width++ @@ -372,58 +262,6 @@ func DecorateTable(table *metav1beta1.Table, options PrintOptions) error { return nil } -// GenerateTable returns a table for the provided object, using the printer registered for that type. 
It returns -// a table that includes all of the information requested by options, but will not remove rows or columns. The -// caller is responsible for applying rules related to filtering rows or columns. -func (h *HumanReadablePrinter) GenerateTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) { - t := reflect.TypeOf(obj) - handler, ok := h.handlerMap[t] - if !ok { - return nil, fmt.Errorf("no table handler registered for this type %v", t) - } - - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} - results := handler.printFunc.Call(args) - if !results[1].IsNil() { - return nil, results[1].Interface().(error) - } - - var columns []metav1beta1.TableColumnDefinition - if !options.NoHeaders { - columns = handler.columnDefinitions - if !options.Wide { - columns = make([]metav1beta1.TableColumnDefinition, 0, len(handler.columnDefinitions)) - for i := range handler.columnDefinitions { - if handler.columnDefinitions[i].Priority != 0 { - continue - } - columns = append(columns, handler.columnDefinitions[i]) - } - } - } - table := &metav1beta1.Table{ - ListMeta: metav1.ListMeta{ - ResourceVersion: "", - }, - ColumnDefinitions: columns, - Rows: results[0].Interface().([]metav1beta1.TableRow), - } - if m, err := meta.ListAccessor(obj); err == nil { - table.ResourceVersion = m.GetResourceVersion() - table.SelfLink = m.GetSelfLink() - table.Continue = m.GetContinue() - } else { - if m, err := meta.CommonAccessor(obj); err == nil { - table.ResourceVersion = m.GetResourceVersion() - table.SelfLink = m.GetSelfLink() - } - } - if err := DecorateTable(table, options); err != nil { - return nil, err - } - return table, nil -} - // printRowsForHandlerEntry prints the incremental table output (headers if the current type is // different from lastType) including all the rows in the object. It returns the current type // or an error, if any. @@ -461,6 +299,13 @@ func printRowsForHandlerEntry(output io.Writer, handler *handlerEntry, obj runti return results[1].Interface().(error) } +func printHeader(columnNames []string, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { + return err + } + return nil +} + // printRows writes the provided rows to output. func printRows(output io.Writer, rows []metav1beta1.TableRow, options PrintOptions) { for _, row := range rows { diff --git a/pkg/printers/humanreadable_test.go b/pkg/printers/tableprinter_test.go similarity index 92% rename from pkg/printers/humanreadable_test.go rename to pkg/printers/tableprinter_test.go index 9d3bc1bdd8d..84c5b6586fc 100644 --- a/pkg/printers/humanreadable_test.go +++ b/pkg/printers/tableprinter_test.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -22,10 +22,10 @@ import ( "reflect" "testing" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" ) var testNamespaceColumnDefinitions = []metav1beta1.TableColumnDefinition{ @@ -34,7 +34,7 @@ var testNamespaceColumnDefinitions = []metav1beta1.TableColumnDefinition{ {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } -func testPrintNamespace(obj *api.Namespace, options PrintOptions) ([]metav1beta1.TableRow, error) { +func testPrintNamespace(obj *corev1.Namespace, options PrintOptions) ([]metav1beta1.TableRow, error) { if options.WithNamespace { return nil, fmt.Errorf("namespace is not namespaced") } @@ -64,7 +64,7 @@ func TestPrintRowsForHandlerEntry(t *testing.T) { printFunc: printFunc, }, opt: PrintOptions{}, - obj: &api.Namespace{ + obj: &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, }, includeHeader: false, @@ -77,7 +77,7 @@ func TestPrintRowsForHandlerEntry(t *testing.T) { printFunc: printFunc, }, opt: PrintOptions{}, - obj: &api.Namespace{ + obj: &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, }, includeHeader: true, @@ -90,7 +90,7 @@ func TestPrintRowsForHandlerEntry(t *testing.T) { printFunc: printFunc, }, opt: PrintOptions{}, - obj: &api.Namespace{ + obj: &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, }, includeHeader: true, @@ -105,7 +105,7 @@ func TestPrintRowsForHandlerEntry(t *testing.T) { opt: PrintOptions{ WithNamespace: true, }, - obj: &api.Namespace{ + obj: &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, }, includeHeader: true, From 04a7b50a020906e8e5cd32dfce378125606ce97d Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Thu, 18 Apr 2019 14:00:42 -0700 Subject: [PATCH 071/209] Use read lock in ready#check --- staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index cb1e11437aa..19a2841fc9f 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -1245,7 +1245,7 @@ type ready struct { } func newReady() *ready { - return &ready{c: sync.NewCond(&sync.Mutex{})} + return &ready{c: sync.NewCond(&sync.RWMutex{})} } func (r *ready) wait() { @@ -1259,8 +1259,9 @@ func (r *ready) wait() { // TODO: Make check() function more sophisticated, in particular // allow it to behave as "waitWithTimeout". 
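// Editorial sketch (not part of this patch) of the locking pattern the change
// below adopts: a read-mostly flag behind an RWMutex lets many concurrent
// callers of check() proceed in parallel, while writers still serialize:
//
//	var mu sync.RWMutex
//	ok := false
//	read := func() bool { mu.RLock(); defer mu.RUnlock(); return ok }
//	write := func(v bool) { mu.Lock(); defer mu.Unlock(); ok = v }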
 func (r *ready) check() bool {
-	r.c.L.Lock()
-	defer r.c.L.Unlock()
+	rwMutex := r.c.L.(*sync.RWMutex)
+	rwMutex.RLock()
+	defer rwMutex.RUnlock()
 	return r.ok
 }

From d564d2e74d429e509ee1e81b71b7aa6d6720cd4b Mon Sep 17 00:00:00 2001
From: Vladimir Vivien
Date: Wed, 3 Apr 2019 10:04:18 -0400
Subject: [PATCH 072/209] CSI - Prevents unsupported device mount with
 CanDeviceMount(spec) check

---
 pkg/volume/csi/BUILD              |   1 +
 pkg/volume/csi/csi_plugin.go      |  12 +-
 pkg/volume/csi/csi_plugin_test.go | 112 ++++++++
 pkg/volume/csi/csi_test.go        | 425 ++++++++++++++++++++++++++++++
 4 files changed, 549 insertions(+), 1 deletion(-)
 create mode 100644 pkg/volume/csi/csi_test.go

diff --git a/pkg/volume/csi/BUILD b/pkg/volume/csi/BUILD
index 825ac5d5ef2..8bcccd68828 100644
--- a/pkg/volume/csi/BUILD
+++ b/pkg/volume/csi/BUILD
@@ -50,6 +50,7 @@ go_test(
         "csi_drivers_store_test.go",
         "csi_mounter_test.go",
         "csi_plugin_test.go",
+        "csi_test.go",
         "csi_util_test.go",
         "expander_test.go",
     ],
diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
index 68b50eccd3b..ab399c7e10f 100644
--- a/pkg/volume/csi/csi_plugin.go
+++ b/pkg/volume/csi/csi_plugin.go
@@ -607,8 +607,18 @@ func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
 	return !skipAttach, nil
 }
 
-// TODO (#75352) add proper logic to determine device moutability by inspecting the spec.
+// CanDeviceMount returns true if the spec supports device mount
 func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
+	driverMode, err := p.getDriverMode(spec)
+	if err != nil {
+		return false, err
+	}
+
+	if driverMode == ephemeralDriverMode {
+		klog.V(5).Info(log("plugin.CanDeviceMount skipped; ephemeral mode detected for spec %v", spec.Name()))
+		return false, nil
+	}
+
 	return true, nil
 }
 
diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go
index fd9412a43e1..6d4354973b6 100644
--- a/pkg/volume/csi/csi_plugin_test.go
+++ b/pkg/volume/csi/csi_plugin_test.go
@@ -958,6 +958,118 @@ func TestPluginFindAttachablePlugin(t *testing.T) {
 	}
 }
 
+func TestPluginCanDeviceMount(t *testing.T) {
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+	tests := []struct {
+		name           string
+		driverName     string
+		spec           *volume.Spec
+		canDeviceMount bool
+		shouldFail     bool
+	}{
+		{
+			name:           "non device mountable inline",
+			driverName:     "inline-driver",
+			spec:           volume.NewSpecFromVolume(makeTestVol("test-vol", "inline-driver")),
+			canDeviceMount: false,
+		},
+		{
+			name:           "device mountable PV",
+			driverName:     "device-mountable-pv",
+			spec:           volume.NewSpecFromPersistentVolume(makeTestPV("test-vol", 20, "device-mountable-pv", testVol), true),
+			canDeviceMount: true,
+		},
+		{
+			name:           "incomplete spec",
+			driverName:     "device-unmountable",
+			spec:           &volume.Spec{ReadOnly: true},
+			canDeviceMount: false,
+			shouldFail:     true,
+		},
+		{
+			name:           "missing spec",
+			driverName:     "device-unmountable",
+			canDeviceMount: false,
+			shouldFail:     true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			plug, tmpDir := newTestPlugin(t, nil)
+			defer os.RemoveAll(tmpDir)
+
+			pluginCanDeviceMount, err := plug.CanDeviceMount(test.spec)
+			if err != nil && !test.shouldFail {
+				t.Fatalf("unexpected error in plug.CanDeviceMount: %s", err)
+			}
+			if pluginCanDeviceMount != test.canDeviceMount {
+				t.Fatalf("expecting plugin.CanDeviceMount %t got %t", test.canDeviceMount, pluginCanDeviceMount)
+			}
+		})
+	}
+}
+
+func TestPluginFindDeviceMountablePluginBySpec(t *testing.T) {
+	defer 
utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)() + tests := []struct { + name string + driverName string + spec *volume.Spec + canDeviceMount bool + shouldFail bool + }{ + { + name: "non device mountable inline", + driverName: "inline-driver", + spec: volume.NewSpecFromVolume(makeTestVol("test-vol", "inline-driver")), + canDeviceMount: false, + }, + { + name: "device mountable PV", + driverName: "device-mountable-pv", + spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-vol", 20, "device-mountable-pv", testVol), true), + canDeviceMount: true, + }, + { + name: "incomplete spec", + driverName: "device-unmountable", + spec: &volume.Spec{ReadOnly: true}, + canDeviceMount: false, + shouldFail: true, + }, + { + name: "missing spec", + driverName: "device-unmountable", + canDeviceMount: false, + shouldFail: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("csi-test") + if err != nil { + t.Fatalf("can't create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + client := fakeclient.NewSimpleClientset() + host := volumetest.NewFakeVolumeHost(tmpDir, client, nil) + plugMgr := &volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host) + + plug, err := plugMgr.FindDeviceMountablePluginBySpec(test.spec) + if err != nil && !test.shouldFail { + t.Fatalf("unexpected error in plugMgr.FindDeviceMountablePluginBySpec: %s", err) + } + if (plug != nil) != test.canDeviceMount { + t.Fatalf("expecting deviceMountablePlugin, but got nil") + } + }) + } +} + func TestPluginNewBlockMapper(t *testing.T) { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)() diff --git a/pkg/volume/csi/csi_test.go b/pkg/volume/csi/csi_test.go new file mode 100644 index 00000000000..33e636ee081 --- /dev/null +++ b/pkg/volume/csi/csi_test.go @@ -0,0 +1,425 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package csi
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"testing"
+	"time"
+
+	api "k8s.io/api/core/v1"
+	storage "k8s.io/api/storage/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+	"k8s.io/client-go/informers"
+	fakeclient "k8s.io/client-go/kubernetes/fake"
+	utiltesting "k8s.io/client-go/util/testing"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/volume"
+	volumetest "k8s.io/kubernetes/pkg/volume/testing"
+)
+
+// TestCSI_VolumeAll runs a close approximation of volume workflow
+// based on operations from the volume manager/reconciler/operation executor
+func TestCSI_VolumeAll(t *testing.T) {
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+	tests := []struct {
+		name       string
+		specName   string
+		driver     string
+		volName    string
+		specFunc   func(specName, driver, volName string) *volume.Spec
+		podFunc    func() *api.Pod
+		isInline   bool
+		shouldFail bool
+	}{
+		{
+			name:     "PersistentVolume",
+			specName: "pv2",
+			driver:   "simple-driver",
+			volName:  "vol2",
+			specFunc: func(specName, driver, volName string) *volume.Spec {
+				return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
+			},
+			podFunc: func() *api.Pod {
+				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
+				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
+			},
+		},
+		{
+			name:    "ephemeral inline",
+			driver:  "inline-driver-1",
+			volName: "test.vol2",
+			specFunc: func(specName, driver, volName string) *volume.Spec {
+				return volume.NewSpecFromVolume(makeTestVol(specName, driver))
+			},
+			podFunc: func() *api.Pod {
+				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
+				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
+			},
+			isInline: true,
+		},
+		{
+			name:     "missing spec",
+			specName: "pv2",
+			driver:   "simple-driver",
+			volName:  "vol2",
+			specFunc: func(specName, driver, volName string) *volume.Spec {
+				return nil
+			},
+			podFunc: func() *api.Pod {
+				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
+				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
+			},
+			shouldFail: true,
+		},
+		{
+			name:     "incomplete spec",
+			specName: "pv2",
+			driver:   "simple-driver",
+			volName:  "vol2",
+			specFunc: func(specName, driver, volName string) *volume.Spec {
+				return &volume.Spec{ReadOnly: true}
+			},
+			podFunc: func() *api.Pod {
+				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
+				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
+			},
+			shouldFail: true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			tmpDir, err := utiltesting.MkTmpdir("csi-test")
+			if err != nil {
+				t.Fatalf("can't create temp dir: %v", err)
+			}
+			defer os.RemoveAll(tmpDir)
+			client := fakeclient.NewSimpleClientset()
+			fakeWatcher := watch.NewRaceFreeFake()
+
+			factory := informers.NewSharedInformerFactory(client, csiResyncPeriod)
+			go factory.Start(wait.NeverStop)
+
+			host := volumetest.NewFakeVolumeHostWithCSINodeName(
+				tmpDir,
+				client,
+				nil,
+				"csi-node",
+				factory.Storage().V1beta1().CSIDrivers().Lister(),
+			)
+
+			plugMgr := &volume.VolumePluginMgr{}
+			plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
+			csiClient := setupClient(t, true)
+
+			volSpec := test.specFunc(test.specName, test.driver, test.volName)
+			pod := test.podFunc()
+			attachName := getAttachmentName(test.volName, test.driver, string(host.GetNodeName()))
+			t.Log("csiTest.VolumeAll starting...")
+
+			// *************** Attach/Mount volume resources ****************//
+			// attach volume
+			t.Log("csiTest.VolumeAll Attaching volume...")
+			attachPlug, err := plugMgr.FindAttachablePluginBySpec(volSpec)
+			if err != nil {
+				if !test.shouldFail {
+					t.Fatalf("csiTest.VolumeAll PluginManager.FindAttachablePluginBySpec failed: %v", err)
+				} else {
+					t.Log("csiTest.VolumeAll failed: ", err)
+					return
+				}
+			}
+
+			if test.isInline && attachPlug != nil {
+				t.Fatal("csiTest.VolumeAll AttachablePlugin found with ephemeral volume")
+			}
+			if !test.isInline && attachPlug == nil {
+				t.Fatal("csiTest.VolumeAll AttachablePlugin not found with PV")
+			}
+
+			var devicePath string
+			if attachPlug != nil {
+				t.Log("csiTest.VolumeAll attacher.Attach starting")
+
+				var volAttacher volume.Attacher
+
+				volAttacher, err := attachPlug.NewAttacher()
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll failed to create new attacher: ", err)
+				}
+
+				// creates VolumeAttachment and blocks until it is marked attached (done by external attacher)
+				go func(spec *volume.Spec, nodeName types.NodeName) {
+					attachID, err := volAttacher.Attach(spec, nodeName)
+					if err != nil {
+						t.Fatalf("csiTest.VolumeAll attacher.Attach failed: %s", err)
+					}
+					t.Logf("csiTest.VolumeAll got attachID %s", attachID)
+
+				}(volSpec, host.GetNodeName())
+
+				// Simulates external-attacher and marks VolumeAttachment.Status.Attached = true
+				markVolumeAttached(t, host.GetKubeClient(), fakeWatcher, attachName, storage.VolumeAttachmentStatus{Attached: true})
+
+				devicePath, err = volAttacher.WaitForAttach(volSpec, "", pod, 500*time.Millisecond)
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll attacher.WaitForAttach failed:", err)
+				}
+
+				if devicePath != attachName {
+					t.Fatalf("csiTest.VolumeAll attacher.WaitForAttach got unexpected value %s", devicePath)
+				}
+
+				t.Log("csiTest.VolumeAll attacher.WaitForAttach succeeded OK, attachment ID:", devicePath)
+
+			} else {
+				t.Log("csiTest.VolumeAll volume attacher not found, skipping attachment")
+			}
+
+			// Mount Device
+			t.Log("csiTest.VolumeAll Mounting device...")
+			devicePlug, err := plugMgr.FindDeviceMountablePluginBySpec(volSpec)
+			if err != nil {
+				t.Fatalf("csiTest.VolumeAll PluginManager.FindDeviceMountablePluginBySpec failed: %v", err)
+			}
+
+			if test.isInline && devicePlug != nil {
+				t.Fatal("csiTest.VolumeAll DeviceMountablePlugin found with ephemeral volume")
+			}
+			if !test.isInline && devicePlug == nil {
+				t.Fatal("csiTest.VolumeAll DeviceMountablePlugin not found with PV")
+			}
+
+			var devMounter volume.DeviceMounter
+			if devicePlug != nil {
+				devMounter, err = devicePlug.NewDeviceMounter()
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll failed to create new device mounter: ", err)
+				}
+			}
+
+			if devMounter != nil {
+				csiDevMounter := devMounter.(*csiAttacher)
+				csiDevMounter.csiClient = csiClient
+				devMountPath, err := csiDevMounter.GetDeviceMountPath(volSpec)
+				if err != nil {
+					t.Fatalf("csiTest.VolumeAll deviceMounter.GetDeviceMountPath failed %s", err)
+				}
+				if err := csiDevMounter.MountDevice(volSpec, devicePath, devMountPath); err != nil {
+					t.Fatalf("csiTest.VolumeAll deviceMounter.MountDevice failed: %v", err)
+				}
+				t.Log("csiTest.VolumeAll device mounted at path:", devMountPath)
+			} else {
+				t.Log("csiTest.VolumeAll DeviceMountablePlugin not found, skipping deviceMounter.MountDevice")
+			}
+
+			// mount volume
+			t.Log("csiTest.VolumeAll Mounting volume...")
+			volPlug, err := plugMgr.FindPluginBySpec(volSpec)
+			if err != nil || volPlug == nil {
+				t.Fatalf("csiTest.VolumeAll PluginMgr.FindPluginBySpec failed: %v", err)
+			}
+
+			if volPlug == nil {
+				t.Fatalf("csiTest.VolumeAll volumePlugin is nil")
+			}
+
+			if !volPlug.CanSupport(volSpec) {
+				t.Fatal("csiTest.VolumeAll volumePlugin.CanSupport returned false")
+			}
+
+			mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
+			if err != nil || mounter == nil {
+				t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
+			}
+
+			if err := mounter.CanMount(); err != nil {
+				t.Fatal("csiTest.VolumeAll mounter.CanMount failed, skipping mount")
+			}
+
+			var fsGroup *int64
+			if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.FSGroup != nil {
+				fsGroup = pod.Spec.SecurityContext.FSGroup
+			}
+
+			csiMounter := mounter.(*csiMountMgr)
+			csiMounter.csiClient = csiClient
+			if err := csiMounter.SetUp(fsGroup); err != nil {
+				t.Fatalf("csiTest.VolumeAll mounter.SetUp(fsGroup) failed: %s", err)
+			}
+			t.Log("csiTest.VolumeAll mounter.SetUp(fsGroup) done OK")
+
+			dataFile := filepath.Join(path.Dir(mounter.GetPath()), volDataFileName)
+			if _, err := os.Stat(dataFile); err != nil {
+				t.Fatalf("csiTest.VolumeAll metadata JSON file not found: %s", dataFile)
+			}
+			t.Log("csiTest.VolumeAll JSON datafile generated OK:", dataFile)
+
+			// ******** Volume Reconstruction ************* //
+			volPath := path.Dir(csiMounter.GetPath())
+			t.Log("csiTest.VolumeAll entering plugin.ConstructVolumeSpec for path", volPath)
+			spec, err := volPlug.ConstructVolumeSpec(test.volName, volPath)
+			if err != nil {
+				t.Fatalf("csiTest.VolumeAll plugin.ConstructVolumeSpec failed: %s", err)
+			} else {
+				if spec == nil {
+					t.Fatalf("csiTest.VolumeAll plugin.ConstructVolumeSpec returned nil spec")
+				} else {
+					volSpec = spec
+
+					if test.isInline {
+						if volSpec.Volume == nil || volSpec.Volume.CSI == nil {
+							t.Fatal("csiTest.VolumeAll reconstruction of ephemeral volumeSpec missing CSI Volume source")
+						}
+						if volSpec.Volume.CSI.Driver == "" {
+							t.Fatal("csiTest.VolumeAll reconstruction of ephemeral volume missing driver name")
+						}
+					} else {
+						if volSpec.PersistentVolume == nil || volSpec.PersistentVolume.Spec.CSI == nil {
+							t.Fatal("csiTest.VolumeAll reconstruction of volumeSpec missing CSI PersistentVolume source")
+						}
+						csi := volSpec.PersistentVolume.Spec.CSI
+						if csi.Driver == "" {
+							t.Fatal("csiTest.VolumeAll reconstruction of PV missing driver name")
+						}
+						if csi.VolumeHandle == "" {
+							t.Fatal("csiTest.VolumeAll reconstruction of PV missing volume handle")
+						}
+					}
+				}
+			}
+
+			// ************* Teardown everything **************** //
+			t.Log("csiTest.VolumeAll Tearing down...")
+			// unmount volume
+			t.Log("csiTest.VolumeAll Unmounting volume...")
+			volPlug, err = plugMgr.FindPluginBySpec(volSpec)
+			if err != nil || volPlug == nil {
+				t.Fatalf("csiTest.VolumeAll PluginMgr.FindPluginBySpec failed: %v", err)
+			}
+			if volPlug == nil {
+				t.Fatalf("csiTest.VolumeAll volumePlugin is nil")
+			}
+			mounter, err = volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
+			if err != nil || mounter == nil {
+				t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
+			}
+
+			unmounter, err := volPlug.NewUnmounter(test.specName, pod.GetUID())
+			if err != nil {
+				t.Fatal("csiTest.VolumeAll volumePlugin.NewUnmounter failed:", err)
+			}
+			csiUnmounter := unmounter.(*csiMountMgr)
+			csiUnmounter.csiClient = csiClient
+
+			if err := csiUnmounter.TearDownAt(mounter.GetPath()); err != nil {
+				t.Fatal("csiTest.VolumeAll unmounter.TearDownAt failed:", err)
+			}
+			t.Log("csiTest.VolumeAll unmounter.TearDownAt done OK for dir:", mounter.GetPath())
+
+			// unmount device
+			t.Log("csiTest.VolumeAll Unmounting device...")
+			devicePlug, err = plugMgr.FindDeviceMountablePluginBySpec(volSpec)
+			if err != nil {
+				t.Fatalf("csiTest.VolumeAll failed to create mountable device plugin: %s", err)
+			}
+
+			if test.isInline && devicePlug != nil {
+				t.Fatal("csiTest.VolumeAll DeviceMountablePlugin found with ephemeral volume")
+			}
+			if !test.isInline && devicePlug == nil {
+				t.Fatal("csiTest.VolumeAll DeviceMountablePlugin not found with PV")
+			}
+
+			var devUnmounter volume.DeviceUnmounter
+			if devicePlug != nil {
+				t.Log("csiTest.VolumeAll found DeviceMountablePlugin, entering device unmounting ...")
+				devMounter, err = devicePlug.NewDeviceMounter()
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll failed to create new device mounter: ", err)
+				}
+				devUnmounter, err = devicePlug.NewDeviceUnmounter()
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll failed to create new device unmounter: ", err)
+				}
+
+				if devMounter != nil && devUnmounter != nil {
+					csiDevMounter := devMounter.(*csiAttacher)
+					csiDevUnmounter := devUnmounter.(*csiAttacher)
+					csiDevUnmounter.csiClient = csiClient
+
+					devMountPath, err := csiDevMounter.GetDeviceMountPath(volSpec)
+					if err != nil {
+						t.Fatalf("csiTest.VolumeAll deviceMounter.GetDeviceMountPath failed %s", err)
+					}
+					if err := csiDevUnmounter.UnmountDevice(devMountPath); err != nil {
+						t.Fatalf("csiTest.VolumeAll deviceMounter.UnmountDevice failed: %s", err)
+					}
+					t.Log("csiTest.VolumeAll deviceUnmounter.UnmountDevice done OK for path", devMountPath)
+				}
+			} else {
+				t.Log("csiTest.VolumeAll DeviceMountablePluginBySpec did not find a plugin, skipping unmounting.")
+			}
+
+			// detach volume
+			t.Log("csiTest.VolumeAll Detaching volume...")
+			attachPlug, err = plugMgr.FindAttachablePluginBySpec(volSpec)
+			if err != nil {
+				t.Fatalf("csiTest.VolumeAll PluginManager.FindAttachablePluginBySpec failed: %v", err)
+			}
+
+			if test.isInline && attachPlug != nil {
+				t.Fatal("csiTest.VolumeAll AttachablePlugin found with ephemeral volume")
+			}
+			if !test.isInline && attachPlug == nil {
+				t.Fatal("csiTest.VolumeAll AttachablePlugin not found with PV")
+			}
+
+			if attachPlug != nil {
+				volDetacher, err := attachPlug.NewDetacher()
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll failed to create new detacher: ", err)
+				}
+
+				t.Log("csiTest.VolumeAll preparing detacher.Detach...")
+				volName, err := volPlug.GetVolumeName(volSpec)
+				if err != nil {
+					t.Fatal("csiTest.VolumeAll volumePlugin.GetVolumeName failed:", err)
+				}
+				csiDetacher := volDetacher.(*csiAttacher)
+				csiDetacher.csiClient = csiClient
+				if err := csiDetacher.Detach(volName, host.GetNodeName()); err != nil {
+					t.Fatal("csiTest.VolumeAll detacher.Detach failed:", err)
+				}
+				t.Log("csiTest.VolumeAll detacher.Detach succeeded for volume", volName)
+
+			} else {
+				t.Log("csiTest.VolumeAll attachable plugin not found for plugin.Detach call, skipping")
+			}
+		})
+	}
+}

From d278d59328f6b105fc0532fb0ecef13deb43f6ab Mon Sep 17 00:00:00 2001
From: Han Kang
Date: Thu, 18 Apr 2019 17:01:13 -0700
Subject: [PATCH 073/209] add owners file to util/metrics and auto-labeling

---
 pkg/util/metrics/OWNERS | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 pkg/util/metrics/OWNERS

diff --git a/pkg/util/metrics/OWNERS b/pkg/util/metrics/OWNERS
new file mode 100644
index 00000000000..44a1c2003fa
--- /dev/null
+++ b/pkg/util/metrics/OWNERS
@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- piosz
+- brancz
+reviewers:
+- sig-instrumentation-pr-reviews
+labels:
+- sig/instrumentation

From 4543e68ae55be17448518ef6ac3cac16524decbf Mon Sep 17 00:00:00 2001
From: Christoph Blecker
Date: Thu, 18 Apr 2019 18:53:13 -0700
Subject: [PATCH 074/209] Fix shellcheck in hack/lib/golang.sh

---
 hack/.shellcheck_failures | 1 -
 hack/lib/golang.sh        | 6 +++++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index ea387368541..a4aa73da9e5 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -27,7 +27,6 @@
 ./hack/cherry_pick_pull.sh
 ./hack/ginkgo-e2e.sh
 ./hack/grab-profiles.sh
-./hack/lib/golang.sh
 ./hack/lib/init.sh
 ./hack/lib/swagger.sh
 ./hack/lib/test.sh
diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
index 47133ad74e3..9806b5b461f 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -372,7 +372,7 @@ EOF
 # Ensure the go tool exists and is a viable version.
 kube::golang::verify_go_version() {
-  if [[ -z "$(which go)" ]]; then
+  if [[ -z "$(command -v go)" ]]; then
    kube::log::usage_from_stdin < Date: Tue, 9 Apr 2019 20:52:14 +0800
Subject: [PATCH 075/209] Rename some variables and clean up code in
 scheduler_binder.go

---
 .../persistentvolume/scheduler_binder.go      | 25 +++++++++----------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder.go b/pkg/controller/volume/persistentvolume/scheduler_binder.go
index f72f9171d93..4ee2b93320a 100644
--- a/pkg/controller/volume/persistentvolume/scheduler_binder.go
+++ b/pkg/controller/volume/persistentvolume/scheduler_binder.go
@@ -160,26 +160,26 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 	}()
 
 	var (
-		matchedClaims     []*bindingInfo
+		matchedBindings   []*bindingInfo
 		provisionedClaims []*v1.PersistentVolumeClaim
 	)
 	defer func() {
 		// We recreate bindings for each new schedule loop.
-		if len(matchedClaims) == 0 && len(provisionedClaims) == 0 {
+		if len(matchedBindings) == 0 && len(provisionedClaims) == 0 {
 			// Clear cache if no claims to bind or provision for this node.
 			b.podBindingCache.ClearBindings(pod, node.Name)
 			return
 		}
 		// Although we do not distinguish nil from empty in this function, for
 		// easier testing, we normalize empty to nil.
- if len(matchedClaims) == 0 { - matchedClaims = nil + if len(matchedBindings) == 0 { + matchedBindings = nil } if len(provisionedClaims) == 0 { provisionedClaims = nil } // Mark cache with all matched and provisioned claims for this node - b.podBindingCache.UpdateBindings(pod, node.Name, matchedClaims, provisionedClaims) + b.podBindingCache.UpdateBindings(pod, node.Name, matchedBindings, provisionedClaims) }() // The pod's volumes need to be processed in one call to avoid the race condition where @@ -225,7 +225,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume // Find matching volumes if len(claimsToFindMatching) > 0 { var unboundClaims []*v1.PersistentVolumeClaim - unboundVolumesSatisfied, matchedClaims, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node) + unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node) if err != nil { return false, false, err } @@ -598,10 +598,10 @@ func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool { // getPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning) // and unbound with immediate binding (including prebound) -func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaims []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) { +func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) { boundClaims = []*v1.PersistentVolumeClaim{} unboundClaimsImmediate = []*v1.PersistentVolumeClaim{} - unboundClaims = []*v1.PersistentVolumeClaim{} + unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{} for _, vol := range pod.Spec.Volumes { volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol) @@ -621,7 +621,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV // Prebound PVCs are treated as unbound immediate binding if delayBindingMode && pvc.Spec.VolumeName == "" { // Scheduler path - unboundClaims = append(unboundClaims, pvc) + unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc) } else { // !delayBindingMode || pvc.Spec.VolumeName != "" // Immediate binding should have already been bound @@ -629,7 +629,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV } } } - return boundClaims, unboundClaims, unboundClaimsImmediate, nil + return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil } func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) { @@ -654,7 +654,7 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node // findMatchingVolumes tries to find matching volumes for given claims, // and return unbound claims for further provision. 
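// Illustrative call pattern (a sketch, not part of this change; the variable
// names shown are hypothetical):
//
//	foundMatches, bindings, claimsToProvision, err := b.findMatchingVolumes(pod, claimsToBind, node)
//	// bindings pairs each matched PV with its PVC; claimsToProvision holds
//	// the claims with no matching PV that must be dynamically provisioned.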
-func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, matchedClaims []*bindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { +func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*bindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { podName := getPodName(pod) // Sort all the claims by increasing size request to get the smallest fits sort.Sort(byPVCSize(claimsToBind)) @@ -662,7 +662,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi chosenPVs := map[string]*v1.PersistentVolume{} foundMatches = true - matchedClaims = []*bindingInfo{} for _, pvc := range claimsToBind { // Get storage class name from each PVC @@ -688,7 +687,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi // matching PV needs to be excluded so we don't select it again chosenPVs[pv.Name] = pv - matchedClaims = append(matchedClaims, &bindingInfo{pv: pv, pvc: pvc}) + bindings = append(bindings, &bindingInfo{pv: pv, pvc: pvc}) klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName) } From 9f2fa7d2691b1cd7c69fbd6b7e28a83320a57be4 Mon Sep 17 00:00:00 2001 From: Jiatong Wang Date: Wed, 17 Apr 2019 23:30:39 -0700 Subject: [PATCH 076/209] Remove unused function and clean up redundant code --- test/e2e/framework/service_util.go | 46 +++++++++--------------------- 1 file changed, 13 insertions(+), 33 deletions(-) diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 60a10853ef3..5570243a73e 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -507,7 +507,7 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.S for i := 0; i < 3; i++ { service, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) + return nil, fmt.Errorf("failed to get Service %q: %v", name, err) } update(service) service, err = j.Client.CoreV1().Services(namespace).Update(service) @@ -515,10 +515,10 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.S return service, nil } if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { - return nil, fmt.Errorf("Failed to update Service %q: %v", name, err) + return nil, fmt.Errorf("failed to update Service %q: %v", name, err) } } - return nil, fmt.Errorf("Too many retries updating Service %q", name) + return nil, fmt.Errorf("too many retries updating Service %q", name) } // UpdateServiceOrFail fetches a service, calls the update function on it, and @@ -573,10 +573,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool { - if len(svc.Status.LoadBalancer.Ingress) > 0 { - return true - } - return false + return len(svc.Status.LoadBalancer.Ingress) > 0 }) return service } @@ -591,10 +588,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string Logf("Waiting up to %v for service %q to have no LoadBalancer", 
timeout, name) service := j.waitForConditionOrFail(namespace, name, timeout, "have no load balancer", func(svc *v1.Service) bool { - if len(svc.Status.LoadBalancer.Ingress) == 0 { - return true - } - return false + return len(svc.Status.LoadBalancer.Ingress) == 0 }) return service } @@ -757,7 +751,6 @@ func (j *ServiceTestJig) Scale(namespace string, replicas int) { if err := j.waitForPodsReady(namespace, pods); err != nil { Failf("Failed waiting for pods to be running: %v", err) } - return } func (j *ServiceTestJig) waitForPdbReady(namespace string) error { @@ -772,7 +765,7 @@ func (j *ServiceTestJig) waitForPdbReady(namespace string) error { } } - return fmt.Errorf("Timeout waiting for PDB %q to be ready", j.Name) + return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name) } func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) { @@ -800,13 +793,13 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s } Logf("Found %d/%d pods - will retry", len(found), replicas) } - return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas) + return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas) } func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { timeout := 2 * time.Minute if !CheckPodsRunningReady(j.Client, namespace, pods, timeout) { - return fmt.Errorf("Timeout waiting for %d pods to be ready", len(pods)) + return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods)) } return nil } @@ -1010,7 +1003,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err url := fmt.Sprintf("http://%s%s", ipPort, request) if ip == "" || port == 0 { Failf("Got empty IP for reachability check (%s)", url) - return false, fmt.Errorf("Invalid input ip or port") + return false, fmt.Errorf("invalid input ip or port") } Logf("Testing HTTP health check on %v", url) resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second) @@ -1031,7 +1024,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err if resp.StatusCode == 200 { return true, nil } - return false, fmt.Errorf("Unexpected HTTP response code %s from health check responder at %s", resp.Status, url) + return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url) } func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error { @@ -1106,20 +1099,6 @@ func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service { return service } -// CreateWebserverRC creates rc-backed pods with the well-known webserver -// configuration and records it for cleanup. -func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *v1.ReplicationController { - rcSpec := RcByNamePort(t.Name, replicas, t.Image, 80, v1.ProtocolTCP, t.Labels, nil) - rcAct, err := t.CreateRC(rcSpec) - if err != nil { - Failf("Failed to create rc %s: %v", rcSpec.Name, err) - } - if err := VerifyPods(t.Client, t.Namespace, t.Name, false, replicas); err != nil { - Failf("Failed to create %d pods with name %s: %v", replicas, t.Name, err) - } - return rcAct -} - // CreateRC creates a replication controller and records it for cleanup. 
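// Illustrative use from a test (a sketch, not part of this change; the
// replica count and port values are hypothetical):
//
//	rcSpec := RcByNamePort(fixture.Name, 2, fixture.Image, 80, v1.ProtocolTCP, fixture.Labels, nil)
//	if _, err := fixture.CreateRC(rcSpec); err != nil {
//		Failf("Failed to create rc %s: %v", rcSpec.Name, err)
//	}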
func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc) @@ -1349,7 +1328,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string } if len(createdPods) != replicas { - return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods)) + return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods)) } for i := range createdPods { @@ -1362,7 +1341,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string return podNames, "", err } if service.Spec.ClusterIP == "" { - return podNames, "", fmt.Errorf("Service IP is blank for %v", name) + return podNames, "", fmt.Errorf("service IP is blank for %v", name) } serviceIP := service.Spec.ClusterIP return podNames, serviceIP, nil @@ -1481,6 +1460,7 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI return fmt.Errorf("waiting for service to be down timed out") } +// CleanupServiceResources cleans up service Type=LoadBalancer resources. func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone) } From 04ab994cc337b7ad71b2be226bfd4015867b58b2 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Fri, 19 Apr 2019 18:59:24 +0800 Subject: [PATCH 077/209] fix shellcheck failures of hack/verify-description.sh and hack/verify-import-boss.sh --- hack/.shellcheck_failures | 2 -- hack/verify-description.sh | 4 ++-- hack/verify-import-boss.sh | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index ea387368541..7265be73e32 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -49,9 +49,7 @@ ./hack/verify-boilerplate.sh ./hack/verify-cli-conventions.sh ./hack/verify-codegen.sh -./hack/verify-description.sh ./hack/verify-golint.sh -./hack/verify-import-boss.sh ./hack/verify-no-vendor-cycles.sh ./hack/verify-openapi-spec.sh ./hack/verify-readonly-packages.sh diff --git a/hack/verify-description.sh b/hack/verify-description.sh index d35145f95fd..17cb9b6dc19 100755 --- a/hack/verify-description.sh +++ b/hack/verify-description.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env @@ -49,7 +49,7 @@ find_files() { } if [[ $# -eq 0 ]]; then - versioned_api_files=$(find_files | egrep "pkg/.[^/]*/((v.[^/]*)|unversioned)/types\.go") || true + versioned_api_files=$(find_files | grep -E "pkg/.[^/]*/((v.[^/]*)|unversioned)/types\.go") || true else versioned_api_files="${*}" fi diff --git a/hack/verify-import-boss.sh b/hack/verify-import-boss.sh index 30681fbbdde..7b12e514d0f 100755 --- a/hack/verify-import-boss.sh +++ b/hack/verify-import-boss.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
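+# Editorial note on the fix above (not part of this change): shellcheck flags
+# a bare "$BASH_SOURCE" (SC2128) because BASH_SOURCE is an array; indexing
+# element 0 names the current script explicitly. A quick illustration:
+#   bash -c 'echo "${BASH_SOURCE[0]:-run from an interactive shell}"'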
source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env From 8f1d45f7502122a122a53096d241d9114eddc2f5 Mon Sep 17 00:00:00 2001 From: Erick Fejta Date: Fri, 19 Apr 2019 06:50:40 +0000 Subject: [PATCH 078/209] Add --config=remote and --config=remote-cache modes for bazel --- build/root/.bazelrc | 29 +++++++++++++++++++++++++++++ build/root/BUILD.root | 12 ++++++++++++ build/root/WORKSPACE | 14 ++++++++++++++ 3 files changed, 55 insertions(+) diff --git a/build/root/.bazelrc b/build/root/.bazelrc index f08b09c8e08..f596e7083fb 100644 --- a/build/root/.bazelrc +++ b/build/root/.bazelrc @@ -39,3 +39,32 @@ build:cross:linux_arm --config=repo_infra_crosstool --platforms=@io_bazel_rules_ build:cross:linux_arm64 --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 --cpu=arm64 build:cross:linux_ppc64le --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_ppc64le --cpu=ppc64le build:cross:linux_s390x --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_s390x --cpu=s390x + +# --config=remote-cache enables a remote bazel cache +# Note needs a --remote_instance_name=projects/PROJ/instances/default_instance flag +build:remote-cache --remote_cache=remotebuildexecution.googleapis.com +build:remote-cache --tls_enabled=true +build:remote-cache --remote_timeout=3600 +build:remote-cache --auth_enabled=true + +# --config=remote adds remote execution to the --config=remote-cache +# Note needs a --remote_instance_name=projects/PROJ/instances/default_instance flag +build:remote --config=remote-cache +build:remote --remote_executor=remotebuildexecution.googleapis.com +build:remote --jobs=500 +build:remote --host_javabase=@rbe_default//java:jdk +build:remote --javabase=@rbe_default//java:jdk +build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 +build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 +build:remote --crosstool_top=@rbe_default//cc:toolchain +build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 +build:remote --extra_toolchains=@rbe_default//config:cc-toolchain +build:remote --extra_execution_platforms=:rbe_with_network +build:remote --host_platform=:rbe_with_network +build:remote --platforms=:rbe_with_network +build:remote --spawn_strategy=remote +build:remote --strategy=Javac=remote +build:remote --strategy=Closure=remote +build:remote --strategy=Genrule=remote +build:remote --define=EXECUTOR=remote + diff --git a/build/root/BUILD.root b/build/root/BUILD.root index 3469dfab6d0..f55ca3b6881 100644 --- a/build/root/BUILD.root +++ b/build/root/BUILD.root @@ -106,3 +106,15 @@ genrule( cmd = "grep ^STABLE_BUILD_SCM_REVISION bazel-out/stable-status.txt | awk '{print $$2}' >$@", stamp = 1, ) + +platform( + name = "rbe_with_network", + parents = ["@rbe_default//config:platform"], + remote_execution_properties = """ + properties: { + name: "dockerNetwork" + value: "standard" + } + {PARENT_REMOTE_EXECUTION_PROPERTIES} + """, +) diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index b0a1501f6b7..34876f8e42f 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -1,6 +1,20 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("//build:workspace_mirror.bzl", "mirror") +http_archive( + name = "bazel_toolchains", + sha256 = "8d43844d1d4447be2a108834771d617a1ad2a107f1680190bfe44925e7bf530e", + strip_prefix = "bazel-toolchains-4c003ad45e8a2d829ffc40e3aecfb6b8577a9406", + urls = [ + 
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/4c003ad45e8a2d829ffc40e3aecfb6b8577a9406.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/archive/4c003ad45e8a2d829ffc40e3aecfb6b8577a9406.tar.gz", + ], +) + +load("@bazel_toolchains//rules:rbe_repo.bzl", "rbe_autoconfig") + +rbe_autoconfig(name = "rbe_default") + http_archive( name = "bazel_skylib", sha256 = "eb5c57e4c12e68c0c20bc774bfbc60a568e800d025557bc4ea022c6479acc867", From 4d6319b9b243c471a299efb1d7be6d282c6f7b15 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Fri, 19 Apr 2019 19:21:42 +0800 Subject: [PATCH 079/209] fix shellcheck failures of hack/test-update-storage-objects.sh --- hack/.shellcheck_failures | 1 - hack/test-update-storage-objects.sh | 20 ++++++++++---------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index ea387368541..9aa01359bf3 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -43,7 +43,6 @@ ./hack/make-rules/vet.sh ./hack/pin-dependency.sh ./hack/test-integration.sh -./hack/test-update-storage-objects.sh ./hack/update-vendor.sh ./hack/verify-api-groups.sh ./hack/verify-boilerplate.sh diff --git a/hack/test-update-storage-objects.sh b/hack/test-update-storage-objects.sh index 73a7adb7cf3..98cea3d12bb 100755 --- a/hack/test-update-storage-objects.sh +++ b/hack/test-update-storage-objects.sh @@ -20,7 +20,7 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" # The api version in which objects are currently stored in etcd. @@ -67,7 +67,7 @@ function startApiServer() { --cert-dir="${TMPDIR:-/tmp/}" \ --service-cluster-ip-range="10.0.0.0/24" \ --storage-versions="${storage_versions}" \ - --storage-media-type=${storage_media_type} 1>&2 & + --storage-media-type="${storage_media_type}" 1>&2 & APISERVER_PID=$! 
# url, prefix, wait, times @@ -103,7 +103,7 @@ echo "${ETCD_VERSION}" > "${ETCD_DIR}/version.txt" # source_file,resource,namespace,name,old_version,new_version tests=( -test/e2e/testing-manifests/rbd-storage-class.yaml,storageclasses,,slow,v1beta1,v1 +"test/e2e/testing-manifests/rbd-storage-class.yaml,storageclasses,,slow,v1beta1,v1" ) KUBE_OLD_API_VERSION="networking.k8s.io/v1,storage.k8s.io/v1beta1,extensions/v1beta1" @@ -123,7 +123,7 @@ startApiServer ${KUBE_OLD_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON} # Create object(s) -for test in ${tests[@]}; do +for test in "${tests[@]}"; do IFS=',' read -ra test_data <<<"$test" source_file=${test_data[0]} @@ -140,7 +140,7 @@ for test in ${tests[@]}; do namespace="${namespace}/" fi kube::log::status "Verifying ${resource}/${namespace}${name} has storage version ${old_storage_version} in etcd" - ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep ${old_storage_version} + ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep "${old_storage_version}" done killApiServer @@ -160,7 +160,7 @@ kube::log::status "Updating storage versions in etcd" ${UPDATE_ETCD_OBJECTS_SCRIPT} # Verify that the storage version was changed in etcd -for test in ${tests[@]}; do +for test in "${tests[@]}"; do IFS=',' read -ra test_data <<<"$test" resource=${test_data[1]} namespace=${test_data[2]} @@ -171,7 +171,7 @@ for test in ${tests[@]}; do namespace="${namespace}/" fi kube::log::status "Verifying ${resource}/${namespace}${name} has updated storage version ${new_storage_version} in etcd" - ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep ${new_storage_version} + ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep "${new_storage_version}" done killApiServer @@ -188,7 +188,7 @@ RUNTIME_CONFIG="api/all=false,api/v1=true,apiregistration.k8s.io/v1=true,${KUBE_ sleep 1 startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_PROTOBUF} -for test in ${tests[@]}; do +for test in "${tests[@]}"; do IFS=',' read -ra test_data <<<"$test" resource=${test_data[1]} namespace=${test_data[2]} @@ -203,8 +203,8 @@ for test in ${tests[@]}; do kube::log::status "Verifying we can retrieve ${resource}/${namespace}${name} via kubectl" # We have to remove the cached discovery information about the old version; otherwise, # the 'kubectl get' will use that and fail to find the resource. 
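# (Editorial note, not part of this change: the cached discovery data lives
# under ~/.kube/cache/discovery/<host_port>/<group_version>, so deleting the
# old group/version below forces kubectl to rediscover resources under the
# new storage version. Illustrative check:
#   ls ~/.kube/cache/discovery/localhost_8080/)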
-  rm -rf ${HOME}/.kube/cache/discovery/localhost_8080/${KUBE_OLD_STORAGE_VERSIONS}
-  ${KUBECTL} get ${namespace_flag} ${resource}/${name}
+  rm -rf "${HOME}/.kube/cache/discovery/localhost_8080/${KUBE_OLD_STORAGE_VERSIONS}"
+  ${KUBECTL} get "${namespace_flag}" "${resource}/${name}"
 done
 
 killApiServer

From e6e43828284c6e83cf2b4658fa377b4cc7dec0c3 Mon Sep 17 00:00:00 2001
From: wojtekt
Date: Wed, 17 Apr 2019 12:53:36 +0200
Subject: [PATCH 080/209] Reduce contention in watchcache by not calling event
 handler under lock

---
 .../apiserver/pkg/storage/cacher/cacher.go    | 41 ++++++------
 .../pkg/storage/cacher/watch_cache.go         | 63 +++++++++++--------
 .../pkg/storage/cacher/watch_cache_test.go    |  3 +-
 3 files changed, 59 insertions(+), 48 deletions(-)

diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
index cb1e11437aa..c85f051e18e 100644
--- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
+++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
@@ -291,11 +291,6 @@ type Cacher struct {
 // given configuration.
 func NewCacherFromConfig(config Config) *Cacher {
 	stopCh := make(chan struct{})
-
-	watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc, config.GetAttrsFunc, config.Versioner)
-	listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
-	reflectorName := "storage/cacher.go:" + config.ResourcePrefix
-
 	obj := config.NewFunc()
 	// Give this error when it is constructed rather than when you get the
 	// first watch item, because it's much easier to track down that way.
@@ -303,18 +298,11 @@ func NewCacherFromConfig(config Config) *Cacher {
 		panic("storage codec doesn't seem to match given type: " + err.Error())
 	}
 
-	reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
-	// Configure reflector's pager to for an appropriate pagination chunk size for fetching data from
-	// storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error.
-	reflector.WatchListPageSize = storageWatchListPageSize
-
 	clock := clock.RealClock{}
 	cacher := &Cacher{
 		ready:                 newReady(),
 		storage:               config.Storage,
 		objectType:            reflect.TypeOf(obj),
-		watchCache:            watchCache,
-		reflector:             reflector,
 		versioner:             config.Versioner,
 		newFunc:               config.NewFunc,
 		triggerFunc:           config.TriggerPublisherFunc,
@@ -337,7 +325,27 @@ func NewCacherFromConfig(config Config) *Cacher {
 		bookmarkWatchers:      newTimeBucketWatchers(clock),
 		watchBookmarkEnabled:  utilfeature.DefaultFeatureGate.Enabled(features.WatchBookmark),
 	}
-	watchCache.SetOnEvent(cacher.processEvent)
+
+	// Ensure that timer is stopped.
+	if !cacher.timer.Stop() {
+		// Consume triggered (but not yet received) timer event
+		// so that future reuse does not get a spurious timeout.
+		<-cacher.timer.C
+	}
+
+	watchCache := newWatchCache(
+		config.CacheCapacity, config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner)
+	listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
+	reflectorName := "storage/cacher.go:" + config.ResourcePrefix
+
+	reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
+	// Configure reflector's pager for an appropriate pagination chunk size for fetching data from
+	// storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error.
+ reflector.WatchListPageSize = storageWatchListPageSize + + cacher.watchCache = watchCache + cacher.reflector = reflector + go cacher.dispatchEvents() cacher.stopWg.Add(1) @@ -352,13 +360,6 @@ func NewCacherFromConfig(config Config) *Cacher { ) }() - // Ensure that timer is stopped. - if !cacher.timer.Stop() { - // Consume triggered (but not yet received) timer event - // so that future reuse does not get a spurious timeout. - <-cacher.timer.C - } - return cacher } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index 97efdb98cf5..332aacd98d5 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -125,7 +125,7 @@ type watchCache struct { // This handler is run at the end of every Add/Update/Delete method // and additionally gets the previous value of the object. - onEvent func(*watchCacheEvent) + eventHandler func(*watchCacheEvent) // for testing timeouts. clock clock.Clock @@ -137,6 +137,7 @@ type watchCache struct { func newWatchCache( capacity int, keyFunc func(runtime.Object) (string, error), + eventHandler func(*watchCacheEvent), getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error), versioner storage.Versioner) *watchCache { wc := &watchCache{ @@ -149,6 +150,7 @@ func newWatchCache( store: cache.NewStore(storeElementKey), resourceVersion: 0, listResourceVersion: 0, + eventHandler: eventHandler, clock: clock.RealClock{}, versioner: versioner, } @@ -204,6 +206,8 @@ func (w *watchCache) objectToVersionedRuntimeObject(obj interface{}) (runtime.Ob return object, resourceVersion, nil } +// processEvent is safe as long as there is at most one call to it in flight +// at any point in time. func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error { key, err := w.keyFunc(event.Object) if err != nil { @@ -224,30 +228,41 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd ResourceVersion: resourceVersion, } - // TODO: We should consider moving this lock below after the watchCacheEvent - // is created. In such situation, the only problematic scenario is Replace( - // happening after getting object from store and before acquiring a lock. - // Maybe introduce another lock for this purpose. - w.Lock() - defer w.Unlock() - previous, exists, err := w.store.Get(elem) - if err != nil { + if err := func() error { + // TODO: We should consider moving this lock below after the watchCacheEvent + // is created. In such situation, the only problematic scenario is Replace( + // happening after getting object from store and before acquiring a lock. + // Maybe introduce another lock for this purpose. 
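+		// (Editorial sketch note: this closure scopes the lock; its deferred
+		// Unlock runs when the closure returns, so the eventHandler call at
+		// the end of processEvent executes outside the critical section.)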
+		w.Lock()
+		defer w.Unlock()
+
+		previous, exists, err := w.store.Get(elem)
+		if err != nil {
+			return err
+		}
+		if exists {
+			previousElem := previous.(*storeElement)
+			watchCacheEvent.PrevObject = previousElem.Object
+			watchCacheEvent.PrevObjLabels = previousElem.Labels
+			watchCacheEvent.PrevObjFields = previousElem.Fields
+		}
+
+		w.updateCache(watchCacheEvent)
+		w.resourceVersion = resourceVersion
+		defer w.cond.Broadcast()
+
+		return updateFunc(elem)
+	}(); err != nil {
 		return err
 	}
-	if exists {
-		previousElem := previous.(*storeElement)
-		watchCacheEvent.PrevObject = previousElem.Object
-		watchCacheEvent.PrevObjLabels = previousElem.Labels
-		watchCacheEvent.PrevObjFields = previousElem.Fields
-	}

-	w.updateCache(watchCacheEvent)
-	w.resourceVersion = resourceVersion
-	if w.onEvent != nil {
-		w.onEvent(watchCacheEvent)
+	// Avoid calling event handler under lock.
+	// This is safe as long as there is at most one call to processEvent in flight
+	// at any point in time.
+	if w.eventHandler != nil {
+		w.eventHandler(watchCacheEvent)
 	}
-	w.cond.Broadcast()
-	return updateFunc(elem)
+	return nil
 }

 // Assumes that lock is already held for write.
@@ -397,12 +412,6 @@ func (w *watchCache) SetOnReplace(onReplace func()) {
 	w.onReplace = onReplace
 }

-func (w *watchCache) SetOnEvent(onEvent func(*watchCacheEvent)) {
-	w.Lock()
-	defer w.Unlock()
-	w.onEvent = onEvent
-}
-
 func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) {
 	size := w.endIndex - w.startIndex
 	var oldest uint64
diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_test.go
index b7bfa0bd544..fb265ba929e 100644
--- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_test.go
+++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_test.go
@@ -76,7 +76,8 @@ func newTestWatchCache(capacity int) *watchCache {
 		return labels.Set(pod.Labels), fields.Set{"spec.nodeName": pod.Spec.NodeName}, nil
 	}
 	versioner := etcd.APIObjectVersioner{}
-	wc := newWatchCache(capacity, keyFunc, getAttrsFunc, versioner)
+	mockHandler := func(*watchCacheEvent) {}
+	wc := newWatchCache(capacity, keyFunc, mockHandler, getAttrsFunc, versioner)
 	wc.clock = clock.NewFakeClock(time.Now())
 	return wc
 }

From 3cc85a1c0998bf7356b2126df4a05485acc6e734 Mon Sep 17 00:00:00 2001
From: Maciej Szulik
Date: Fri, 19 Apr 2019 10:35:16 +0200
Subject: [PATCH 081/209] Updates OWNERS files in job controller

---
 pkg/controller/job/OWNERS | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/pkg/controller/job/OWNERS b/pkg/controller/job/OWNERS
index 2941b561f02..938e26b5eed 100644
--- a/pkg/controller/job/OWNERS
+++ b/pkg/controller/job/OWNERS
@@ -1,12 +1,8 @@
 # See the OWNERS docs at https://go.k8s.io/owners

 approvers:
-- erictune
-- janetkuo
-- soltysh
+- sig-apps-approvers
 reviewers:
-- erictune
-- janetkuo
-- soltysh
+- sig-apps-reviewers
 labels:
 - sig/apps

From c056a46ba9d45fb5aa98a285b2af2bee852f475f Mon Sep 17 00:00:00 2001
From: suigh
Date: Fri, 19 Apr 2019 16:43:00 +0800
Subject: [PATCH 082/209] Update the output of install-etcd.sh to show how to
 put etcd on the PATH.
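
The install function now prints a properly quoted, copy-pasteable export
line in both the already-installed and fresh-install paths. As a sketch of
what a user sees (the actual directory is whatever $(pwd)/etcd resolves to
at install time; the path below is hypothetical):

    export PATH="/home/user/kubernetes/third_party/etcd:${PATH}"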
---
 hack/lib/etcd.sh | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hack/lib/etcd.sh b/hack/lib/etcd.sh
index 79821e7d9d7..5b2416d387c 100755
--- a/hack/lib/etcd.sh
+++ b/hack/lib/etcd.sh
@@ -112,9 +112,9 @@ kube::etcd::install() {
     cd "${KUBE_ROOT}/third_party" || return 1

     if [[ $(readlink etcd) == etcd-v${ETCD_VERSION}-${os}-* ]]; then
-      kube::log::info "etcd v${ETCD_VERSION} already installed at path:"
-      kube::log::info "$(pwd)/$(readlink etcd)"
-      return # already installed
+      kube::log::info "etcd v${ETCD_VERSION} already installed. To use:"
+      kube::log::info "export PATH=\"$(pwd)/etcd:\${PATH}\""
+      return # already installed
     fi

     if [[ ${os} == "darwin" ]]; then
@@ -133,6 +133,6 @@ kube::etcd::install() {
       rm "${download_file}"
     fi
     kube::log::info "etcd v${ETCD_VERSION} installed. To use:"
-    kube::log::info "export PATH=$(pwd)/etcd:\${PATH}"
+    kube::log::info "export PATH=\"$(pwd)/etcd:\${PATH}\""
   )
 }

From 2fc8ac9e81dc8975dba237ab8482ed1cc8c715dd Mon Sep 17 00:00:00 2001
From: Marek Siarkowicz
Date: Fri, 19 Apr 2019 11:43:14 +0200
Subject: [PATCH 083/209] [metrics-server addon] Restore metrics-server's use
 of IP addresses

The --kubelet-preferred-address-types list is used to pick the preferred
address field from the k8s node object. It was introduced in metrics-server
0.3, which changed the default behaviour to use DNS names instead of IP
addresses. That change shipped in k8s 1.12 and was a breaking change, since
it introduced a dependency on DNS configuration. Putting InternalIP first in
the list restores the previous behaviour.
---
 cluster/addons/metrics-server/metrics-server-deployment.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml
index 0ab7bef2e8f..5862f0679fb 100644
--- a/cluster/addons/metrics-server/metrics-server-deployment.yaml
+++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml
@@ -57,6 +57,7 @@ spec:
           # Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
           - --kubelet-port=10255
           - --deprecated-kubelet-completely-insecure=true
+          - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
         ports:
         - containerPort: 443
           name: https

From 61478c04d8ad24d68462b6b8793fb6b359ece294 Mon Sep 17 00:00:00 2001
From: SataQiu
Date: Fri, 19 Apr 2019 18:04:54 +0800
Subject: [PATCH 084/209] fix golint failures of pkg/apis/batch

---
 hack/.golint_failures      | 1 -
 pkg/apis/batch/register.go | 4 +++-
 pkg/apis/batch/types.go    | 1 +
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/hack/.golint_failures b/hack/.golint_failures
index 8ab29fc3b7a..7ae161b590b 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -22,7 +22,6 @@ pkg/apis/autoscaling/v1
 pkg/apis/autoscaling/v2beta1
 pkg/apis/autoscaling/v2beta2
 pkg/apis/autoscaling/validation
-pkg/apis/batch
 pkg/apis/batch/v1
 pkg/apis/batch/v1beta1
 pkg/apis/batch/v2alpha1
diff --git a/pkg/apis/batch/register.go b/pkg/apis/batch/register.go
index 3b4a6d40678..3b1558ab4be 100644
--- a/pkg/apis/batch/register.go
+++ b/pkg/apis/batch/register.go
@@ -38,8 +38,10 @@ func Resource(resource string) schema.GroupResource {
 }

 var (
+	// SchemeBuilder points to a list of functions added to Scheme.
 	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
-	AddToScheme   = SchemeBuilder.AddToScheme
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
 )

 // Adds the list of known types to the given scheme.
diff --git a/pkg/apis/batch/types.go b/pkg/apis/batch/types.go index d62cdaf3bbb..8bbeae8301b 100644 --- a/pkg/apis/batch/types.go +++ b/pkg/apis/batch/types.go @@ -184,6 +184,7 @@ type JobStatus struct { Failed int32 } +// JobConditionType is a valid value for JobCondition.Type type JobConditionType string // These are valid conditions of a job. From 0e1956b762db91b2a48c4b3b6b2c61c07e11b309 Mon Sep 17 00:00:00 2001 From: SataQiu Date: Fri, 19 Apr 2019 18:27:32 +0800 Subject: [PATCH 085/209] fix golint failures of pkg/kubelet/checkpoint --- hack/.golint_failures | 1 - pkg/kubelet/checkpoint/checkpoint.go | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 8ab29fc3b7a..3efcd4bc60c 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -169,7 +169,6 @@ pkg/kubelet/apis/config/v1beta1 pkg/kubelet/apis/deviceplugin/v1beta1 pkg/kubelet/cadvisor pkg/kubelet/cadvisor/testing -pkg/kubelet/checkpoint pkg/kubelet/checkpointmanager/checksum pkg/kubelet/checkpointmanager/testing/example_checkpoint_formats/v1 pkg/kubelet/client diff --git a/pkg/kubelet/checkpoint/checkpoint.go b/pkg/kubelet/checkpoint/checkpoint.go index f1fa9bdb7ae..f3a4315ed63 100644 --- a/pkg/kubelet/checkpoint/checkpoint.go +++ b/pkg/kubelet/checkpoint/checkpoint.go @@ -34,6 +34,7 @@ const ( podPrefix = "Pod" ) +// PodCheckpoint defines the operations to retrieve pod type PodCheckpoint interface { checkpointmanager.Checkpoint GetPod() *v1.Pod @@ -66,6 +67,7 @@ func (cp *Data) VerifyChecksum() error { return cp.Checksum.Verify(*cp.Pod) } +// GetPod retrieves the pod from the checkpoint func (cp *Data) GetPod() *v1.Pod { return cp.Pod } From 7b8c9acc09d51a8f6018eafc49490102ae7cb0c4 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 18 Apr 2019 17:22:45 -0400 Subject: [PATCH 086/209] remove unused code Change-Id: If821920ec8872e326b7d85437ad8d2620807799d --- cmd/kubeadm/app/preflight/checks.go | 4 -- pkg/api/v1/persistentvolume/util_test.go | 6 -- pkg/apis/batch/validation/validation_test.go | 7 --- pkg/apis/policy/v1beta1/defaults.go | 5 -- .../providers/azure/azure_standard.go | 10 ---- pkg/cloudprovider/providers/gce/gce.go | 5 -- pkg/cloudprovider/providers/gce/gce_util.go | 4 -- .../openstack/openstack_loadbalancer.go | 2 - .../metrics/legacy_metrics_client_test.go | 16 ----- .../serviceaccount/tokens_controller.go | 10 ---- .../persistentvolume/pv_controller_test.go | 8 --- .../azure/azure_acr_helper.go | 10 ---- pkg/kubectl/apply/element.go | 2 - pkg/kubectl/cmd/set/BUILD | 1 - pkg/kubectl/cmd/set/helper.go | 31 ---------- pkg/kubectl/cmd/testing/fake.go | 7 --- pkg/kubectl/describe/versioned/describe.go | 10 ---- pkg/kubectl/util/deployment/deployment.go | 1 - pkg/kubelet/dockershim/helpers_test.go | 17 ------ pkg/kubelet/eviction/helpers.go | 8 --- pkg/kubelet/kubelet_node_status_test.go | 4 -- pkg/kubelet/kubelet_pods_test.go | 14 ----- pkg/kubelet/kubelet_test.go | 16 ----- pkg/kubelet/pod_workers_test.go | 13 ---- pkg/kubelet/status/status_manager_test.go | 4 -- pkg/master/BUILD | 1 - pkg/master/master_test.go | 7 --- pkg/printers/internalversion/printers.go | 8 --- pkg/proxy/iptables/proxier_test.go | 9 --- .../core/componentstatus/validator.go | 6 -- pkg/scheduler/internal/queue/BUILD | 1 - .../internal/queue/scheduling_queue.go | 6 -- pkg/util/mount/mount_helper_test.go | 19 ------ pkg/volume/awsebs/attacher_test.go | 7 --- pkg/volume/awsebs/aws_ebs.go | 21 ------- pkg/volume/awsebs/aws_util.go | 14 ----- pkg/volume/csi/csi_plugin.go 
| 9 --- pkg/volume/flexvolume/common_test.go | 18 ------ pkg/volume/flocker/flocker_test.go | 59 ------------------- pkg/volume/iscsi/iscsi.go | 9 --- pkg/volume/projected/projected_test.go | 14 ----- pkg/volume/rbd/rbd.go | 9 --- pkg/volume/scaleio/sio_mgr.go | 8 --- .../vsphere_volume/vsphere_volume_test.go | 4 -- .../apis/apiextensions/v1beta1/defaults.go | 10 ---- .../pkg/conversion/queryparams/convert.go | 4 -- .../pkg/genericclioptions/builder_flags.go | 4 -- staging/src/k8s.io/client-go/scale/client.go | 4 -- .../k8s.io/client-go/tools/cache/reflector.go | 6 -- .../tools/cache/reflector_metrics.go | 17 ------ .../tools/clientcmd/client_config.go | 10 ---- .../util/certificate/certificate_store.go | 7 --- .../cmd/informer-gen/generators/BUILD | 1 - .../cmd/informer-gen/generators/tags.go | 33 ----------- .../cmd/lister-gen/generators/BUILD | 1 - .../cmd/lister-gen/generators/tags.go | 33 ----------- test/conformance/walk.go | 12 ---- test/e2e/apimachinery/generated_clientset.go | 49 --------------- test/e2e/apimachinery/namespace.go | 18 ------ test/e2e/apimachinery/resource_quota.go | 11 ---- .../autoscaling/cluster_size_autoscaling.go | 53 ----------------- .../framework/providers/gce/recreate_node.go | 8 --- test/e2e/network/ingress.go | 23 -------- test/e2e/servicecatalog/podpreset.go | 12 ---- test/e2e/storage/BUILD | 2 - test/e2e/storage/drivers/csi_objects.go | 11 ---- test/e2e/storage/flexvolume.go | 21 ------- test/e2e/storage/regional_pd.go | 24 -------- test/e2e/storage/testsuites/subpath.go | 8 --- test/e2e_node/docker_util.go | 10 ---- test/e2e_node/remote/node_e2e.go | 17 ------ test/images/net/main.go | 8 --- test/images/netexec/netexec.go | 5 -- test/images/no-snat-test-proxy/main.go | 11 ---- .../master/transformation_testcase.go | 9 --- test/integration/scheduler/util.go | 11 ---- test/integration/statefulset/util.go | 7 --- 77 files changed, 904 deletions(-) delete mode 100644 staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go delete mode 100644 staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index f737e6e6851..93c223f667a 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -1088,10 +1088,6 @@ func RunPullImagesCheck(execer utilsexec.Interface, cfg *kubeadmapi.InitConfigur // RunChecks runs each check, displays it's warnings/errors, and once all // are processed will exit if any errors occurred. func RunChecks(checks []Checker, ww io.Writer, ignorePreflightErrors sets.String) error { - type checkErrors struct { - Name string - Errors []error - } var errsBuffer bytes.Buffer for _, c := range checks { diff --git a/pkg/api/v1/persistentvolume/util_test.go b/pkg/api/v1/persistentvolume/util_test.go index 5a60dd1ce52..0ed1a06bfef 100644 --- a/pkg/api/v1/persistentvolume/util_test.go +++ b/pkg/api/v1/persistentvolume/util_test.go @@ -268,9 +268,3 @@ func collectSecretPaths(t *testing.T, path *field.Path, name string, tp reflect. 
return secretPaths } - -func newHostPathType(pathType string) *corev1.HostPathType { - hostPathType := new(corev1.HostPathType) - *hostPathType = corev1.HostPathType(pathType) - return hostPathType -} diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index 5300193233e..9ca5493e1d9 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -17,7 +17,6 @@ limitations under the License. package validation import ( - "fmt" "strings" "testing" @@ -68,12 +67,6 @@ func getValidPodTemplateSpecForGenerated(selector *metav1.LabelSelector) api.Pod } } -func featureToggle(feature utilfeature.Feature) []string { - enabled := fmt.Sprintf("%s=%t", feature, true) - disabled := fmt.Sprintf("%s=%t", feature, false) - return []string{enabled, disabled} -} - func TestValidateJob(t *testing.T) { validManualSelector := getValidManualSelector() validPodTemplateSpecForManual := getValidPodTemplateSpecForManual(validManualSelector) diff --git a/pkg/apis/policy/v1beta1/defaults.go b/pkg/apis/policy/v1beta1/defaults.go index 47a3d1f9a58..6403387d6d6 100644 --- a/pkg/apis/policy/v1beta1/defaults.go +++ b/pkg/apis/policy/v1beta1/defaults.go @@ -18,13 +18,8 @@ package v1beta1 import ( policyv1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/runtime" ) -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - func SetDefaults_PodSecurityPolicySpec(obj *policyv1beta1.PodSecurityPolicySpec) { // This field was added after PodSecurityPolicy was released. // Policies that do not include this field must remain as permissive as they were prior to the introduction of this field. diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index c2b22e1e6df..abd38b27d8b 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -568,16 +568,6 @@ func extractResourceGroupByNicID(nicID string) (string, error) { return matches[1], nil } -// extractResourceGroupByPipID extracts the resource group name by publicIP ID. -func extractResourceGroupByPipID(pipID string) (string, error) { - matches := publicIPResourceGroupRE.FindStringSubmatch(pipID) - if len(matches) != 2 { - return "", fmt.Errorf("error of extracting resourceGroup from pipID %q", pipID) - } - - return matches[1], nil -} - // getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet. 
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) { var machine compute.VirtualMachine diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 6b36bd45613..0a22a6a59b6 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -81,11 +81,6 @@ const ( gceComputeAPIEndpointBeta = "https://www.googleapis.com/compute/beta/" ) -// gceObject is an abstraction of all GCE API object in go client -type gceObject interface { - MarshalJSON() ([]byte, error) -} - var _ cloudprovider.Interface = (*Cloud)(nil) var _ cloudprovider.Instances = (*Cloud)(nil) var _ cloudprovider.LoadBalancer = (*Cloud)(nil) diff --git a/pkg/cloudprovider/providers/gce/gce_util.go b/pkg/cloudprovider/providers/gce/gce_util.go index 328b9204f56..3b1d5353a4f 100644 --- a/pkg/cloudprovider/providers/gce/gce_util.go +++ b/pkg/cloudprovider/providers/gce/gce_util.go @@ -244,10 +244,6 @@ func makeGoogleAPINotFoundError(message string) error { return &googleapi.Error{Code: http.StatusNotFound, Message: message} } -func makeGoogleAPIError(code int, message string) error { - return &googleapi.Error{Code: code, Message: message} -} - // TODO(#51665): Remove this once Network Tiers becomes Beta in GCP. func handleAlphaNetworkTierGetError(err error) (string, error) { if isForbidden(err) { diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 0c76e695496..b3185626521 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -85,8 +85,6 @@ type LbaasV2 struct { LoadBalancer } -type empty struct{} - func networkExtensions(client *gophercloud.ServiceClient) (map[string]bool, error) { seen := make(map[string]bool) diff --git a/pkg/controller/podautoscaler/metrics/legacy_metrics_client_test.go b/pkg/controller/podautoscaler/metrics/legacy_metrics_client_test.go index 1d7a78e066d..d9901fae8d9 100644 --- a/pkg/controller/podautoscaler/metrics/legacy_metrics_client_test.go +++ b/pkg/controller/podautoscaler/metrics/legacy_metrics_client_test.go @@ -389,22 +389,6 @@ func TestCPUEmptyMetricsForOnePod(t *testing.T) { tc.runTest(t) } -func testCollapseTimeSamples(t *testing.T) { - now := time.Now() - metrics := heapster.MetricResult{ - Metrics: []heapster.MetricPoint{ - {Timestamp: now, Value: 50, FloatValue: nil}, - {Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil}, - {Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}}, - LatestTimestamp: now, - } - - val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute) - assert.True(t, hadMetrics, "should report that it received a populated list of metrics") - assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected") - assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)") -} - func offsetTimestampBy(t int) time.Time { return fixedTimestamp.Add(time.Duration(t) * time.Minute) } diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index f93cd5822f5..7b97036e836 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -702,16 +702,6 @@ func (e *TokensController) listTokenSecrets(serviceAccount *v1.ServiceAccount) ( return items, nil } 
-// serviceAccountNameAndUID is a helper method to get the ServiceAccount Name and UID from the given secret -// Returns "","" if the secret is not a ServiceAccountToken secret -// If the name or uid annotation is missing, "" is returned instead -func serviceAccountNameAndUID(secret *v1.Secret) (string, string) { - if secret.Type != v1.SecretTypeServiceAccountToken { - return "", "" - } - return secret.Annotations[v1.ServiceAccountNameKey], secret.Annotations[v1.ServiceAccountUIDKey] -} - func getSecretReferences(serviceAccount *v1.ServiceAccount) sets.String { references := sets.NewString() for _, secret := range serviceAccount.Secrets { diff --git a/pkg/controller/volume/persistentvolume/pv_controller_test.go b/pkg/controller/volume/persistentvolume/pv_controller_test.go index 37b223abf3f..c4461ded228 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_test.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_test.go @@ -234,14 +234,6 @@ func TestControllerCacheParsingError(t *testing.T) { } } -func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string) *v1.PersistentVolume { - if volume.Annotations == nil { - volume.Annotations = make(map[string]string) - } - volume.Annotations[annName] = annValue - return volume -} - func makePVCClass(scName *string, hasSelectNodeAnno bool) *v1.PersistentVolumeClaim { claim := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/credentialprovider/azure/azure_acr_helper.go b/pkg/credentialprovider/azure/azure_acr_helper.go index 1c6e0e94e6c..be18de4cec1 100644 --- a/pkg/credentialprovider/azure/azure_acr_helper.go +++ b/pkg/credentialprovider/azure/azure_acr_helper.go @@ -63,16 +63,6 @@ type authDirective struct { realm string } -type accessTokenPayload struct { - TenantID string `json:"tid"` -} - -type acrTokenPayload struct { - Expiration int64 `json:"exp"` - TenantID string `json:"tenant"` - Credential string `json:"credential"` -} - type acrAuthResponse struct { RefreshToken string `json:"refresh_token"` } diff --git a/pkg/kubectl/apply/element.go b/pkg/kubectl/apply/element.go index 6fc8b8055b7..bc9cf8a5070 100644 --- a/pkg/kubectl/apply/element.go +++ b/pkg/kubectl/apply/element.go @@ -151,8 +151,6 @@ func (mk MergeKeys) GetMergeKeyValue(i interface{}) (MergeKeyValue, error) { return result, nil } -type source int - // CombinedPrimitiveSlice implements a slice of primitives type CombinedPrimitiveSlice struct { Items []*PrimitiveListItem diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index 6f6d9d3c3c4..039c3c54431 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -28,7 +28,6 @@ go_library( "//pkg/kubectl/util/templates:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/kubectl/cmd/set/helper.go b/pkg/kubectl/cmd/set/helper.go index b2e578f607b..72cc7756056 100644 --- a/pkg/kubectl/cmd/set/helper.go +++ b/pkg/kubectl/cmd/set/helper.go @@ -17,17 +17,13 @@ limitations under the License. 
package set import ( - "fmt" - "io" "strings" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/resource" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" ) // selectContainers allows one or more containers to be matched against a string or wildcard @@ -44,33 +40,6 @@ func selectContainers(containers []v1.Container, spec string) ([]*v1.Container, return out, skipped } -// handlePodUpdateError prints a more useful error to the end user when mutating a pod. -func handlePodUpdateError(out io.Writer, err error, resource string) { - if statusError, ok := err.(*errors.StatusError); ok && errors.IsInvalid(err) { - errorDetails := statusError.Status().Details - if errorDetails.Kind == "Pod" { - all, match := true, false - for _, cause := range errorDetails.Causes { - if cause.Field == "spec" && strings.Contains(cause.Message, "may not update fields other than") { - fmt.Fprintf(out, "error: may not update %s in pod %q directly\n", resource, errorDetails.Name) - match = true - } else { - all = false - } - } - if all && match { - return - } - } else { - if ok := cmdutil.PrintErrorWithCauses(err, out); ok { - return - } - } - } - - fmt.Fprintf(out, "error: %v\n", err) -} - // selectString returns true if the provided string matches spec, where spec is a string with // a non-greedy '*' wildcard operator. // TODO: turn into a regex and handle greedy matches and backtracking. diff --git a/pkg/kubectl/cmd/testing/fake.go b/pkg/kubectl/cmd/testing/fake.go index 9e5d8d5a9dd..2d3e18c5cf9 100644 --- a/pkg/kubectl/cmd/testing/fake.go +++ b/pkg/kubectl/cmd/testing/fake.go @@ -212,13 +212,6 @@ func NewInternalNamespacedType(kind, apiversion, name, namespace string) *Intern var errInvalidVersion = errors.New("not a version") -func versionErrIfFalse(b bool) error { - if b { - return nil - } - return errInvalidVersion -} - // ValidVersion of API var ValidVersion = "v1" diff --git a/pkg/kubectl/describe/versioned/describe.go b/pkg/kubectl/describe/versioned/describe.go index b75051c0e23..bdedb868514 100644 --- a/pkg/kubectl/describe/versioned/describe.go +++ b/pkg/kubectl/describe/versioned/describe.go @@ -4393,16 +4393,6 @@ func shorten(s string, maxLength int) string { return s } -// translateTimestampUntil returns the elapsed time until timestamp in -// human-readable approximation. -func translateTimestampUntil(timestamp metav1.Time) string { - if timestamp.IsZero() { - return "" - } - - return duration.HumanDuration(time.Until(timestamp.Time)) -} - // translateTimestampSince returns the elapsed time since timestamp in // human-readable approximation. func translateTimestampSince(timestamp metav1.Time) string { diff --git a/pkg/kubectl/util/deployment/deployment.go b/pkg/kubectl/util/deployment/deployment.go index 72f99f7f2ee..49fd4ec9fdb 100644 --- a/pkg/kubectl/util/deployment/deployment.go +++ b/pkg/kubectl/util/deployment/deployment.go @@ -109,7 +109,6 @@ func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc { // TODO: switch this to full namespacers type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error) -type podListFunc func(string, metav1.ListOptions) (*corev1.PodList, error) // listReplicaSets returns a slice of RSes the given deployment targets. 
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), diff --git a/pkg/kubelet/dockershim/helpers_test.go b/pkg/kubelet/dockershim/helpers_test.go index 445191c53a4..fe36073a59b 100644 --- a/pkg/kubelet/dockershim/helpers_test.go +++ b/pkg/kubelet/dockershim/helpers_test.go @@ -18,9 +18,6 @@ package dockershim import ( "fmt" - "io/ioutil" - "os" - "path/filepath" "testing" dockertypes "github.com/docker/docker/api/types" @@ -128,20 +125,6 @@ func TestParsingCreationConflictError(t *testing.T) { require.Equal(t, matches[1], "24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e") } -// writeDockerConfig will write a config file into a temporary dir, and return that dir. -// Caller is responsible for deleting the dir and its contents. -func writeDockerConfig(cfg string) (string, error) { - tmpdir, err := ioutil.TempDir("", "dockershim=helpers_test.go=") - if err != nil { - return "", err - } - dir := filepath.Join(tmpdir, ".docker") - if err := os.Mkdir(dir, 0755); err != nil { - return "", err - } - return tmpdir, ioutil.WriteFile(filepath.Join(dir, "config.json"), []byte(cfg), 0644) -} - func TestEnsureSandboxImageExists(t *testing.T) { sandboxImage := "gcr.io/test/image" authConfig := dockertypes.AuthConfig{Username: "user", Password: "pass"} diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index e7c2ddcb19a..74229b75905 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -434,14 +434,6 @@ func formatThreshold(threshold evictionapi.Threshold) string { return fmt.Sprintf("threshold(signal=%v, operator=%v, value=%v, gracePeriod=%v)", threshold.Signal, threshold.Operator, evictionapi.ThresholdValue(threshold.Value), threshold.GracePeriod) } -// formatevictionapi.ThresholdValue formats a thresholdValue for logging. -func formatThresholdValue(value evictionapi.ThresholdValue) string { - if value.Quantity != nil { - return value.Quantity.String() - } - return fmt.Sprintf("%f%%", value.Percentage*float32(100)) -} - // cachedStatsFunc returns a statsFunc based on the provided pod stats. 
func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc { uid2PodStats := map[string]statsapi.PodStats{} diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 044faa3b68d..090dd7027f4 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -167,10 +167,6 @@ func (s sortableNodeAddress) Less(i, j int) bool { } func (s sortableNodeAddress) Swap(i, j int) { s[j], s[i] = s[i], s[j] } -func sortNodeAddresses(addrs sortableNodeAddress) { - sort.Sort(addrs) -} - func TestUpdateNewNodeStatus(t *testing.T) { cases := []struct { desc string diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 4cd3d631079..f67b075c951 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -2018,20 +2018,6 @@ func TestPodPhaseWithRestartOnFailure(t *testing.T) { } } -type fakeReadWriteCloser struct{} - -func (f *fakeReadWriteCloser) Write(data []byte) (int, error) { - return 0, nil -} - -func (f *fakeReadWriteCloser) Read(data []byte) (int, error) { - return 0, nil -} - -func (f *fakeReadWriteCloser) Close() error { - return nil -} - func TestGetExec(t *testing.T) { const ( podName = "podFoo" diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index f8fc05397de..c44e9b8038c 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -136,22 +136,6 @@ func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubel return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/) } -func newTestKubeletWithoutFakeVolumePlugin(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet { - imageList := []kubecontainer.Image{ - { - ID: "abc", - RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, - Size: 123, - }, - { - ID: "efg", - RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, - Size: 456, - }, - } - return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, false /*initFakeVolumePlugin*/) -} - func newTestKubeletWithImageList( t *testing.T, imageList []kubecontainer.Image, diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go index ff13baa99ca..2dc7cd644fa 100644 --- a/pkg/kubelet/pod_workers_test.go +++ b/pkg/kubelet/pod_workers_test.go @@ -257,19 +257,6 @@ func (kl *simpleFakeKubelet) syncPodWithWaitGroup(options syncPodOptions) error return nil } -// byContainerName sort the containers in a running pod by their names. -type byContainerName kubecontainer.Pod - -func (b byContainerName) Len() int { return len(b.Containers) } - -func (b byContainerName) Swap(i, j int) { - b.Containers[i], b.Containers[j] = b.Containers[j], b.Containers[i] -} - -func (b byContainerName) Less(i, j int) bool { - return b.Containers[i].Name < b.Containers[j].Name -} - // TestFakePodWorkers verifies that the fakePodWorkers behaves the same way as the real podWorkers // for their invocation of the syncPodFn. 
func TestFakePodWorkers(t *testing.T) { diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 03f79b2a1bb..c3068763fe7 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -883,10 +883,6 @@ func getAction() core.GetAction { return core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}} } -func updateAction() core.UpdateAction { - return core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}} -} - func patchAction() core.PatchAction { return core.PatchActionImpl{ActionImpl: core.ActionImpl{Verb: "patch", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}} } diff --git a/pkg/master/BUILD b/pkg/master/BUILD index 0bec722bf3f..c46d692a45b 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -142,7 +142,6 @@ go_test( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/batch:go_default_library", - "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/generated/openapi:go_default_library", "//pkg/kubelet/client:go_default_library", diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 90239fa0e0b..736b0591c3e 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -52,7 +52,6 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/batch" - api "k8s.io/kubernetes/pkg/apis/core" apisstorage "k8s.io/kubernetes/pkg/apis/storage" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master/reconcilers" @@ -233,12 +232,6 @@ func TestVersion(t *testing.T) { } } -type fakeEndpointReconciler struct{} - -func (*fakeEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error { - return nil -} - func makeNodeList(nodes []string, nodeResources apiv1.NodeResources) *apiv1.NodeList { list := apiv1.NodeList{ Items: make([]apiv1.Node, len(nodes)), diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 637ba4420a1..c67d028ba45 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -1010,14 +1010,6 @@ func printServiceList(list *api.ServiceList, options printers.PrintOptions) ([]m return rows, nil } -// backendStringer behaves just like a string interface and converts the given backend to a string. -func backendStringer(backend *networking.IngressBackend) string { - if backend == nil { - return "" - } - return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) -} - func formatHosts(rules []networking.IngressRule) string { list := []string{} max := 3 diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index fa49fdbb916..c096753d0d9 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -327,15 +327,6 @@ func TestDeleteEndpointConnections(t *testing.T) { } } -type fakeClosable struct { - closed bool -} - -func (c *fakeClosable) Close() error { - c.closed = true - return nil -} - // fakePortOpener implements portOpener. 
type fakePortOpener struct { openPorts []*utilproxy.LocalPort diff --git a/pkg/registry/core/componentstatus/validator.go b/pkg/registry/core/componentstatus/validator.go index ebfad4bf020..2c237ade28a 100644 --- a/pkg/registry/core/componentstatus/validator.go +++ b/pkg/registry/core/componentstatus/validator.go @@ -18,7 +18,6 @@ package componentstatus import ( "crypto/tls" - "net/http" "sync" "time" @@ -31,11 +30,6 @@ const ( probeTimeOut = 20 * time.Second ) -// TODO: this basic interface is duplicated in N places. consolidate? -type httpGet interface { - Get(url string) (*http.Response, error) -} - type ValidatorFn func([]byte) error type Server struct { diff --git a/pkg/scheduler/internal/queue/BUILD b/pkg/scheduler/internal/queue/BUILD index 23ec5520400..a0729869868 100644 --- a/pkg/scheduler/internal/queue/BUILD +++ b/pkg/scheduler/internal/queue/BUILD @@ -9,7 +9,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/internal/queue", visibility = ["//pkg/scheduler:__subpackages__"], deps = [ - "//pkg/api/v1/pod:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/metrics:go_default_library", diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 43b06c8257f..6bc9bf74bd5 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -39,7 +39,6 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/pkg/scheduler/metrics" @@ -353,11 +352,6 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { return err } -func isPodUnschedulable(pod *v1.Pod) bool { - _, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) - return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable -} - // nsNameForPod returns a namespacedname for a pod func nsNameForPod(pod *v1.Pod) ktypes.NamespacedName { return ktypes.NamespacedName{ diff --git a/pkg/util/mount/mount_helper_test.go b/pkg/util/mount/mount_helper_test.go index 3afcca37986..18a3e16c7ea 100644 --- a/pkg/util/mount/mount_helper_test.go +++ b/pkg/util/mount/mount_helper_test.go @@ -113,18 +113,6 @@ func TestDoCleanupMountPoint(t *testing.T) { } } -func validateDirEmpty(dir string) error { - files, err := ioutil.ReadDir(dir) - if err != nil { - return err - } - - if len(files) != 0 { - return fmt.Errorf("Directory %q is not empty", dir) - } - return nil -} - func validateDirExists(dir string) error { _, err := ioutil.ReadDir(dir) if err != nil { @@ -143,10 +131,3 @@ func validateDirNotExists(dir string) error { } return fmt.Errorf("dir %q still exists", dir) } - -func validateFileExists(file string) error { - if _, err := os.Stat(file); err != nil { - return err - } - return nil -} diff --git a/pkg/volume/awsebs/attacher_test.go b/pkg/volume/awsebs/attacher_test.go index df92328f03e..0124295c4b5 100644 --- a/pkg/volume/awsebs/attacher_test.go +++ b/pkg/volume/awsebs/attacher_test.go @@ -205,13 +205,6 @@ type detachCall struct { ret error } -type diskIsAttachedCall struct { - diskName aws.KubernetesVolumeID - nodeName types.NodeName - isAttached bool - ret error -} - func (testcase *testcase) AttachDisk(diskName 
aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) { expected := &testcase.attach diff --git a/pkg/volume/awsebs/aws_ebs.go b/pkg/volume/awsebs/aws_ebs.go index 274ac79f2b1..2f6e285e84b 100644 --- a/pkg/volume/awsebs/aws_ebs.go +++ b/pkg/volume/awsebs/aws_ebs.go @@ -454,27 +454,6 @@ func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) s return filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name) } -// Reverses the mapping done in makeGlobalPDPath -func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) { - basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath) - rel, err := filepath.Rel(basePath, globalPath) - if err != nil { - klog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err) - return "", err - } - if strings.Contains(rel, "../") { - klog.Errorf("Unexpected mount path: %s", globalPath) - return "", fmt.Errorf("unexpected mount path: " + globalPath) - } - // Reverse the :// replacement done in makeGlobalPDPath - volumeID := rel - if strings.HasPrefix(volumeID, "aws/") { - volumeID = strings.Replace(volumeID, "aws/", "aws://", 1) - } - klog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID) - return volumeID, nil -} - func (ebs *awsElasticBlockStore) GetPath() string { return getPath(ebs.podUID, ebs.volName, ebs.plugin.host) } diff --git a/pkg/volume/awsebs/aws_util.go b/pkg/volume/awsebs/aws_util.go index 67fe8d9cb47..43aadb8e0c3 100644 --- a/pkg/volume/awsebs/aws_util.go +++ b/pkg/volume/awsebs/aws_util.go @@ -201,20 +201,6 @@ func verifyDevicePath(devicePaths []string) (string, error) { return "", nil } -// Returns the first path that exists, or empty string if none exist. 
-func verifyAllPathsRemoved(devicePaths []string) (bool, error) { - allPathsRemoved := true - for _, path := range devicePaths { - exists, err := mount.PathExists(path) - if err != nil { - return false, fmt.Errorf("Error checking if path exists: %v", err) - } - allPathsRemoved = allPathsRemoved && !exists - } - - return allPathsRemoved, nil -} - // Returns list of all paths for given EBS mount // This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id) // Here it is mostly about applying the partition path diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 68b50eccd3b..e7bf8f359ea 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -894,12 +894,3 @@ func isV0Version(version string) bool { return parsedVersion.Major() == 0 } - -func isV1Version(version string) bool { - parsedVersion, err := utilversion.ParseGeneric(version) - if err != nil { - return false - } - - return parsedVersion.Major() == 1 -} diff --git a/pkg/volume/flexvolume/common_test.go b/pkg/volume/flexvolume/common_test.go index 45cbf267542..a7ed35b64df 100644 --- a/pkg/volume/flexvolume/common_test.go +++ b/pkg/volume/flexvolume/common_test.go @@ -20,7 +20,6 @@ import ( "encoding/json" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/test/utils/harness" @@ -108,23 +107,6 @@ func fakeVolumeSpec() *volume.Spec { return volume.NewSpecFromVolume(vol) } -func fakePersistentVolumeSpec() *volume.Spec { - vol := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vol1", - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - FlexVolume: &v1.FlexPersistentVolumeSource{ - Driver: "kubernetes.io/fakeAttacher", - ReadOnly: false, - }, - }, - }, - } - return volume.NewSpecFromPersistentVolume(vol, false) -} - func specJSON(plugin *flexVolumeAttachablePlugin, spec *volume.Spec, extraOptions map[string]string) string { o, err := NewOptionsForDriver(spec, plugin.host, extraOptions) if err != nil { diff --git a/pkg/volume/flocker/flocker_test.go b/pkg/volume/flocker/flocker_test.go index fee6f06983e..a770844095a 100644 --- a/pkg/volume/flocker/flocker_test.go +++ b/pkg/volume/flocker/flocker_test.go @@ -302,62 +302,3 @@ func TestIsReadOnly(t *testing.T) { b := &flockerVolumeMounter{readOnly: true} assert.True(t, b.GetAttributes().ReadOnly) } - -type mockFlockerClient struct { - datasetID, primaryUUID, path string - datasetState *flockerapi.DatasetState -} - -func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient { - return &mockFlockerClient{ - datasetID: mockDatasetID, - primaryUUID: mockPrimaryUUID, - path: mockPath, - datasetState: &flockerapi.DatasetState{ - Path: mockPath, - DatasetID: mockDatasetID, - Primary: mockPrimaryUUID, - }, - } -} - -func (m mockFlockerClient) CreateDataset(metaName string) (*flockerapi.DatasetState, error) { - return m.datasetState, nil -} -func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) { - return m.datasetState, nil -} -func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) { - return m.datasetID, nil -} -func (m mockFlockerClient) GetPrimaryUUID() (string, error) { - return m.primaryUUID, nil -} -func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) { - return m.datasetState, nil -} - 
-/* -TODO: re-enable after refactor -func TestSetUpAtInternal(t *testing.T) { - const dir = "dir" - mockPath := "expected-to-be-set-properly" // package var - expectedPath := mockPath - - assert := assert.New(t) - - plugMgr, rootDir := newInitializedVolumePlugMgr(t) - if rootDir != "" { - defer os.RemoveAll(rootDir) - } - plug, err := plugMgr.FindPluginByName(flockerPluginName) - assert.NoError(err) - - pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} - b := flockerVolumeMounter{flockerVolume: &flockerVolume{pod: pod, plugin: plug.(*flockerPlugin)}} - b.client = newMockFlockerClient("dataset-id", "primary-uid", mockPath) - - assert.NoError(b.SetUpAt(dir, nil)) - assert.Equal(expectedPath, b.flocker.path) -} -*/ diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index e5262ea6178..3dd5ddf3ca2 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -594,15 +594,6 @@ func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) ( return secret, err } -func createVolumeFromISCSIVolumeSource(volumeName string, iscsi v1.ISCSIVolumeSource) *v1.Volume { - return &v1.Volume{ - Name: volumeName, - VolumeSource: v1.VolumeSource{ - ISCSI: &iscsi, - }, - } -} - func createPersistentVolumeFromISCSIPVSource(volumeName string, iscsi v1.ISCSIPersistentVolumeSource) *v1.PersistentVolume { block := v1.PersistentVolumeBlock return &v1.PersistentVolume{ diff --git a/pkg/volume/projected/projected_test.go b/pkg/volume/projected/projected_test.go index 932e2fe992c..5955f490dbf 100644 --- a/pkg/volume/projected/projected_test.go +++ b/pkg/volume/projected/projected_test.go @@ -1193,20 +1193,6 @@ func makeSecret(namespace, name string) v1.Secret { } } -func configMap(namespace, name string) v1.ConfigMap { - return v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - Data: map[string]string{ - "data-1": "value-1", - "data-2": "value-2", - "data-3": "value-3", - }, - } -} - func makeProjection(name string, defaultMode int32, kind string) *v1.ProjectedVolumeSource { var item v1.VolumeProjection diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 89df8315c73..8d2fbd31577 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -1081,15 +1081,6 @@ func getVolumeAccessModes(spec *volume.Spec) ([]v1.PersistentVolumeAccessMode, e return nil, nil } -func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) { - secret, err := volutil.GetSecretForPod(pod, secretName, kubeClient) - if err != nil { - klog.Errorf("failed to get secret from [%q/%q]: %+v", pod.Namespace, secretName, err) - return "", fmt.Errorf("failed to get secret from [%q/%q]: %+v", pod.Namespace, secretName, err) - } - return parseSecretMap(secret) -} - func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { secret, err := volutil.GetSecretForPV(namespace, secretName, rbdPluginName, kubeClient) if err != nil { diff --git a/pkg/volume/scaleio/sio_mgr.go b/pkg/volume/scaleio/sio_mgr.go index a322276b1dd..56a0680d1fa 100644 --- a/pkg/volume/scaleio/sio_mgr.go +++ b/pkg/volume/scaleio/sio_mgr.go @@ -27,14 +27,6 @@ import ( siotypes "github.com/codedellemc/goscaleio/types/v1" ) -type storageInterface interface { - CreateVolume(string, int64) (*siotypes.Volume, error) - AttachVolume(string, bool) (string, error) - IsAttached(string) (bool, error) - DetachVolume(string) error - DeleteVolume(string) error -} - type sioMgr struct { client sioInterface 
configData map[string]string diff --git a/pkg/volume/vsphere_volume/vsphere_volume_test.go b/pkg/volume/vsphere_volume/vsphere_volume_test.go index 09cbe4ed420..97217a10176 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_test.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_test.go @@ -62,10 +62,6 @@ func TestCanSupport(t *testing.T) { type fakePDManager struct { } -func getFakeDeviceName(host volume.VolumeHost, volPath string) string { - return path.Join(host.GetPluginDir(vsphereVolumePluginName), "device", volPath) -} - func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner, selectedZone []string) (volSpec *VolumeSpec, err error) { volSpec = &VolumeSpec{ Path: "[local] test-volume-name.vmdk", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index 3e1567bb633..ff9e28a112d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -80,13 +80,3 @@ func SetDefaults_ServiceReference(obj *ServiceReference) { obj.Port = utilpointer.Int32Ptr(443) } } - -// hasPerVersionColumns returns true if a CRD uses per-version columns. -func hasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { - for _, v := range versions { - if len(v.AdditionalPrinterColumns) > 0 { - return true - } - } - return false -} diff --git a/staging/src/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/staging/src/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index b3804aa42b2..2f0dd0074af 100644 --- a/staging/src/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/staging/src/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,10 +54,6 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } -func formatValue(value interface{}) string { - return fmt.Sprintf("%v", value) -} - func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go index 77007a9988d..8db95f927c9 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go @@ -211,10 +211,6 @@ func ResourceFinderForResult(result resource.Visitor) ResourceFinder { }) } -func strPtr(val string) *string { - return &val -} - func boolPtr(val bool) *bool { return &val } diff --git a/staging/src/k8s.io/client-go/scale/client.go b/staging/src/k8s.io/client-go/scale/client.go index ddf0d4ac8f7..00e597523b1 100644 --- a/staging/src/k8s.io/client-go/scale/client.go +++ b/staging/src/k8s.io/client-go/scale/client.go @@ -30,10 +30,6 @@ import ( var scaleConverter = NewScaleConverter() var codecs = serializer.NewCodecFactory(scaleConverter.Scheme()) -// restInterfaceProvider turns a restclient.Config into a restclient.Interface. -// It's overridable for the purposes of testing. -type restInterfaceProvider func(*restclient.Config) (restclient.Interface, error) - // scaleClient is an implementation of ScalesGetter // which makes use of a RESTMapper and a generic REST // client to support an discoverable resource. 
diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector.go b/staging/src/k8s.io/client-go/tools/cache/reflector.go index 4b5daeedc2a..72a69ea6e98 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector.go @@ -25,7 +25,6 @@ import ( "net" "net/url" "reflect" - "strings" "sync" "syscall" "time" @@ -113,11 +112,6 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } -func makeValidPrometheusMetricLabel(in string) string { - // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) -} - // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/"} diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector_metrics.go b/staging/src/k8s.io/client-go/tools/cache/reflector_metrics.go index 0945e5c3a2a..dd849c8fa1b 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector_metrics.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -94,23 +94,6 @@ var metricsFactory = struct { metricsProvider: noopMetricsProvider{}, } -func newReflectorMetrics(name string) *reflectorMetrics { - var ret *reflectorMetrics - if len(name) == 0 { - return ret - } - return &reflectorMetrics{ - numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), - listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), - numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), - numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name), - numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), - watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), - numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), - lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), - } -} - // SetReflectorMetricsProvider sets the metrics provider func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { metricsFactory.setProviders.Do(func() { diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index a7b8c1c6e42..878e0df79ff 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -296,16 +296,6 @@ func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { return config } -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information -func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { - config := restclient.Config{} - config.CAFile = info.CAFile - if info.Insecure != nil { - config.Insecure = *info.Insecure - } - return config -} - func canIdentifyUser(config restclient.Config) bool { return len(config.Username) > 0 || (len(config.CertFile) > 0 || len(config.CertData) > 0) || diff --git a/staging/src/k8s.io/client-go/util/certificate/certificate_store.go b/staging/src/k8s.io/client-go/util/certificate/certificate_store.go index d2666615421..9b008084b6d 100644 --- a/staging/src/k8s.io/client-go/util/certificate/certificate_store.go +++ 
b/staging/src/k8s.io/client-go/util/certificate/certificate_store.go @@ -23,7 +23,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "time" "k8s.io/klog" @@ -289,12 +288,6 @@ func (s *fileStore) filename(qualifier string) string { return s.pairNamePrefix + "-" + qualifier + pemExtension } -// withoutExt returns the given filename after removing the extension. The -// extension to remove will be the result of filepath.Ext(). -func withoutExt(filename string) string { - return strings.TrimSuffix(filename, filepath.Ext(filename)) -} - func loadX509KeyPair(certFile, keyFile string) (*tls.Certificate, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD index 1717edd83f8..82040b6316b 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD @@ -14,7 +14,6 @@ go_library( "groupinterface.go", "informer.go", "packages.go", - "tags.go", "types.go", "versioninterface.go", ], diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go deleted file mode 100644 index d25d5b63049..00000000000 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generators - -import ( - "k8s.io/gengo/types" - "k8s.io/klog" -) - -// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if -// it exists, the value is boolean. If the tag did not exist, it returns -// false. -func extractBoolTagOrDie(key string, lines []string) bool { - val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) - if err != nil { - klog.Fatal(err) - } - return val -} diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD index ec1a2ec626e..e301f6e43e5 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD @@ -10,7 +10,6 @@ go_library( srcs = [ "expansion.go", "lister.go", - "tags.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/lister-gen/generators", importpath = "k8s.io/code-generator/cmd/lister-gen/generators", diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go deleted file mode 100644 index d25d5b63049..00000000000 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD
index ec1a2ec626e..e301f6e43e5 100644
--- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD
+++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD
@@ -10,7 +10,6 @@ go_library(
     srcs = [
         "expansion.go",
         "lister.go",
-        "tags.go",
     ],
     importmap = "k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/lister-gen/generators",
     importpath = "k8s.io/code-generator/cmd/lister-gen/generators",
diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go
deleted file mode 100644
index d25d5b63049..00000000000
--- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generators
-
-import (
-	"k8s.io/gengo/types"
-	"k8s.io/klog"
-)
-
-// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
-// it exists, the value is boolean. If the tag did not exist, it returns
-// false.
-func extractBoolTagOrDie(key string, lines []string) bool {
-	val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
-	if err != nil {
-		klog.Fatal(err)
-	}
-	return val
-}
diff --git a/test/conformance/walk.go b/test/conformance/walk.go
index 0cabab981fe..dd02c89ecf1 100644
--- a/test/conformance/walk.go
+++ b/test/conformance/walk.go
@@ -300,18 +300,6 @@ func (v *visitor) Visit(node ast.Node) (w ast.Visitor) {
 	return v
 }
 
-func scandir(dir string) {
-	v := newVisitor()
-	pkg, err := parser.ParseDir(v.FileSet, dir, nil, parser.ParseComments)
-	if err != nil {
-		panic(err)
-	}
-
-	for _, p := range pkg {
-		ast.Walk(v, p)
-	}
-}
-
 func scanfile(path string, src interface{}) []conformanceData {
 	v := newVisitor()
 	file, err := parser.ParseFile(v.FileSet, path, src, parser.ParseComments)
diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go
index 2873cdfbd14..48ca4fbace9 100644
--- a/test/e2e/apimachinery/generated_clientset.go
+++ b/test/e2e/apimachinery/generated_clientset.go
@@ -36,27 +36,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
-func stagingClientPod(name, value string) v1.Pod {
-	return v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-			Labels: map[string]string{
-				"name": "foo",
-				"time": value,
-			},
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:  "nginx",
-					Image: imageutils.GetE2EImage(imageutils.Nginx),
-					Ports: []v1.ContainerPort{{ContainerPort: 80}},
-				},
-			},
-		},
-	}
-}
-
 func testingPod(name, value string) v1.Pod {
 	return v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -98,34 +77,6 @@ func observeCreation(w watch.Interface) {
 	}
 }
 
-func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
-	// output to give us a duration to failure. Maybe we aren't getting the
-	// full timeout for some reason. My guess would be watch failure
-	framework.Logf("Starting to observe pod deletion")
-	deleted := false
-	timeout := false
-	timer := time.After(framework.DefaultPodDeletionTimeout)
-	for !deleted && !timeout {
-		select {
-		case event, normal := <-w.ResultChan():
-			if !normal {
-				framework.Failf("The channel was closed unexpectedly")
-				return
-			}
-			if event.Type == watch.Deleted {
-				obj = event.Object
-				deleted = true
-			}
-		case <-timer:
-			timeout = true
-		}
-	}
-	if !deleted {
-		framework.Failf("Failed to observe pod deletion")
-	}
-	return
-}
-
 func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool) {
 	timer := time.After(30 * time.Second)
 	updated := false
diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go
index a52346d03e2..fccc7bce7af 100644
--- a/test/e2e/apimachinery/namespace.go
+++ b/test/e2e/apimachinery/namespace.go
@@ -27,7 +27,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -82,23 +81,6 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
 	}))
 }
 
-func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
-	var pod *v1.Pod
-	var err error
-	err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
-		pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
-		if errors.IsNotFound(err) {
-			return false, nil
-		}
-		if err != nil {
-			return false, err
-		}
-		return true, nil
-	})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
-	return pod
-}
-
 func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 	ginkgo.By("Creating a test namespace")
 	namespaceName := "nsdeletetest"
diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go
index f9069ec4cb4..0a089a619b3 100644
--- a/test/e2e/apimachinery/resource_quota.go
+++ b/test/e2e/apimachinery/resource_quota.go
@@ -1362,17 +1362,6 @@ func newTestResourceQuotaWithScopeForPriorityClass(name string, hard v1.Resource
 	}
 }
 
-// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing feature LocalStorageCapacityIsolation
-func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
-	hard := v1.ResourceList{}
-	hard[v1.ResourceEphemeralStorage] = resource.MustParse("500Mi")
-	hard[v1.ResourceQuotas] = resource.MustParse("1")
-	return &v1.ResourceQuota{
-		ObjectMeta: metav1.ObjectMeta{Name: name},
-		Spec:       v1.ResourceQuotaSpec{Hard: hard},
-	}
-}
-
 // newTestResourceQuota returns a quota that enforces default constraints for testing
 func newTestResourceQuota(name string) *v1.ResourceQuota {
 	hard := v1.ResourceList{}
diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index 7158b97236c..b14f1f60a46 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -17,7 +17,6 @@ limitations under the License.
 package autoscaling
 
 import (
-	"bytes"
 	"fmt"
 	"io/ioutil"
 	"math"
@@ -1187,30 +1186,6 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
 	return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
 }
 
-func executeHTTPRequest(method string, url string, body string) (string, error) {
-	client := &http.Client{}
-	req, err := http.NewRequest(method, url, strings.NewReader(body))
-	if err != nil {
-		By(fmt.Sprintf("Can't create request: %s", err.Error()))
-		return "", err
-	}
-	resp, err := client.Do(req)
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	respBody, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-	if resp.StatusCode != http.StatusOK {
-		return "", fmt.Errorf("error: %s %s", resp.Status, string(respBody))
-	}
-
-	return string(respBody), nil
-}
-
 func addNodePool(name string, machineType string, numNodes int) {
 	args := []string{"container", "node-pools", "create", name, "--quiet",
 		"--machine-type=" + machineType,
@@ -1297,26 +1272,6 @@ func getPoolSize(f *framework.Framework, poolName string) int {
 	return size
 }
 
-func doPut(url, content string) (string, error) {
-	req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(content)))
-	if err != nil {
-		return "", err
-	}
-	req.Header.Set("Content-Type", "application/json")
-	client := &http.Client{}
-	resp, err := client.Do(req)
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-	strBody := string(body)
-	return strBody, nil
-}
-
 func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
 	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
 	request := int64(1024 * 1024 * megabytes / replicas)
@@ -1750,14 +1705,6 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
 	return nil
 }
 
-// wrap runReplicatedPodOnEachNode to return cleanup
-func runReplicatedPodOnEachNodeWithCleanup(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) (func(), error) {
-	err := runReplicatedPodOnEachNode(f, nodes, namespace, podsPerNode, id, labels, memRequest)
-	return func() {
-		framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, id)
-	}, err
-}
-
 // Increase cluster size by newNodesForScaledownTests to create some unused nodes
 // that can be later removed by cluster autoscaler.
 func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int {
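The executeHTTPRequest and doPut helpers removed above had no remaining callers in the suite. The pattern they wrapped is plain net/http; a self-contained sketch for reference (URL, payload, and the 200-only success check are illustrative assumptions, not the suite's behavior):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "strings"
    )

    // putJSON issues a PUT with a JSON body and returns the response body,
    // mirroring the shape of the helpers deleted above.
    func putJSON(url, content string) (string, error) {
        req, err := http.NewRequest(http.MethodPut, url, strings.NewReader(content))
        if err != nil {
            return "", err
        }
        req.Header.Set("Content-Type", "application/json")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return "", err
        }
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "", err
        }
        if resp.StatusCode != http.StatusOK {
            return "", fmt.Errorf("error: %s %s", resp.Status, string(body))
        }
        return string(body), nil
    }

    func main() {
        body, err := putJSON("http://127.0.0.1:8080/config", `{"enabled":true}`)
        fmt.Println(body, err)
    }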
diff --git a/test/e2e/framework/providers/gce/recreate_node.go b/test/e2e/framework/providers/gce/recreate_node.go
index 06a4c2e90c4..84affd39dd2 100644
--- a/test/e2e/framework/providers/gce/recreate_node.go
+++ b/test/e2e/framework/providers/gce/recreate_node.go
@@ -39,14 +39,6 @@ func nodeNames(nodes []v1.Node) []string {
 	return result
 }
 
-func podNames(pods []v1.Pod) []string {
-	result := make([]string, 0, len(pods))
-	for i := range pods {
-		result = append(result, pods[i].Name)
-	}
-	return result
-}
-
 var _ = Describe("Recreate [Feature:Recreate]", func() {
 	f := framework.NewDefaultFramework("recreate")
 	var originalNodes []v1.Node
diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
index de97a30190e..e4c08b49546 100644
--- a/test/e2e/network/ingress.go
+++ b/test/e2e/network/ingress.go
@@ -858,29 +858,6 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
 	Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
 }
 
-func detectHTTPVersionAndSchemeTest(f *framework.Framework, jig *ingress.TestJig, address, version, scheme string) {
-	timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
-	resp := ""
-	err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
-		var err error
-		resp, err = framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "")
-		if err != nil {
-			framework.Logf("SimpleGET failed: %v", err)
-			return false, nil
-		}
-		if !strings.Contains(resp, version) {
-			framework.Logf("Waiting for transition to HTTP/2")
-			return false, nil
-		}
-		if !strings.Contains(resp, scheme) {
-			return false, nil
-		}
-		framework.Logf("Poll succeeded, request was served by HTTP2")
-		return true, nil
-	})
-	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get %s or %s, response body: %s", version, scheme, resp))
-}
-
 func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
 	if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) {
 		svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go
index e26d9dbfdd6..180dd4b6144 100644
--- a/test/e2e/servicecatalog/podpreset.go
+++ b/test/e2e/servicecatalog/podpreset.go
@@ -287,18 +287,6 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 	})
 })
 
-func getPodPreset(c clientset.Interface, ns, name string) (*settings.PodPreset, error) {
-	return c.SettingsV1alpha1().PodPresets(ns).Get(name, metav1.GetOptions{})
-}
-
 func createPodPreset(c clientset.Interface, ns string, job *settings.PodPreset) (*settings.PodPreset, error) {
 	return c.SettingsV1alpha1().PodPresets(ns).Create(job)
 }
-
-func updatePodPreset(c clientset.Interface, ns string, job *settings.PodPreset) (*settings.PodPreset, error) {
-	return c.SettingsV1alpha1().PodPresets(ns).Update(job)
-}
-
-func deletePodPreset(c clientset.Interface, ns, name string) error {
-	return c.SettingsV1alpha1().PodPresets(ns).Delete(name, nil)
-}
diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index 3b44b4838c7..009376a75ed 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -57,9 +57,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
diff --git a/test/e2e/storage/drivers/csi_objects.go b/test/e2e/storage/drivers/csi_objects.go
index 9960da8add4..aa623e3e8f6 100644
--- a/test/e2e/storage/drivers/csi_objects.go
+++ b/test/e2e/storage/drivers/csi_objects.go
@@ -47,17 +47,6 @@ var (
 	}
 )
 
-func csiContainerImage(image string) string {
-	var fullName string
-	fullName += *csiImageRegistry + "/" + image + ":"
-	if *csiImageVersion != "" {
-		fullName += *csiImageVersion
-	} else {
-		fullName += csiImageVersions[image]
-	}
-	return fullName
-}
-
 func shredFile(filePath string) {
 	if _, err := os.Stat(filePath); os.IsNotExist(err) {
 		framework.Logf("File %v was not found, skipping shredding", filePath)
diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go
index 1c61c9bb820..f4b223bb65d 100644
--- a/test/e2e/storage/flexvolume.go
+++ b/test/e2e/storage/flexvolume.go
@@ -27,9 +27,6 @@ import (
 	. "github.com/onsi/ginkgo"
 	"k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
-	versionutil "k8s.io/apimachinery/pkg/util/version"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/version"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -144,24 +141,6 @@ func sshAndLog(cmd, host string, failOnError bool) {
 	}
 }
 
-func getMasterVersion(c clientset.Interface) (*versionutil.Version, error) {
-	var err error
-	var v *version.Info
-	waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
-		v, err = c.Discovery().ServerVersion()
-		return err == nil, nil
-	})
-	if waitErr != nil {
-		return nil, fmt.Errorf("Could not get the master version: %v", waitErr)
-	}
-
-	return versionutil.MustParseSemantic(v.GitVersion), nil
-}
-
-func getNodeVersion(node *v1.Node) *versionutil.Version {
-	return versionutil.MustParseSemantic(node.Status.NodeInfo.KubeletVersion)
-}
-
 func getHostFromHostPort(hostPort string) string {
 	// try to split host and port
 	var host string
diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
index 4df7aa96d90..15e10aaf30a 100644
--- a/test/e2e/storage/regional_pd.go
+++ b/test/e2e/storage/regional_pd.go
@@ -542,30 +542,6 @@ func getTwoRandomZones(c clientset.Interface) []string {
 	return []string{zone1, zone2}
 }
 
-// Waits for at least 1 replica of a StatefulSet to become not ready or until timeout occurs, whichever comes first.
-func waitForStatefulSetReplicasNotReady(statefulSetName, ns string, c clientset.Interface) error {
-	const poll = 3 * time.Second
-	const timeout = statefulSetReadyTimeout
-
-	framework.Logf("Waiting up to %v for StatefulSet %s to have at least 1 replica to become not ready", timeout, statefulSetName)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
-		if err != nil {
-			framework.Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, poll, err)
-			continue
-		} else {
-			if sts.Status.ReadyReplicas < *sts.Spec.Replicas {
-				framework.Logf("%d replicas are ready out of a total of %d replicas in StatefulSet %s. (%v)",
-					sts.Status.ReadyReplicas, *sts.Spec.Replicas, statefulSetName, time.Since(start))
-				return nil
-			} else {
-				framework.Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
-			}
-		}
-	}
-	return fmt.Errorf("All replicas in StatefulSet %s are still ready within %v", statefulSetName, timeout)
-}
-
 // If match is true, check if zones in PV exactly match zones given.
 // Otherwise, check whether zones in PV is superset of zones given.
 func verifyZonesInPV(volume *v1.PersistentVolume, zones sets.String, match bool) error {
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go
index d91d1a96119..121822dde05 100644
--- a/test/e2e/storage/testsuites/subpath.go
+++ b/test/e2e/storage/testsuites/subpath.go
@@ -641,14 +641,6 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
 	}
 }
 
-func clearSubpathPodCommands(pod *v1.Pod) {
-	pod.Spec.InitContainers[0].Command = nil
-	pod.Spec.InitContainers[1].Args = nil
-	pod.Spec.InitContainers[2].Args = nil
-	pod.Spec.Containers[0].Args = nil
-	pod.Spec.Containers[1].Args = nil
-}
-
 func setInitCommand(pod *v1.Pod, command string) {
 	pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command}
 }
diff --git a/test/e2e_node/docker_util.go b/test/e2e_node/docker_util.go
index 2e84e7b6fc5..359e15cf2ec 100644
--- a/test/e2e_node/docker_util.go
+++ b/test/e2e_node/docker_util.go
@@ -53,16 +53,6 @@ func isSharedPIDNamespaceSupported() (bool, error) {
 	return version.GTE(semver.MustParse("1.26.0")), nil
 }
 
-// isDockerNoNewPrivilegesSupported returns true if Docker version is 1.11+
-// (API version 1.23+), and false otherwise.
-func isDockerNoNewPrivilegesSupported() (bool, error) {
-	version, err := getDockerAPIVersion()
-	if err != nil {
-		return false, err
-	}
-	return version.GTE(semver.MustParse("1.23.0")), nil
-}
-
 // isDockerLiveRestoreSupported returns true if live-restore is supported in
 // the current Docker version.
 func isDockerLiveRestoreSupported() (bool, error) {
diff --git a/test/e2e_node/remote/node_e2e.go b/test/e2e_node/remote/node_e2e.go
index 63bc7dabbaa..2e5751b168f 100644
--- a/test/e2e_node/remote/node_e2e.go
+++ b/test/e2e_node/remote/node_e2e.go
@@ -85,23 +85,6 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
 	return nil
 }
 
-// dest is relative to the root of the tar
-func tarAddFile(tar, source, dest string) error {
-	dir := filepath.Dir(dest)
-	tardir := filepath.Join(tar, dir)
-	tardest := filepath.Join(tar, dest)
-
-	out, err := exec.Command("mkdir", "-p", tardir).CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("failed to create archive bin subdir %q, was dest for file %q. Err: %v. Output:\n%s", tardir, source, err, out)
-	}
-	out, err = exec.Command("cp", source, tardest).CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("failed to copy file %q to the archive bin subdir %q. Err: %v. Output:\n%s", source, tardir, err, out)
-	}
-	return nil
-}
-
 // prependCOSMounterFlag prepends the flag for setting the GCI mounter path to
 // args and returns the result.
 func prependCOSMounterFlag(args, host, workspace string) (string, error) {
diff --git a/test/images/net/main.go b/test/images/net/main.go
index 896bdca9d00..b3323221c74 100644
--- a/test/images/net/main.go
+++ b/test/images/net/main.go
@@ -33,11 +33,6 @@ import (
 
 type runnerMap map[string]common.Runner
 
-type runRequestJSON struct {
-	runner  string
-	options interface{}
-}
-
 var (
 	// flags for the command line. See usage args below for
 	// descriptions.
@@ -157,6 +152,3 @@ func handleRunRequest(w http.ResponseWriter, r *http.Request) {
 
 	fmt.Fprintf(w, "ok\noutput:\n\n"+output.b.String())
 }
-
-func setupLogger() {
-}
diff --git a/test/images/netexec/netexec.go b/test/images/netexec/netexec.go
index 8bd6f632bc1..f95b0c584f9 100644
--- a/test/images/netexec/netexec.go
+++ b/test/images/netexec/netexec.go
@@ -62,11 +62,6 @@ func (a *atomicBool) get() bool {
 	return atomic.LoadInt32(&a.v) == 1
 }
 
-type output struct {
-	responses []string
-	errors    []string
-}
-
 func init() {
 	flag.IntVar(&httpPort, "http-port", 8080, "HTTP Listen Port")
 	flag.IntVar(&udpPort, "udp-port", 8081, "UDP Listen Port")
diff --git a/test/images/no-snat-test-proxy/main.go b/test/images/no-snat-test-proxy/main.go
index 6646982b818..7b2c9547964 100644
--- a/test/images/no-snat-test-proxy/main.go
+++ b/test/images/no-snat-test-proxy/main.go
@@ -21,7 +21,6 @@ import (
 	"io/ioutil"
 	"net/http"
 	"os"
-	"strings"
 
 	"github.com/spf13/pflag"
 	cliflag "k8s.io/component-base/cli/flag"
@@ -66,16 +65,6 @@ func (m *masqTestProxy) Run() error {
 	return http.ListenAndServe(":"+m.Port, nil)
 }
 
-type handler func(http.ResponseWriter, *http.Request)
-
-func joinErrors(errs []error, sep string) string {
-	strs := make([]string, len(errs))
-	for i, err := range errs {
-		strs[i] = err.Error()
-	}
-	return strings.Join(strs, sep)
-}
-
 func checknosnatURL(pip, ips string) string {
 	return fmt.Sprintf("http://%s/checknosnat?ips=%s", pip, ips)
 }
diff --git a/test/integration/master/transformation_testcase.go b/test/integration/master/transformation_testcase.go
index 50d5c1ab5bd..4a59f05343a 100644
--- a/test/integration/master/transformation_testcase.go
+++ b/test/integration/master/transformation_testcase.go
@@ -258,12 +258,3 @@ func (e *transformTest) printMetrics() error {
 
 	return nil
 }
-
-func contains(s []string, e string) bool {
-	for _, a := range s {
-		if a == e {
-			return true
-		}
-	}
-	return false
-}
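The scheduler test helpers that follow are built on client-go's wait.ConditionFunc idiom: poll a closure until it reports done, an error, or a timeout. A self-contained sketch of that idiom (the channel and durations are illustrative; a real condition would fetch an object and inspect its status):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        // Simulate an asynchronous state change, e.g. a pod becoming scheduled.
        done := make(chan struct{})
        go func() {
            time.Sleep(300 * time.Millisecond)
            close(done)
        }()

        // Check the condition immediately, then every 100ms, for up to 2s.
        err := wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
            select {
            case <-done:
                return true, nil
            default:
                return false, nil
            }
        })
        fmt.Println("condition met:", err == nil)
    }

wait.PollImmediate runs the condition once before the first interval elapses, which is why these tests prefer it when the desired state may already hold.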
diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go
index 91b999a9567..70cc08c2479 100644
--- a/test/integration/scheduler/util.go
+++ b/test/integration/scheduler/util.go
@@ -589,17 +589,6 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
 	}
 }
 
-// podUnschedulable returns a condition function that returns true if the given pod
-// gets unschedulable status.
-func podSchedulableCondition(c clientset.Interface, podNamespace, podName string) (*v1.PodCondition, error) {
-	pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
-	}
-	_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-	return cond, nil
-}
-
 // podUnschedulable returns a condition function that returns true if the given pod
 // gets unschedulable status.
 func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
diff --git a/test/integration/statefulset/util.go b/test/integration/statefulset/util.go
index 0accec21302..bf866f7db90 100644
--- a/test/integration/statefulset/util.go
+++ b/test/integration/statefulset/util.go
@@ -48,13 +48,6 @@ const (
 	fakeImage = "fakeimage"
 )
 
-type statefulsetTester struct {
-	t           *testing.T
-	c           clientset.Interface
-	service     *v1.Service
-	statefulset *appsv1.StatefulSet
-}
-
 func labelMap() map[string]string {
 	return map[string]string{"foo": "bar"}
 }

From 9870475c3dd2aa1842e490f2e1c9444151f52c49 Mon Sep 17 00:00:00 2001
From: Maciej Szulik
Date: Fri, 19 Apr 2019 15:30:55 +0200
Subject: [PATCH 087/209] Add soltysh and pwittrock to sig-cli-api-reviewers

---
 OWNERS_ALIASES | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index 1ca190d7e32..5446f6c4f6f 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -359,10 +359,11 @@ aliases:
   #   -
   #   -
-  # sig-cli-api-reviewers:
-  #   -
-  #   -
+  sig-cli-api-reviewers:
+    - pwittrock
+    - soltysh
+
   sig-cloud-provider-api-reviewers:
     - andrewsykim
     - cheftako

From 14775c4057b3f7e930338f08f9af25f5d36a5612 Mon Sep 17 00:00:00 2001
From: SataQiu
Date: Fri, 19 Apr 2019 22:19:41 +0800
Subject: [PATCH 088/209] fix shellcheck failures of hack/grab-profiles.sh

---
 hack/.shellcheck_failures | 1 -
 hack/grab-profiles.sh     | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 3802b45be93..14a58294a54 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -26,7 +26,6 @@
 ./cluster/validate-cluster.sh
 ./hack/cherry_pick_pull.sh
 ./hack/ginkgo-e2e.sh
-./hack/grab-profiles.sh
 ./hack/lib/init.sh
 ./hack/lib/swagger.sh
 ./hack/lib/test.sh
diff --git a/hack/grab-profiles.sh b/hack/grab-profiles.sh
index e9b4b09f536..b0a9bc08904 100755
--- a/hack/grab-profiles.sh
+++ b/hack/grab-profiles.sh
@@ -47,7 +47,7 @@ function grab_profiles_from_component {
   done
 }
 
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 source "${KUBE_ROOT}/hack/lib/init.sh"
 
 server_addr=""
 output_dir="."
 tunnel_port="${tunnel_port:-1234}"
 
 args=$(getopt -o s:mho:k:c -l server:,master,heapster,output:,kubelet:,scheduler,controller-manager,help,inuse-space,inuse-objects,alloc-space,alloc-objects,cpu,kubelet-binary:,master-binary:,scheduler-binary:,controller-manager-binary:,scheduler-port:,controller-manager-port: -- "$@")
-if [[ $? ]]; then
   >&2 echo "Error in getopt"
   exit 1
 fi

From a20aec8d2ddccdcdc654951801098c9ff069cdbf Mon Sep 17 00:00:00 2001
From: SataQiu
Date: Fri, 19 Apr 2019 23:28:34 +0800
Subject: [PATCH 089/209] fix shellcheck failures of hack/verify-openapi-spec.sh

---
 hack/.shellcheck_failures   |  1 -
 hack/verify-openapi-spec.sh | 10 +++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 3802b45be93..869f3fdd03a 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -47,7 +47,6 @@
 ./hack/verify-codegen.sh
 ./hack/verify-golint.sh
 ./hack/verify-no-vendor-cycles.sh
-./hack/verify-openapi-spec.sh
 ./hack/verify-readonly-packages.sh
 ./hack/verify-test-featuregates.sh
 ./test/cmd/apply.sh
diff --git a/hack/verify-openapi-spec.sh b/hack/verify-openapi-spec.sh
index 38501e821d9..6965a6754ef 100755
--- a/hack/verify-openapi-spec.sh
+++ b/hack/verify-openapi-spec.sh
@@ -18,7 +18,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
@@ -32,10 +32,10 @@ _tmp="${KUBE_ROOT}/_tmp"
 
 mkdir -p "${_tmp}"
 cp -a "${SPECROOT}" "${TMP_SPECROOT}"
-trap "cp -a ${TMP_SPECROOT} ${SPECROOT}/..; rm -rf ${_tmp}" EXIT SIGINT
-rm ${SPECROOT}/*
-cp ${TMP_SPECROOT}/BUILD ${SPECROOT}/BUILD
-cp ${TMP_SPECROOT}/README.md ${SPECROOT}/README.md
+trap 'cp -a "${TMP_SPECROOT}" "${SPECROOT}"/..; rm -rf "${_tmp}"' EXIT SIGINT
+rm "${SPECROOT}"/*
+cp "${TMP_SPECROOT}/BUILD" "${SPECROOT}/BUILD"
+cp "${TMP_SPECROOT}/README.md" "${SPECROOT}/README.md"
 
 "${KUBE_ROOT}/hack/update-openapi-spec.sh"
 echo "diffing ${SPECROOT} against freshly generated openapi spec"

From 580513ed668065a7266a1aa22a4531f6a63b6989 Mon Sep 17 00:00:00 2001
From: Dmitry Rozhkov
Date: Thu, 18 Apr 2019 16:31:20 +0300
Subject: [PATCH 090/209] kubeadm: drop duplicate function NewCACertAndKey

The function certs.NewCACertAndKey() is just a wrapper around
pkiutil.NewCertificateAuthority() which doesn't add any additional
functionality. Instead use pkiutil.NewCertificateAuthority() directly.
---
 cmd/kubeadm/app/phases/certs/certlist.go      |  4 ++--
 cmd/kubeadm/app/phases/certs/certs.go         | 13 +------------
 cmd/kubeadm/app/phases/certs/certs_test.go    | 10 ----------
 cmd/kubeadm/app/phases/certs/renewal/BUILD    |  1 -
 .../phases/certs/renewal/filerenewal_test.go  |  4 ++--
 .../app/phases/certs/renewal/renewal_test.go  |  5 ++---
 cmd/kubeadm/app/util/certs/util.go            |  7 -------
 cmd/kubeadm/app/util/pkiutil/pki_helpers.go   |  4 ++--
 .../app/util/pkiutil/pki_helpers_test.go      | 17 +++++++----------
 9 files changed, 16 insertions(+), 49 deletions(-)
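For readers outside the kubeadm tree: pkiutil.NewCertificateAuthority, which replaces the deleted wrapper in the hunks below, builds a self-signed CA via client-go's certutil.NewSelfSignedCACert. A rough standalone sketch of the same idea using only the standard library (common name, serial, and validity period are illustrative assumptions):

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/x509"
        "crypto/x509/pkix"
        "fmt"
        "math/big"
        "time"
    )

    // newCA creates a self-signed CA certificate and matching private key.
    func newCA(commonName string) (*x509.Certificate, *rsa.PrivateKey, error) {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            return nil, nil, err
        }
        tmpl := x509.Certificate{
            SerialNumber:          big.NewInt(1),
            Subject:               pkix.Name{CommonName: commonName},
            NotBefore:             time.Now(),
            NotAfter:              time.Now().Add(10 * 365 * 24 * time.Hour),
            KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
            BasicConstraintsValid: true,
            IsCA:                  true,
        }
        // Template doubles as parent: that is what makes it self-signed.
        der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key)
        if err != nil {
            return nil, nil, err
        }
        cert, err := x509.ParseCertificate(der)
        if err != nil {
            return nil, nil, err
        }
        return cert, key, nil
    }

    func main() {
        cert, _, err := newCA("kubernetes")
        if err != nil {
            panic(err)
        }
        fmt.Println("created CA:", cert.Subject.CommonName, "IsCA:", cert.IsCA)
    }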
diff --git a/cmd/kubeadm/app/phases/certs/certlist.go b/cmd/kubeadm/app/phases/certs/certlist.go
index ec969d5e4da..08d42046cfb 100644
--- a/cmd/kubeadm/app/phases/certs/certlist.go
+++ b/cmd/kubeadm/app/phases/certs/certlist.go
@@ -85,7 +85,7 @@ func (k *KubeadmCert) CreateAsCA(ic *kubeadmapi.InitConfiguration) (*x509.Certif
 	if err != nil {
 		return nil, nil, errors.Wrapf(err, "couldn't get configuration for %q CA certificate", k.Name)
 	}
-	caCert, caKey, err := NewCACertAndKey(cfg)
+	caCert, caKey, err := pkiutil.NewCertificateAuthority(cfg)
 	if err != nil {
 		return nil, nil, errors.Wrapf(err, "couldn't generate %q CA certificate", k.Name)
 	}
@@ -141,7 +141,7 @@ func (t CertificateTree) CreateTree(ic *kubeadmapi.InitConfiguration) error {
 			// CA key exists; just use that to create new certificates.
 		} else {
 			// CACert doesn't already exist, create a new cert and key.
-			caCert, caKey, err = NewCACertAndKey(cfg)
+			caCert, caKey, err = pkiutil.NewCertificateAuthority(cfg)
 			if err != nil {
 				return err
 			}
diff --git a/cmd/kubeadm/app/phases/certs/certs.go b/cmd/kubeadm/app/phases/certs/certs.go
index 2cac3f16e72..6d3da11d63e 100644
--- a/cmd/kubeadm/app/phases/certs/certs.go
+++ b/cmd/kubeadm/app/phases/certs/certs.go
@@ -90,17 +90,6 @@ func NewServiceAccountSigningKey() (*rsa.PrivateKey, error) {
 	return saSigningKey, nil
 }
 
-// NewCACertAndKey will generate a self signed CA.
-func NewCACertAndKey(certSpec *certutil.Config) (*x509.Certificate, *rsa.PrivateKey, error) {
-
-	caCert, caKey, err := pkiutil.NewCertificateAuthority(certSpec)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "failure while generating CA certificate and key")
-	}
-
-	return caCert, caKey, nil
-}
-
 // CreateCACertAndKeyFiles generates and writes out a given certificate authority.
 // The certSpec should be one of the variables from this package.
 func CreateCACertAndKeyFiles(certSpec *KubeadmCert, cfg *kubeadmapi.InitConfiguration) error {
@@ -114,7 +103,7 @@ func CreateCACertAndKeyFiles(certSpec *KubeadmCert, cfg *kubeadmapi.InitConfigur
 		return err
 	}
 
-	caCert, caKey, err := NewCACertAndKey(certConfig)
+	caCert, caKey, err := pkiutil.NewCertificateAuthority(certConfig)
 	if err != nil {
 		return err
 	}
diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go
index a77a33f95de..8924c72bd49 100644
--- a/cmd/kubeadm/app/phases/certs/certs_test.go
+++ b/cmd/kubeadm/app/phases/certs/certs_test.go
@@ -370,16 +370,6 @@ func TestWriteKeyFilesIfNotExist(t *testing.T) {
 	}
 }
 
-func TestNewCACertAndKey(t *testing.T) {
-	certCfg := &certutil.Config{CommonName: "kubernetes"}
-	caCert, _, err := NewCACertAndKey(certCfg)
-	if err != nil {
-		t.Fatalf("failed call NewCACertAndKey: %v", err)
-	}
-
-	certstestutil.AssertCertificateIsCa(t, caCert)
-}
-
 func TestSharedCertificateExists(t *testing.T) {
 	caCert, caKey := certstestutil.CreateCACert(t)
 	_, key, _ := certstestutil.CreateTestCert(t, caCert, caKey, certutil.AltNames{})
diff --git a/cmd/kubeadm/app/phases/certs/renewal/BUILD b/cmd/kubeadm/app/phases/certs/renewal/BUILD
index 98da00fd170..626aad15750 100644
--- a/cmd/kubeadm/app/phases/certs/renewal/BUILD
+++ b/cmd/kubeadm/app/phases/certs/renewal/BUILD
@@ -30,7 +30,6 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//cmd/kubeadm/app/phases/certs:go_default_library",
        "//cmd/kubeadm/app/util/certs:go_default_library",
         "//cmd/kubeadm/app/util/pkiutil:go_default_library",
         "//cmd/kubeadm/test:go_default_library",
diff --git a/cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go b/cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go
index 3c8a9e58f35..29d92e78c30 100644
--- a/cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go
+++ b/cmd/kubeadm/app/phases/certs/renewal/filerenewal_test.go
@@ -21,12 +21,12 @@ import (
 	"testing"
 
 	certutil "k8s.io/client-go/util/cert"
-	"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
+	"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
 )
 
 func TestFileRenew(t *testing.T) {
 	caCertCfg := &certutil.Config{CommonName: "kubernetes"}
-	caCert, caKey, err := certs.NewCACertAndKey(caCertCfg)
+	caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg)
 	if err != nil {
 		t.Fatalf("couldn't create CA: %v", err)
 	}
diff --git a/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go b/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go
index bf651e00d0a..0d9c6df54a1 100644
--- a/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go
+++ b/cmd/kubeadm/app/phases/certs/renewal/renewal_test.go
@@ -32,7 +32,6 @@ import (
 	fakecerts "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake"
 	k8stesting "k8s.io/client-go/testing"
 	certutil "k8s.io/client-go/util/cert"
-	"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
 	certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs"
 	"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
 	testutil "k8s.io/kubernetes/cmd/kubeadm/test"
@@ -40,7 +39,7 @@ import (
 
 func TestRenewImplementations(t *testing.T) {
 	caCertCfg := &certutil.Config{CommonName: "kubernetes"}
-	caCert, caKey, err := certs.NewCACertAndKey(caCertCfg)
+	caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg)
 	if err != nil {
 		t.Fatalf("couldn't create CA: %v", err)
 	}
@@ -198,7 +197,7 @@ func TestRenewExistingCert(t *testing.T) {
 	}
 
 	caCertCfg := &certutil.Config{CommonName: "kubernetes"}
-	caCert, caKey, err := certs.NewCACertAndKey(caCertCfg)
+	caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg)
 	if err != nil {
 		t.Fatalf("couldn't create CA: %v", err)
 	}
diff --git a/cmd/kubeadm/app/util/certs/util.go b/cmd/kubeadm/app/util/certs/util.go
index bf0d0210b71..cc0731f87e3 100644
--- a/cmd/kubeadm/app/util/certs/util.go
+++ b/cmd/kubeadm/app/util/certs/util.go
@@ -39,13 +39,6 @@ func SetupCertificateAuthorithy(t *testing.T) (*x509.Certificate, *rsa.PrivateKe
 	return caCert, caKey
 }
 
-// AssertCertificateIsCa is a utility function for kubeadm testing that asserts if a given certificate is a CA
-func AssertCertificateIsCa(t *testing.T, cert *x509.Certificate) {
-	if !cert.IsCA {
-		t.Error("cert is not a valida CA")
-	}
-}
-
 // AssertCertificateIsSignedByCa is a utility function for kubeadm testing that asserts if a given certificate is signed
 // by the expected CA
 func AssertCertificateIsSignedByCa(t *testing.T, cert *x509.Certificate, signingCa *x509.Certificate) {
diff --git a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
index 06f15c06774..6f68a01cbc8 100644
--- a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
+++ b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
@@ -61,12 +61,12 @@ const (
 func NewCertificateAuthority(config *certutil.Config) (*x509.Certificate, *rsa.PrivateKey, error) {
 	key, err := NewPrivateKey()
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "unable to create private key")
+		return nil, nil, errors.Wrap(err, "unable to create private key while generating CA certificate")
 	}
 
 	cert, err := certutil.NewSelfSignedCACert(*config, key)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "unable to create self-signed certificate")
+		return nil, nil, errors.Wrap(err, "unable to create self-signed CA certificate")
 	}
 
 	return cert, key, nil
diff --git a/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go b/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go
index 3d162e6e30d..b4542a2ecc6 100644
--- a/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go
+++ b/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go
@@ -33,20 +33,17 @@ func TestNewCertificateAuthority(t *testing.T) {
 	cert, key, err := NewCertificateAuthority(&certutil.Config{CommonName: "kubernetes"})
 
 	if cert == nil {
-		t.Errorf(
-			"failed NewCertificateAuthority, cert == nil",
-		)
+		t.Error("failed NewCertificateAuthority, cert == nil")
+	} else if !cert.IsCA {
+		t.Error("cert is not a valida CA")
 	}
+
 	if key == nil {
-		t.Errorf(
-			"failed NewCertificateAuthority, key == nil",
-		)
+		t.Error("failed NewCertificateAuthority, key == nil")
 	}
+
 	if err != nil {
-		t.Errorf(
-			"failed NewCertificateAuthority with an error: %v",
-			err,
-		)
+		t.Errorf("failed NewCertificateAuthority with an error: %+v", err)
 	}
 }

From 42fcd5eb635dad664f5b136401c15718a7309ed7 Mon Sep 17 00:00:00 2001
From: Jake Sanders
Date: Fri, 19 Apr 2019 17:40:28 +0000
Subject: [PATCH 091/209] remove erroneous kube-apiserver.manifest sed line

---
 cluster/gce/gci/configure-helper.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh
index 655f493785e..70ebda6e04e 100644
--- a/cluster/gce/gci/configure-helper.sh
+++ b/cluster/gce/gci/configure-helper.sh
@@ -1882,7 +1882,6 @@ function start-kube-apiserver {
   sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
   sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}"
   sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
-  sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
   sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
   sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
   sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"

From e05f85a6116a8c8056a9d8224de3a10543a3a3ac Mon Sep 17 00:00:00 2001
From: aaa <1693291525@qq.com>
Date: Fri, 19 Apr 2019 14:59:50 -0400
Subject: [PATCH 092/209] Fix shellcheck failures on verify-readonly-packages.sh

---
 hack/.shellcheck_failures        |  1 -
 hack/verify-readonly-packages.sh | 11 ++++++++---
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 3eac39a72b3..55ef10e6d69 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -47,7 +47,6 @@
 ./hack/verify-golint.sh
 ./hack/verify-no-vendor-cycles.sh
 ./hack/verify-openapi-spec.sh
-./hack/verify-readonly-packages.sh
 ./hack/verify-test-featuregates.sh
 ./test/cmd/apply.sh
 ./test/cmd/apps.sh
diff --git a/hack/verify-readonly-packages.sh b/hack/verify-readonly-packages.sh
index 014984c904e..56d59cc7f01 100755
--- a/hack/verify-readonly-packages.sh
+++ b/hack/verify-readonly-packages.sh
@@ -22,7 +22,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 source "${KUBE_ROOT}/hack/lib/init.sh"
 
 readonly branch=${1:-${KUBE_VERIFY_GIT_BRANCH:-master}}
@@ -44,12 +44,17 @@ find_files() {
 }
 
 IFS=$'\n'
-conflicts=($(find_files | sed 's|/.readonly||' | while read dir; do
+
+conflicts=()
+while IFS=$'\n' read -r line; do
+  conflicts+=( "$line" )
+done < <(find_files | sed 's|/.readonly||' | while read -r dir; do
   dir=${dir#./}
   if kube::util::has_changes "${branch}" "^${dir}/[^/]*\$" '/\.readonly$|/BUILD$|/zz_generated|/\.generated\.|\.proto$|\.pb\.go$' >/dev/null; then
     echo "${dir}"
   fi
-done))
+done)
+
 unset IFS
 
 if [ ${#conflicts[@]} -gt 0 ]; then

From ad5bccc510f6a9072251a5201442fc031cf86744 Mon Sep 17 00:00:00 2001
From: aaa <1693291525@qq.com>
Date: Mon, 18 Mar 2019 06:36:03 -0400
Subject: [PATCH 093/209] fix shellcheck in test-smoke.sh and test-network.sh

update pull request

update pull request

update pull request

update pull request
---
 cluster/test-network.sh   | 3 +--
 cluster/test-smoke.sh     | 4 +---
 hack/.shellcheck_failures | 2 --
 3 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/cluster/test-network.sh b/cluster/test-network.sh
index cd0d159e8ef..acc51742844 100755
--- a/cluster/test-network.sh
+++ b/cluster/test-network.sh
@@ -25,6 +25,5 @@ set -o nounset
 set -o pipefail
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-KUBE_CONFIG_FILE="config-default.sh"
 
-${KUBE_ROOT}/hack/ginkgo-e2e.sh --ginkgo.focus=Networking
+"${KUBE_ROOT}/hack/ginkgo-e2e.sh" --ginkgo.focus=Networking
diff --git a/cluster/test-smoke.sh b/cluster/test-smoke.sh
index b5e6d34f92d..fdedf37a165 100755
--- a/cluster/test-smoke.sh
+++ b/cluster/test-smoke.sh
@@ -25,8 +25,6 @@ set -o pipefail
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 
-TEST_ARGS="$@"
-
 SMOKE_TEST_FOCUS_REGEX="Guestbook.application"
 
-exec "${KUBE_ROOT}/cluster/test-e2e.sh" -ginkgo.focus="${SMOKE_TEST_FOCUS_REGEX}" ${TEST_ARGS}
+exec "${KUBE_ROOT}/cluster/test-e2e.sh" -ginkgo.focus="${SMOKE_TEST_FOCUS_REGEX}" "$@"
diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 55ef10e6d69..99ea9187da7 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -20,8 +20,6 @@
 ./cluster/pre-existing/util.sh
 ./cluster/restore-from-backup.sh
 ./cluster/test-e2e.sh
-./cluster/test-network.sh
-./cluster/test-smoke.sh
 ./cluster/validate-cluster.sh
 ./hack/cherry_pick_pull.sh
 ./hack/ginkgo-e2e.sh

From d94c5bac47686644fb48b416f63d9be214f5a86f Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Fri, 19 Apr 2019 22:30:44 +0200
Subject: [PATCH 094/209] kubeadm-cleanup-unused-func

---
 cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go | 15 ---------------
 .../app/phases/kubeconfig/kubeconfig_test.go    | 10 ----------
 2 files changed, 25 deletions(-)
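Context for the deletion below: each kubeconfig file kubeadm writes is an ordinary clientcmd api.Config serialized to disk, and the surviving wrappers still feed createKubeConfigFiles the same way. A minimal sketch of that underlying mechanism (server address, file paths, and names are placeholders, not kubeadm's actual spec handling):

    package main

    import (
        "k8s.io/client-go/tools/clientcmd"
        clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    )

    func main() {
        cfg := clientcmdapi.NewConfig()
        cfg.Clusters["kubernetes"] = &clientcmdapi.Cluster{
            Server:               "https://10.0.0.1:6443",
            CertificateAuthority: "/etc/kubernetes/pki/ca.crt",
        }
        cfg.AuthInfos["admin"] = &clientcmdapi.AuthInfo{
            ClientCertificate: "/etc/kubernetes/pki/admin.crt",
            ClientKey:         "/etc/kubernetes/pki/admin.key",
        }
        cfg.Contexts["admin@kubernetes"] = &clientcmdapi.Context{
            Cluster:  "kubernetes",
            AuthInfo: "admin",
        }
        cfg.CurrentContext = "admin@kubernetes"
        // Serialize the config in standard kubeconfig YAML form.
        if err := clientcmd.WriteToFile(*cfg, "/tmp/admin.conf"); err != nil {
            panic(err)
        }
    }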
diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go
index 485178d5817..666b39da475 100644
--- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go
+++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go
@@ -59,21 +59,6 @@ type kubeConfigSpec struct {
 	ClientCertAuth *clientCertAuth
 }
 
-// CreateInitKubeConfigFiles will create and write to disk all kubeconfig files necessary in the kubeadm init phase
-// to establish the control plane, including also the admin kubeconfig file.
-// If kubeconfig files already exists, they are used only if evaluated equal; otherwise an error is returned.
-func CreateInitKubeConfigFiles(outDir string, cfg *kubeadmapi.InitConfiguration) error {
-	klog.V(1).Infoln("creating all kubeconfig files")
-	return createKubeConfigFiles(
-		outDir,
-		cfg,
-		kubeadmconstants.AdminKubeConfigFileName,
-		kubeadmconstants.KubeletKubeConfigFileName,
-		kubeadmconstants.ControllerManagerKubeConfigFileName,
-		kubeadmconstants.SchedulerKubeConfigFileName,
-	)
-}
-
 // CreateJoinControlPlaneKubeConfigFiles will create and write to disk the kubeconfig files required by kubeadm
 // join --control-plane workflow, plus the admin kubeconfig file used by the administrator and kubeadm itself; the
 // kubelet.conf file must not be created because it will be created and signed by the kubelet TLS bootstrap process.
diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go
index 6c6cccc32c6..bf52c353cfe 100644
--- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go
+++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go
@@ -289,16 +289,6 @@ func TestCreateKubeconfigFilesAndWrappers(t *testing.T) {
 			},
 			expectedError: true,
 		},
-		{ // Test CreateInitKubeConfigFiles (wrapper to createKubeConfigFile)
-			name:                     "CreateInitKubeConfigFiles",
-			createKubeConfigFunction: CreateInitKubeConfigFiles,
-			expectedFiles: []string{
-				kubeadmconstants.AdminKubeConfigFileName,
-				kubeadmconstants.KubeletKubeConfigFileName,
-				kubeadmconstants.ControllerManagerKubeConfigFileName,
-				kubeadmconstants.SchedulerKubeConfigFileName,
-			},
-		},
 		{ // Test CreateJoinControlPlaneKubeConfigFiles (wrapper to createKubeConfigFile)
 			name:                     "CreateJoinControlPlaneKubeConfigFiles",
 			createKubeConfigFunction: CreateJoinControlPlaneKubeConfigFiles,

From d4d5afb9cc42d99d9922d4d6edbf42b30ff6d86c Mon Sep 17 00:00:00 2001
From: Jake Sanders
Date: Thu, 18 Apr 2019 19:01:37 +0000
Subject: [PATCH 095/209] add additional approvers from GKE

---
 cluster/gce/OWNERS | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/cluster/gce/OWNERS b/cluster/gce/OWNERS
index eef8316838b..8dffe265b8e 100644
--- a/cluster/gce/OWNERS
+++ b/cluster/gce/OWNERS
@@ -2,6 +2,7 @@
 reviewers:
   - bowei
+  - cjcullen
   - gmarek
   - jszczepkowski
   - vishh
@@ -9,8 +10,10 @@
   - MaciekPytel
   - jingax10
   - yujuhong
+  - zmerlynn
 approvers:
   - bowei
+  - cjcullen
   - gmarek
   - jszczepkowski
   - vishh
@@ -18,3 +21,4 @@
   - MaciekPytel
   - jingax10
   - yujuhong
+  - zmerlynn

From 51149dad31cbc09a8838934fb6a9c6c89f2a9ef1 Mon Sep 17 00:00:00 2001
From: Wei Huang
Date: Wed, 17 Apr 2019 14:39:49 -0700
Subject: [PATCH 096/209] Fixed a kubemark panic when hollow-node is morphed as proxy

---
 cmd/kubemark/hollow-node.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
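Context for the fix below: the hollow node wires kube-proxy with a FakeExec from k8s.io/utils/exec/testing, whose LookPath delegates to the LookPathFunc field, so leaving that field nil appears to be what made the proxy path panic. A small sketch of the fake with the hook installed (the error text is illustrative):

    package main

    import (
        "errors"
        "fmt"

        fakeexec "k8s.io/utils/exec/testing"
    )

    func main() {
        // With LookPathFunc set, LookPath returns an error instead of
        // dereferencing a nil function value.
        execer := &fakeexec.FakeExec{
            LookPathFunc: func(cmd string) (string, error) {
                return "", errors.New("fake execer: " + cmd + " not found")
            },
        }
        if _, err := execer.LookPath("iptables"); err != nil {
            fmt.Println("lookup failed as intended:", err)
        }
    }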
diff --git a/cmd/kubemark/hollow-node.go b/cmd/kubemark/hollow-node.go
index 5309b16005f..685c36c0ca8 100644
--- a/cmd/kubemark/hollow-node.go
+++ b/cmd/kubemark/hollow-node.go
@@ -17,6 +17,7 @@ limitations under the License.
 package main
 
 import (
+	"errors"
 	goflag "flag"
 	"fmt"
 	"math/rand"
@@ -185,7 +186,9 @@ func run(config *hollowNodeConfig) {
 		}
 		iptInterface := fakeiptables.NewFake()
 		sysctl := fakesysctl.NewFake()
-		execer := &fakeexec.FakeExec{}
+		execer := &fakeexec.FakeExec{
+			LookPathFunc: func(_ string) (string, error) { return "", errors.New("fake execer") },
+		}
 		eventBroadcaster := record.NewBroadcaster()
 		recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "kube-proxy", Host: config.NodeName})

From 05074e9dde8c7c7655725cc6b2eb702d4c67cdd2 Mon Sep 17 00:00:00 2001
From: Yu-Ju Hong
Date: Wed, 17 Apr 2019 16:50:37 -0700
Subject: [PATCH 097/209] GCE/Windows: add cluster-location to instance metadata

This will be used by stackdriver to annotate the logs.
---
 cluster/gce/windows/node-helper.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cluster/gce/windows/node-helper.sh b/cluster/gce/windows/node-helper.sh
index 1f62911c0e5..f1ad723bacf 100755
--- a/cluster/gce/windows/node-helper.sh
+++ b/cluster/gce/windows/node-helper.sh
@@ -19,6 +19,7 @@ function get-windows-node-instance-metadata-from-file {
   local metadata=""
   metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt,"
+  metadata+="cluster-location=${KUBE_TEMP}/cluster-location.txt,"
   metadata+="kube-env=${KUBE_TEMP}/windows-node-kube-env.yaml,"
   metadata+="kubelet-config=${KUBE_TEMP}/windows-node-kubelet-config.yaml,"
   # To get startup script output run "gcloud compute instances

From 4cd26257199fb76549d72f8e0e2a9abde2ea4d7c Mon Sep 17 00:00:00 2001
From: Yu-Ju Hong
Date: Wed, 17 Apr 2019 16:54:34 -0700
Subject: [PATCH 098/209] GCE/Windows: enable stackdriver logging agent

This change bumps the stackdriver logging agent version to v1-9,
re-enables it, and changes the script/configuration to:
  * Create /var/log in the startup script, since the fluentd
    configuration expects the directory to exist
  * Add support for collecting kubelet/kube-proxy logs
---
 cluster/gce/windows/configure.ps1       |  3 +-
 cluster/gce/windows/k8s-node-setup.psm1 | 49 ++++++++++++++++++++++---
 2 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/cluster/gce/windows/configure.ps1 b/cluster/gce/windows/configure.ps1
index a3d9d896be8..413a1d615b8 100644
--- a/cluster/gce/windows/configure.ps1
+++ b/cluster/gce/windows/configure.ps1
@@ -112,8 +112,7 @@ try {
     Set-EnvironmentVars
     Create-Directories
     Download-HelperScripts
-    # Disable Stackdrver logging until issue is fixed.
-    # InstallAndStart-LoggingAgent
+    InstallAndStart-LoggingAgent
     Create-DockerRegistryKey
 
     DownloadAndInstall-KubernetesBinaries
diff --git a/cluster/gce/windows/k8s-node-setup.psm1 b/cluster/gce/windows/k8s-node-setup.psm1
index 9abd5bab4d0..ac521b38853 100644
--- a/cluster/gce/windows/k8s-node-setup.psm1
+++ b/cluster/gce/windows/k8s-node-setup.psm1
@@ -270,11 +270,13 @@ function Disable-WindowsDefender {
 # Creates directories where other functions in this module will read and write
 # data.
 # Note: C:\tmp is required for running certain kubernetes tests.
+#       C:\var\log is used by kubelet to stored container logs and also
+#       hard-coded in the fluentd/stackdriver config for log collection.
 function Create-Directories {
   Log-Output "Creating ${env:K8S_DIR} and its subdirectories."
   ForEach ($dir in ("${env:K8S_DIR}", "${env:NODE_DIR}", "${env:LOGS_DIR}",
     "${env:CNI_DIR}", "${env:CNI_CONFIG_DIR}", "${env:MANIFESTS_DIR}",
-    "${env:PKI_DIR}"), "C:\tmp") {
+    "${env:PKI_DIR}"), "C:\tmp", "C:\var\log") {
     mkdir -Force $dir
   }
 }
@@ -1059,7 +1061,7 @@ function Create-DockerRegistryKey {
 # TODO(pjh): move the Stackdriver logging agent code below into a separate
 # module; it was put here temporarily to avoid disrupting the file layout in
 # the K8s release machinery.
-$STACKDRIVER_VERSION = 'v1-8'
+$STACKDRIVER_VERSION = 'v1-9'
 $STACKDRIVER_ROOT = 'C:\Program Files (x86)\Stackdriver'
 
 # Install and start the Stackdriver logging agent according to
@@ -1123,9 +1125,6 @@ function InstallAndStart-LoggingAgent {
   Remove-Item -Force -Recurse $tmp_dir
 }
 
-# TODO(yujuhong):
-# - Collect kubelet/kube-proxy logs.
-# - Add tag for kubernetes node name.
 $FLUENTD_CONFIG = @'
 # This configuration file for Fluentd is used to watch changes to kubernetes
 # container logs in the directory /var/lib/docker/containers/ and submit the
@@ -1184,6 +1183,34 @@
   read_from_head true
 </source>
 
+# Example:
+# I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
+<source>
+  @type tail
+  format multiline
+  multiline_flush_interval 5s
+  format_firstline /^\w\d{4}/
+  format1 /^(?<severity>\w)(?