mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-26 21:17:23 +00:00)
Use framework.ExpectNoError() instead of Expect()
The e2e test framework provides ExpectNoError() for more readable test code. This commit replaces Expect(err).NotTo(HaveOccurred()) with it.
This commit is contained in:
parent 596406581e
commit 2635b6d95c
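The change is mechanical at each call site. The sketch below shows the before/after pattern and a simplified, illustrative shape of the helper; the helper body is an assumption for illustration only (the real implementation in test/e2e/framework may differ, for example in how it handles gomega call-site offsets). Call sites inside the framework package use ExpectNoError(err, ...), and packages outside it use framework.ExpectNoError(err, ...), as the hunks below show.

// Before: raw gomega assertion, which needs a dot-import of gomega in every file.
Expect(err).NotTo(HaveOccurred(), "failed to load restclient config")

// After: the framework helper, with the same failure semantics and a shorter call site.
ExpectNoError(err, "failed to load restclient config")

// Rough shape of the helper (illustrative sketch, not the verbatim framework source):
func ExpectNoError(err error, explain ...interface{}) {
    if err != nil {
        Logf("Unexpected error occurred: %v", err) // Logf is the framework's logging helper
    }
    gomega.Expect(err).NotTo(gomega.HaveOccurred(), explain...)
}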
@@ -53,7 +53,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
     Logf("ExecWithOptions %+v", options)
 
     config, err := LoadConfig()
-    Expect(err).NotTo(HaveOccurred(), "failed to load restclient config")
+    ExpectNoError(err, "failed to load restclient config")
 
     const tty = false
 
@@ -101,7 +101,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
 func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
     stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
     Logf("Exec stderr: %q", stderr)
-    Expect(err).NotTo(HaveOccurred(),
+    ExpectNoError(err,
         "failed to execute command in pod %v, container %v: %v",
         podName, containerName, err)
     return stdout
@@ -113,14 +113,14 @@ func (f *Framework) ExecShellInContainer(podName, containerName string, cmd stri
 
 func (f *Framework) ExecCommandInPod(podName string, cmd ...string) string {
     pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred(), "failed to get pod")
+    ExpectNoError(err, "failed to get pod")
     Expect(pod.Spec.Containers).NotTo(BeEmpty())
     return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
 }
 
 func (f *Framework) ExecCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
     pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred(), "failed to get pod")
+    ExpectNoError(err, "failed to get pod")
     Expect(pod.Spec.Containers).NotTo(BeEmpty())
     return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
 }
@@ -175,7 +175,7 @@ func (f *Framework) BeforeEach() {
                 componentTexts)
         }
 
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         config.QPS = f.Options.ClientQPS
         config.Burst = f.Options.ClientBurst
         if f.Options.GroupVersion != nil {
@@ -185,23 +185,23 @@ func (f *Framework) BeforeEach() {
             config.ContentType = TestContext.KubeAPIContentType
         }
         f.ClientSet, err = clientset.NewForConfig(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         f.InternalClientset, err = internalclientset.NewForConfig(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         f.DynamicClient, err = dynamic.NewForConfig(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         // csi.storage.k8s.io is based on CRD, which is served only as JSON
         jsonConfig := config
         jsonConfig.ContentType = "application/json"
         f.CSIClientSet, err = csi.NewForConfig(jsonConfig)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         // node.k8s.io is also based on CRD
         f.NodeAPIClientSet, err = nodeapiclient.NewForConfig(jsonConfig)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
 
         // create scales getter, set GroupVersion and NegotiatedSerializer to default values
         // as they are required when creating a REST client.
@@ -212,9 +212,9 @@ func (f *Framework) BeforeEach() {
             config.NegotiatedSerializer = legacyscheme.Codecs
         }
         restClient, err := rest.RESTClientFor(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         discoClient, err := discovery.NewDiscoveryClientForConfig(config)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
         cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
         restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
         restMapper.Reset()
@@ -229,14 +229,14 @@ func (f *Framework) BeforeEach() {
         namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
             "e2e-framework": f.BaseName,
         })
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
 
         f.Namespace = namespace
 
         if TestContext.VerifyServiceAccount {
             By("Waiting for a default service account to be provisioned in namespace")
             err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
-            Expect(err).NotTo(HaveOccurred())
+            ExpectNoError(err)
         } else {
             Logf("Skipping waiting for service account")
         }
@@ -20,8 +20,6 @@ import (
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
 
-    . "github.com/onsi/gomega"
-
     "k8s.io/klog"
 )
@@ -53,7 +51,7 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
 func NVIDIADevicePlugin() *v1.Pod {
     ds, err := DsFromManifest(GPUDevicePluginDSYAML)
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
@@ -22,7 +22,6 @@ go_library(
         "//test/e2e/manifest:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
@@ -54,7 +54,6 @@ import (
     testutils "k8s.io/kubernetes/test/utils"
 
     . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
 )
 
 const (
@@ -760,7 +759,7 @@ func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string]
     svcPorts := make(map[string]v1.ServicePort)
     if includeDefaultBackend {
         defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
-        Expect(err).NotTo(HaveOccurred())
+        framework.ExpectNoError(err)
         svcPorts[defaultBackendName] = defaultSvc.Spec.Ports[0]
     }
 
@@ -775,7 +774,7 @@ func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string]
     }
     for _, svcName := range backendSvcs {
         svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{})
-        Expect(err).NotTo(HaveOccurred())
+        framework.ExpectNoError(err)
         svcPorts[svcName] = svc.Spec.Ports[0]
     }
     return svcPorts
@@ -28,7 +28,6 @@ import (
     "time"
 
     . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -507,7 +506,7 @@ func (config *NetworkingTestConfig) createSessionAffinityService(selector map[st
 
 func (config *NetworkingTestConfig) DeleteNodePortService() {
     err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
-    Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
+    ExpectNoError(err, "error while deleting NodePortService. err:%v)", err)
     time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
 }
 
@@ -535,13 +534,13 @@ func (config *NetworkingTestConfig) createTestPods() {
 
 func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
     _, err := config.getServiceClient().Create(serviceSpec)
-    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
+    ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
 
     err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
-    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
+    ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
 
     createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
+    ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
 
     return createdService
 }
@@ -705,7 +704,7 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n
         }
         return true, nil
     })
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
 }
 
 // Does an HTTP GET, but does not reuse TCP connections
@@ -20,7 +20,6 @@ go_library(
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
         "//vendor/google.golang.org/api/googleapi:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
@@ -31,8 +31,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     cloudprovider "k8s.io/cloud-provider"
     "k8s.io/kubernetes/test/e2e/framework"
-
-    . "github.com/onsi/gomega"
 )
 
 const (
@@ -118,7 +116,7 @@ func GetClusterName(instancePrefix string) string {
 // From cluster/gce/util.sh, all firewall rules should be consistent with the ones created by startup scripts.
 func GetE2eFirewalls(masterName, masterTag, nodeTag, network, clusterIpRange string) []*compute.Firewall {
     instancePrefix, err := GetInstancePrefix(masterName)
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     clusterName := GetClusterName(instancePrefix)
 
     fws := []*compute.Firewall{}
@@ -27,7 +27,6 @@ import (
     "time"
 
     . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
     compute "google.golang.org/api/compute/v1"
     "google.golang.org/api/googleapi"
     "k8s.io/api/core/v1"
@@ -137,7 +136,7 @@ func (cont *GCEIngressController) ListGlobalForwardingRules() []*compute.Forward
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     fwdList := []*compute.ForwardingRule{}
     l, err := gceCloud.ListGlobalForwardingRules()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, fwd := range l {
         if cont.isOwned(fwd.Name) {
             fwdList = append(fwdList, fwd)
@@ -171,7 +170,7 @@ func (cont *GCEIngressController) deleteForwardingRule(del bool) string {
 func (cont *GCEIngressController) GetGlobalAddress(ipName string) *compute.Address {
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     ip, err := gceCloud.GetGlobalAddress(ipName)
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     return ip
 }
 
@@ -199,7 +198,7 @@ func (cont *GCEIngressController) ListTargetHttpProxies() []*compute.TargetHttpP
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     tpList := []*compute.TargetHttpProxy{}
     l, err := gceCloud.ListTargetHTTPProxies()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, tp := range l {
         if cont.isOwned(tp.Name) {
             tpList = append(tpList, tp)
@@ -212,7 +211,7 @@ func (cont *GCEIngressController) ListTargetHttpsProxies() []*compute.TargetHttp
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     tpsList := []*compute.TargetHttpsProxy{}
     l, err := gceCloud.ListTargetHTTPSProxies()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, tps := range l {
         if cont.isOwned(tps.Name) {
             tpsList = append(tpsList, tps)
@@ -260,7 +259,7 @@ func (cont *GCEIngressController) ListUrlMaps() []*compute.UrlMap {
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     umList := []*compute.UrlMap{}
     l, err := gceCloud.ListURLMaps()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, um := range l {
         if cont.isOwned(um.Name) {
             umList = append(umList, um)
@@ -302,7 +301,7 @@ func (cont *GCEIngressController) ListGlobalBackendServices() []*compute.Backend
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     beList := []*compute.BackendService{}
     l, err := gceCloud.ListGlobalBackendServices()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, be := range l {
         if cont.isOwned(be.Name) {
             beList = append(beList, be)
@@ -374,7 +373,7 @@ func (cont *GCEIngressController) ListSslCertificates() []*compute.SslCertificat
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     sslList := []*compute.SslCertificate{}
     l, err := gceCloud.ListSslCertificates()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, ssl := range l {
         if cont.isOwned(ssl.Name) {
             sslList = append(sslList, ssl)
@@ -415,7 +414,7 @@ func (cont *GCEIngressController) ListInstanceGroups() []*compute.InstanceGroup
     gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
     igList := []*compute.InstanceGroup{}
     l, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone)
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     for _, ig := range l {
         if cont.isOwned(ig.Name) {
             igList = append(igList, ig)
@@ -565,7 +564,7 @@ func (cont *GCEIngressController) GetFirewallRuleName() string {
 // methods here to be consistent with rest of the code in this repo.
 func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall {
     fw, err := cont.GetFirewallRuleOrError()
-    Expect(err).NotTo(HaveOccurred())
+    framework.ExpectNoError(err)
     return fw
 }
 
@@ -61,9 +61,9 @@ func (p *Provider) FrameworkBeforeEach(f *framework.Framework) {
         externalConfig, err := clientcmd.BuildConfigFromFlags("", *kubemarkExternalKubeConfig)
         externalConfig.QPS = f.Options.ClientQPS
         externalConfig.Burst = f.Options.ClientBurst
-        Expect(err).NotTo(HaveOccurred())
+        framework.ExpectNoError(err)
         externalClient, err := clientset.NewForConfig(externalConfig)
-        Expect(err).NotTo(HaveOccurred())
+        framework.ExpectNoError(err)
         f.KubemarkExternalClusterClientSet = externalClient
         p.closeChannel = make(chan struct{})
         externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
@@ -71,7 +71,7 @@ func (p *Provider) FrameworkBeforeEach(f *framework.Framework) {
         kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
         go kubemarkNodeInformer.Informer().Run(p.closeChannel)
         p.controller, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
-        Expect(err).NotTo(HaveOccurred())
+        framework.ExpectNoError(err)
         externalInformerFactory.Start(p.closeChannel)
         Expect(p.controller.WaitForCacheSync(p.closeChannel)).To(BeTrue())
         go p.controller.Run(p.closeChannel)
@@ -45,7 +45,6 @@ import (
     imageutils "k8s.io/kubernetes/test/utils/image"
 
     . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
 )
 
 const (
@@ -349,7 +348,7 @@ func GetNodePublicIps(c clientset.Interface) ([]string, error) {
 
 func PickNodeIP(c clientset.Interface) string {
     publicIps, err := GetNodePublicIps(c)
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
     if len(publicIps) == 0 {
         Failf("got unexpected number (%d) of public IPs", len(publicIps))
     }
@@ -26,8 +26,6 @@ import (
     "strings"
     "time"
 
-    . "github.com/onsi/gomega"
-
     apps "k8s.io/api/apps/v1"
     appsV1beta2 "k8s.io/api/apps/v1beta2"
     "k8s.io/api/core/v1"
@@ -98,18 +96,18 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
 
     Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
     ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
     Logf("Parsing service from %v", mkpath("service.yaml"))
     svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
 
     Logf(fmt.Sprintf("creating " + ss.Name + " service"))
     _, err = s.c.CoreV1().Services(ns).Create(svc)
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
 
     Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
     _, err = s.c.AppsV1().StatefulSets(ns).Create(ss)
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
     s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
     return ss
 }
@@ -187,7 +185,7 @@ type VerifyStatefulPodFunc func(*v1.Pod)
 func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) {
     name := getStatefulSetPodNameAtIndex(index, ss)
     pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
+    ExpectNoError(err, fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
     verify(pod)
 }
 
@@ -829,7 +829,7 @@ func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[s
 func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
     By("Deleting namespaces")
     nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
-    Expect(err).NotTo(HaveOccurred(), "Failed to get namespace list")
+    ExpectNoError(err, "Failed to get namespace list")
     var deleted []string
     var wg sync.WaitGroup
 OUTER:
@@ -1835,7 +1835,7 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error {
             Logf("Endpoint %s/%s is not ready yet", ns, name)
             continue
         }
-        Expect(err).NotTo(HaveOccurred(), "Failed to get endpoints for %s/%s", ns, name)
+        ExpectNoError(err, "Failed to get endpoints for %s/%s", ns, name)
         if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
             Logf("Endpoint %s/%s is not ready yet", ns, name)
             continue
@@ -1867,7 +1867,7 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
     successes := 0
     options := metav1.ListOptions{LabelSelector: r.label.String()}
     currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
-    Expect(err).NotTo(HaveOccurred(), "Failed to get list of currentPods in namespace: %s", r.ns)
+    ExpectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
     for i, pod := range r.pods.Items {
         // Check that the replica list remains unchanged, otherwise we have problems.
         if !isElementOf(pod.UID, currentPods) {
@@ -2303,7 +2303,7 @@ func (b kubectlBuilder) ExecOrDie() string {
         Logf("stdout: %q", retryStr)
         Logf("err: %v", retryErr)
     }
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
     return str
 }
 
@@ -2498,7 +2498,7 @@ type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error
 func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
     By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
     events, err := eventsLister(metav1.ListOptions{}, namespace)
-    Expect(err).NotTo(HaveOccurred(), "failed to list events in namespace %q", namespace)
+    ExpectNoError(err, "failed to list events in namespace %q", namespace)
 
     By(fmt.Sprintf("Found %d events.", len(events.Items)))
     // Sort events by their first timestamp
@@ -3481,7 +3481,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
         tweak(execPod)
     }
     created, err := client.CoreV1().Pods(ns).Create(execPod)
-    Expect(err).NotTo(HaveOccurred(), "failed to create new exec pod in namespace: %s", ns)
+    ExpectNoError(err, "failed to create new exec pod in namespace: %s", ns)
     err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
         retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
         if err != nil {
@@ -3492,7 +3492,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
         }
         return retrievedPod.Status.Phase == v1.PodRunning, nil
     })
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
     return created.Name
 }
 
@@ -3517,13 +3517,13 @@ func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]s
         },
     }
     _, err := c.CoreV1().Pods(ns).Create(pod)
-    Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace %s", name, ns)
+    ExpectNoError(err, "failed to create pod %s in namespace %s", name, ns)
 }
 
 func DeletePodOrFail(c clientset.Interface, ns, name string) {
     By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
     err := c.CoreV1().Pods(ns).Delete(name, nil)
-    Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace %s", name, ns)
+    ExpectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
 }
 
 // CheckPodsRunningReady returns whether all pods whose names are listed in
@@ -168,7 +168,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config Volume
         },
     }
     endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
-    Expect(err).NotTo(HaveOccurred(), "failed to create endpoints for Gluster server")
+    ExpectNoError(err, "failed to create endpoints for Gluster server")
 
     return config, pod, ip
 }
@@ -468,19 +468,19 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
     for i, test := range tests {
         fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
         _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"cat", fileName}, test.ExpectedContent, time.Minute)
-        Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
+        ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
     }
 
     if fsGroup != nil {
         By("Checking fsGroup is correct.")
         _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
-        Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
+        ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
     }
 
     if fsType != "" {
         By("Checking fsType is correct.")
         _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute)
-        Expect(err).NotTo(HaveOccurred(), "failed: getting the right fsType %s", fsType)
+        ExpectNoError(err, "failed: getting the right fsType %s", fsType)
     }
 }
 
@@ -538,13 +538,13 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
     defer func() {
         podClient.Delete(podName, nil)
         err := waitForPodNotFoundInNamespace(client, podName, injectPod.Namespace, PodDeleteTimeout)
-        Expect(err).NotTo(HaveOccurred())
+        ExpectNoError(err)
     }()
 
     injectPod, err := podClient.Create(injectPod)
     ExpectNoError(err, "Failed to create injector pod: %v", err)
     err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
-    Expect(err).NotTo(HaveOccurred())
+    ExpectNoError(err)
 }
 
 func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {