mirror of https://github.com/k3s-io/kubernetes.git
various corrections in test/e2e package
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
parent 0f582f7c3f
commit 9e9fc2be88
test/conformance/testdata/conformance.yaml (vendored): 8 lines changed
@@ -826,7 +826,7 @@
   file: test/e2e/apps/job.go
 - testname: Jobs, active pods, graceful termination
   codename: '[sig-apps] Job should delete a job [Conformance]'
-  description: Create a job. Ensure the active pods reflect paralellism in the namespace
+  description: Create a job. Ensure the active pods reflect parallelism in the namespace
     and delete the job. Job MUST be deleted successfully.
   release: v1.15
   file: test/e2e/apps/job.go
@@ -2680,9 +2680,9 @@
   codename: '[sig-storage] EmptyDir volumes pod should support shared volumes between
     containers [Conformance]'
   description: A Pod created with an 'emptyDir' Volume, should share volumes between
-    the containeres in the pod. The two busybox image containers shoud share the volumes
-    mounted to the pod. The main container shoud wait until the sub container drops
-    a file, and main container acess the shared data.
+    the containeres in the pod. The two busybox image containers should share the
+    volumes mounted to the pod. The main container should wait until the sub container
+    drops a file, and main container access the shared data.
   release: v1.15
   file: test/e2e/common/storage/empty_dir.go
 - testname: EmptyDir, medium default, volume mode 0644
@@ -97,7 +97,7 @@ var _ = SIGDescribe("Job", func() {
 successes++
 }
 }
-framework.ExpectEqual(successes, completions, "epected %d successful job pods, but got %d", completions, successes)
+framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes)
 })
 
 ginkgo.It("should not create pods when created in suspend state", func() {
@@ -145,7 +145,7 @@ var _ = SIGDescribe("Job", func() {
 job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
-ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
+ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
 err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
 
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Job", func() {
 
 /*
 Testcase: Ensure that the pods associated with the job are removed once the job is deleted
-Description: Create a job and ensure the associated pod count is equal to paralellism count. Delete the
+Description: Create a job and ensure the associated pod count is equal to parallelism count. Delete the
 job and ensure if the pods associated with the job have been removed
 */
 ginkgo.It("should remove pods when job is deleted", func() {
@@ -232,7 +232,7 @@ var _ = SIGDescribe("Job", func() {
 job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
-ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
+ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
 err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
 
@@ -304,7 +304,7 @@ var _ = SIGDescribe("Job", func() {
 /*
 Release: v1.15
 Testname: Jobs, active pods, graceful termination
-Description: Create a job. Ensure the active pods reflect paralellism in the namespace and delete the job. Job MUST be deleted successfully.
+Description: Create a job. Ensure the active pods reflect parallelism in the namespace and delete the job. Job MUST be deleted successfully.
 */
 framework.ConformanceIt("should delete a job", func() {
 ginkgo.By("Creating a job")
@@ -432,7 +432,7 @@ var _ = SIGDescribe("Job", func() {
 job.Spec.Template.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": testNodeName}
 }
 
-framework.Logf("Creating job %q with a node hostname selector %q wth cpu request %q", job.Name, testNodeName, cpuRequest)
+framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest)
 job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
@@ -471,7 +471,7 @@ var _ = SIGDescribe("Job", func() {
 job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
-ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
+ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
 err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
 
@@ -185,7 +185,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 replicas := int32(1)
 
 // Create a ReplicaSet for a service that serves its hostname.
-// The source for the Docker containter kubernetes/serve_hostname is
+// The source for the Docker container kubernetes/serve_hostname is
 // in contrib/for-demos/serve_hostname
 framework.Logf("Creating ReplicaSet %s", name)
 newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
@@ -1838,7 +1838,7 @@ func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, ss *appsv1.S
 set := getStatefulSet(c, ss.Namespace, ss.Name)
 setUID := set.GetUID()
 if setUID == "" {
-framework.Failf("Statefulset %s mising UID", ss.Name)
+framework.Failf("Statefulset %s missing UID", ss.Name)
 }
 return wait.PollImmediate(e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, func() (bool, error) {
 pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
@@ -501,7 +501,7 @@ type podBatch struct {
 // 1. Create replication controllers that eat up all the space that should be
 // empty after setup, making sure they end up on different nodes by specifying
 // conflicting host port
-// 2. Create targer RC that will generate the load on the cluster
+// 2. Create target RC that will generate the load on the cluster
 // 3. Remove the rcs created in 1.
 func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch,
 podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error {
@@ -31,7 +31,7 @@ import (
 "github.com/onsi/ginkgo/v2"
 )
 
-// TODO: Those tests should be splitted by SIG and moved to SIG-owned directories,
+// TODO: Those tests should be split by SIG and moved to SIG-owned directories,
 //
 // however that involves also splitting the actual upgrade jobs too.
 // Figure out the eventual solution for it.
@@ -136,7 +136,7 @@ const (
 ContainerStateUnknown ContainerState = "Unknown"
 )
 
-// GetContainerState returns current state the container represents among its lifecyle
+// GetContainerState returns current state the container represents among its lifecycle
 func GetContainerState(state v1.ContainerState) ContainerState {
 if state.Waiting != nil {
 return ContainerStateWaiting
@@ -551,7 +551,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 })
 
 // The pod is in pending during volume creation until the configMap objects are available
-// or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
+// or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional.
 // Slow (~5 mins)
 ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
 volumeMountPath := "/etc/configmap-volumes"
@@ -220,8 +220,8 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 /*
 Release: v1.15
 Testname: EmptyDir, Shared volumes between containers
-Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containeres in the pod. The two busybox image containers shoud share the volumes mounted to the pod.
-The main container shoud wait until the sub container drops a file, and main container acess the shared data.
+Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containeres in the pod. The two busybox image containers should share the volumes mounted to the pod.
+The main container should wait until the sub container drops a file, and main container access the shared data.
 */
 framework.ConformanceIt("pod should support shared volumes between containers", func() {
 var (
@@ -457,7 +457,7 @@ var _ = SIGDescribe("Projected configMap", func() {
 })
 
 //The pod is in pending during volume creation until the configMap objects are available
-//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
+//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional.
 //Slow (~5 mins)
 ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
 volumeMountPath := "/etc/projected-configmap-volumes"
@@ -433,7 +433,7 @@ var _ = SIGDescribe("Secrets", func() {
 })
 
 // The secret is in pending during volume creation until the secret objects are available
-// or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timout exception unless it is marked optional.
+// or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional.
 // Slow (~5 mins)
 ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
 volumeMountPath := "/etc/secret-volumes"
@@ -162,7 +162,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
 
 // BeforeEach gets a client and makes a namespace.
 func (f *Framework) BeforeEach() {
-// DeferCleanup, in constrast to AfterEach, triggers execution in
+// DeferCleanup, in contrast to AfterEach, triggers execution in
 // first-in-last-out order. This ensures that the framework instance
 // remains valid as long as possible.
 //
@@ -796,7 +796,7 @@ func (j *TestJig) WaitForIngressToStable() {
 }
 return true, nil
 }); err != nil {
-framework.Failf("error in waiting for ingress to stablize: %v", err)
+framework.Failf("error in waiting for ingress to stabilize: %v", err)
 }
 }
 
@@ -70,7 +70,7 @@ type TestResult struct {
 Output string
 // Failure is SpecSummary.Failure.Message with varying parts stripped.
 Failure string
-// Stack is a normalized version (just file names, function parametes stripped) of
+// Stack is a normalized version (just file names, function parameters stripped) of
 // Ginkgo's FullStackTrace of a failure. Empty if no failure.
 Stack string
 // Called to normalize the actual output string before comparison if non-nil.
@@ -436,7 +436,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
 // success. If 0, then we return as soon as all endpoints succeed.
 // - There is no logical change to test results if faillures happen AFTER endpoints have succeeded,
 // hence over-padding minTries will NOT reverse a successful result and is thus not very useful yet
-// (See the TODO about checking probability, which isnt implemented yet).
+// (See the TODO about checking probability, which isn't implemented yet).
 // - maxTries is the maximum number of curl/echo attempts before an error is returned. The
 // smaller this number is, the less 'slack' there is for declaring success.
 // - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit.
@@ -24,7 +24,7 @@ import (
 )
 
 // CurrentKubeletPerfMetricsVersion is the current kubelet performance metrics
-// version. This is used by mutiple perf related data structures. We should
+// version. This is used by multiple perf related data structures. We should
 // bump up the version each time we make an incompatible change to the metrics.
 const CurrentKubeletPerfMetricsVersion = "v2"
 
@@ -688,7 +688,7 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe
 
 // handleWaitingAPIErrror handles an error from an API request in the context of a Wait function.
 // If the error is retryable, sleep the recommended delay and ignore the error.
-// If the erorr is terminal, return it.
+// If the error is terminal, return it.
 func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
 taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
 if retryNotFound && apierrors.IsNotFound(err) {
@@ -788,7 +788,7 @@ func (cont *IngressController) CreateStaticIP(name string) string {
 return ip.Address
 }
 
-// deleteStaticIPs delets all static-ips allocated through calls to
+// deleteStaticIPs deletes all static-ips allocated through calls to
 // CreateStaticIP.
 func (cont *IngressController) deleteStaticIPs() error {
 if cont.staticIPName != "" {
@@ -417,7 +417,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error {
 cache.ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
 if es, ok := obj.(*discoveryv1.EndpointSlice); ok {
-// TODO: currently we only consider addreses in 1 slice, but services with
+// TODO: currently we only consider addresses in 1 slice, but services with
 // a large number of endpoints (>1000) may have multiple slices. Some slices
 // with only a few addresses. We should check the addresses in all slices.
 if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
@@ -427,7 +427,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error {
 },
 UpdateFunc: func(old, cur interface{}) {
 if es, ok := cur.(*discoveryv1.EndpointSlice); ok {
-// TODO: currently we only consider addreses in 1 slice, but services with
+// TODO: currently we only consider addresses in 1 slice, but services with
 // a large number of endpoints (>1000) may have multiple slices. Some slices
 // with only a few addresses. We should check the addresses in all slices.
 if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
@@ -854,12 +854,12 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v
 // If the node's internal address points to localhost, then we are not
 // able to test the service reachability via that address
 if isInvalidOrLocalhostAddress(internalAddr) {
-framework.Logf("skipping testEndpointReachability() for internal adddress %s", internalAddr)
+framework.Logf("skipping testEndpointReachability() for internal address %s", internalAddr)
 continue
 }
 // Check service reachability on the node internalIP which is same family as clusterIP
 if isClusterIPV4 != netutils.IsIPv4String(internalAddr) {
-framework.Logf("skipping testEndpointReachability() for internal adddress %s as it does not match clusterIP (%s) family", internalAddr, clusterIP)
+framework.Logf("skipping testEndpointReachability() for internal address %s as it does not match clusterIP (%s) family", internalAddr, clusterIP)
 continue
 }
 
@@ -872,7 +872,7 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v
 externalAddrs := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
 for _, externalAddr := range externalAddrs {
 if isClusterIPV4 != netutils.IsIPv4String(externalAddr) {
-framework.Logf("skipping testEndpointReachability() for external adddress %s as it does not match clusterIP (%s) family", externalAddr, clusterIP)
+framework.Logf("skipping testEndpointReachability() for external address %s as it does not match clusterIP (%s) family", externalAddr, clusterIP)
 continue
 }
 err := testEndpointReachability(externalAddr, sp.NodePort, sp.Protocol, pod)
@@ -245,7 +245,7 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin
 return nil
 }
 
-// udpate updates a statefulset, and it is only used within rest.go
+// update updates a statefulset, and it is only used within rest.go
 func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
 for i := 0; i < 3; i++ {
 ss, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
@@ -1367,7 +1367,7 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
 // WatchEventSequenceVerifier ...
 // manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
 //
-// testContext cancelation signal across API boundries, e.g: context.TODO()
+// testContext cancellation signal across API boundaries, e.g: context.TODO()
 // dc sets up a client to the API
 // resourceType specify the type of resource
 // namespace select a namespace
@@ -199,7 +199,7 @@ var _ = common.SIGDescribe("Firewall rule", func() {
 framework.ExpectNoError(err)
 }()
 
-ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags")
+ginkgo.By("Accessing service through the external ip and examine got no response from the node without tags")
 err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15)
 framework.ExpectNoError(err)
 })
@@ -244,12 +244,12 @@ func assertNotReachableHTTPTimeout(ip, path string, port int, timeout time.Durat
 }
 }
 
-// testHitNodesFromOutside checkes HTTP connectivity from outside.
+// testHitNodesFromOutside checks HTTP connectivity from outside.
 func testHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {
 return testHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)
 }
 
-// testHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count.
+// testHitNodesFromOutsideWithCount checks HTTP connectivity from outside with count.
 func testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
 countToSucceed int) error {
 framework.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
@@ -512,7 +512,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
 _, err := jig.Run(nil)
 framework.ExpectNoError(err)
-// Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons.
+// Make sure acceptPod is running. There are certain chances that pod might be terminated due to unexpected reasons.
 acceptPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), acceptPod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name)
 framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning)
@@ -542,7 +542,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
 checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
 
-// Make sure dropPod is running. There are certain chances that the pod might be teminated due to unexpected reasons.
+// Make sure dropPod is running. There are certain chances that the pod might be terminated due to unexpected reasons.
 dropPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), dropPod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name)
 framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning)
@@ -152,7 +152,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error {
 if f.ScaleTestDeploy != nil {
 f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
 if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil {
-errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
+errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
 }
 }
 
@@ -3754,7 +3754,7 @@ var _ = common.SIGDescribe("Services", func() {
 })
 
 // execAffinityTestForSessionAffinityTimeout is a helper function that wrap the logic of
-// affinity test for non-load-balancer services. Session afinity will be
+// affinity test for non-load-balancer services. Session affinity will be
 // enabled when the service is created and a short timeout will be configured so
 // session affinity must change after the timeout expirese.
 func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
@@ -3853,7 +3853,7 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf
 }
 
 // execAffinityTestForNonLBServiceWithOptionalTransition is a helper function that wrap the logic of
-// affinity test for non-load-balancer services. Session afinity will be
+// affinity test for non-load-balancer services. Session affinity will be
 // enabled when the service is created. If parameter isTransitionTest is true,
 // session affinity will be switched off/on and test if the service converges
 // to a stable affinity state.
@@ -404,7 +404,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 if podName == podGroup+"1" {
 framework.Logf("Noticed Pod %q gets evicted.", podName)
 } else if podName == podGroup+"2" {
-framework.Failf("Unexepected Pod %q gets evicted.", podName)
+framework.Failf("Unexpected Pod %q gets evicted.", podName)
 return
 }
 }
@@ -394,7 +394,7 @@ func (c *MockCSICalls) Get() []MockCSICall {
 return c.calls[:]
 }
 
-// Add appens one new call at the end.
+// Add appends one new call at the end.
 func (c *MockCSICalls) Add(call MockCSICall) {
 c.mutex.Lock()
 defer c.mutex.Unlock()
@@ -49,7 +49,7 @@ func shredFile(filePath string) {
 framework.Logf("File %v successfully shredded", filePath)
 return
 }
-// Shred failed Try to remove the file for good meausure
+// Shred failed Try to remove the file for good measure
 err = os.Remove(filePath)
 framework.ExpectNoError(err, "Failed to remove service account file %s", filePath)
 
@@ -141,8 +141,8 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD
 changedRootDirFileOwnership int // Change the ownership of the file in the root directory (/mnt/volume1/file1), as part of the initial pod
 changedSubDirFileOwnership int // Change the ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the initial pod
 secondPodFsGroup int // FsGroup of the second pod
-finalExpectedRootDirFileOwnership int // Final expcted ownership of the file in the root directory (/mnt/volume1/file1), as part of the second pod
-finalExpectedSubDirFileOwnership int // Final expcted ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the second pod
+finalExpectedRootDirFileOwnership int // Final expected ownership of the file in the root directory (/mnt/volume1/file1), as part of the second pod
+finalExpectedSubDirFileOwnership int // Final expected ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the second pod
 // Whether the test can run for drivers that support volumeMountGroup capability.
 // For CSI drivers that support volumeMountGroup:
 // * OnRootMismatch policy is not supported.
@@ -670,7 +670,7 @@ func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.
 return pv, err
 }
 
-// checkProvisioning verifies that the claim is bound and has the correct properities
+// checkProvisioning verifies that the claim is bound and has the correct properties
 func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
 err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
 framework.ExpectNoError(err)
@@ -283,7 +283,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
 err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision)
 // Events are unreliable, don't depend on the event. It's used only to speed up the test.
 if err != nil {
-framework.Logf("Warning: did not get event about provisioing failed")
+framework.Logf("Warning: did not get event about provisioning failed")
 }
 
 // Check the pvc is still pending
@@ -316,7 +316,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
 case *rbacv1.RoleRef:
 // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
 // which contains all role names that are defined cluster-wide before the test starts?
-// All those names are excempt from renaming. That list could be populated by querying
+// All those names are exempt from renaming. That list could be populated by querying
 // and get extended by tests.
 if item.Name != "e2e-test-privileged-psp" {
 PatchName(f, &item.Name)
@@ -306,7 +306,7 @@ func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]stri
 return pod
 }
 
-// func to get pod spec with given volume paths, node selector lables and container commands
+// func to get pod spec with given volume paths, node selector labels and container commands
 func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
 var volumeMounts []v1.VolumeMount
 var volumes []v1.Volume
@@ -159,7 +159,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 }
 
 func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
-ginkgo.By("Verifing disk format")
+ginkgo.By("Verifying disk format")
 eagerlyScrub := false
 thinProvisioned := false
 diskFound := false
@@ -184,7 +184,7 @@ roleRef:
 apiGroup: rbac.authorization.k8s.io
 ---
 
-# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
|
||||
The files in this directory are exact copys of "kubernetes-latest" in
|
||||
The files in this directory are exact copies of "kubernetes-latest" in
|
||||
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/
|
||||
|
||||
Do not edit manually. Run ./update-hostpath.sh to refresh the content.
|
||||
|
@@ -1,4 +1,4 @@
-# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -46,7 +46,7 @@ roleRef:
 apiGroup: rbac.authorization.k8s.io
 
 ---
-# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -47,14 +47,14 @@ trap "rm -rf csi-driver-host-path" EXIT
 # Main YAML files.
 mkdir hostpath
 cat >hostpath/README.md <<EOF
-The files in this directory are exact copys of "kubernetes-latest" in
+The files in this directory are exact copies of "kubernetes-latest" in
 https://github.com/kubernetes-csi/csi-driver-host-path/tree/$hostpath_version/deploy/
 
 Do not edit manually. Run $script to refresh the content.
 EOF
 cp -r csi-driver-host-path/deploy/kubernetes-latest/hostpath hostpath/
 cat >hostpath/hostpath/e2e-test-rbac.yaml <<EOF
-# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -127,7 +127,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
 })
 framework.Logf("Result of curling the kubernetes service... (Failure ok, only testing for the sake of DNS resolution) %v ... error = %v", stdout, err)
 
-// curl returns an error if the host isnt resolved, otherwise, it will return a passing result.
+// curl returns an error if the host isn't resolved, otherwise, it will return a passing result.
 if err != nil {
 framework.ExpectNoError(err)
 }
@@ -151,7 +151,7 @@ Conformance tests.
 
 ## Testing images
 
-Once the image has been built and pushed to an accesible registry, you can run the tests using that image
+Once the image has been built and pushed to an accessible registry, you can run the tests using that image
 by having the environment variable `KUBE_TEST_REPO_LIST` set before running the tests that are using the
 image:
 