various corrections in test/e2e package

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Humble Chirammal 2022-09-16 16:58:13 +05:30
parent 0f582f7c3f
commit 9e9fc2be88
43 changed files with 65 additions and 65 deletions

View File

@@ -826,7 +826,7 @@
 file: test/e2e/apps/job.go
 - testname: Jobs, active pods, graceful termination
 codename: '[sig-apps] Job should delete a job [Conformance]'
-description: Create a job. Ensure the active pods reflect paralellism in the namespace
+description: Create a job. Ensure the active pods reflect parallelism in the namespace
 and delete the job. Job MUST be deleted successfully.
 release: v1.15
 file: test/e2e/apps/job.go
@@ -2165,7 +2165,7 @@
 killed and restarted incrementing restart count to 1. The liveness probe must
 fail again after restart once the http handler for /healthz enpoind on the Pod
 returns an http error after 10 seconds from the start. Restart counts MUST increment
-everytime health check fails, measure upto 5 restart.
+every time health check fails, measure up to 5 restart.
 release: v1.9
 file: test/e2e/common/node/container_probe.go
 - testname: Pod readiness probe, with initial delay
@@ -2680,9 +2680,9 @@
 codename: '[sig-storage] EmptyDir volumes pod should support shared volumes between
 containers [Conformance]'
 description: A Pod created with an 'emptyDir' Volume, should share volumes between
-the containeres in the pod. The two busybox image containers shoud share the volumes
-mounted to the pod. The main container shoud wait until the sub container drops
-a file, and main container acess the shared data.
+the containeres in the pod. The two busybox image containers should share the
+volumes mounted to the pod. The main container should wait until the sub container
+drops a file, and main container access the shared data.
 release: v1.15
 file: test/e2e/common/storage/empty_dir.go
 - testname: EmptyDir, medium default, volume mode 0644

View File

@@ -97,7 +97,7 @@ var _ = SIGDescribe("Job", func() {
 successes++
 }
 }
-framework.ExpectEqual(successes, completions, "epected %d successful job pods, but got %d", completions, successes)
+framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes)
 })
 ginkgo.It("should not create pods when created in suspend state", func() {
@@ -145,7 +145,7 @@ var _ = SIGDescribe("Job", func() {
 job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
-ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
+ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
 err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Job", func() {
 /*
 Testcase: Ensure that the pods associated with the job are removed once the job is deleted
-Description: Create a job and ensure the associated pod count is equal to paralellism count. Delete the
+Description: Create a job and ensure the associated pod count is equal to parallelism count. Delete the
 job and ensure if the pods associated with the job have been removed
 */
 ginkgo.It("should remove pods when job is deleted", func() {
@@ -232,7 +232,7 @@ var _ = SIGDescribe("Job", func() {
 job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
-ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
+ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
 err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
@@ -304,7 +304,7 @@ var _ = SIGDescribe("Job", func() {
 /*
 Release: v1.15
 Testname: Jobs, active pods, graceful termination
-Description: Create a job. Ensure the active pods reflect paralellism in the namespace and delete the job. Job MUST be deleted successfully.
+Description: Create a job. Ensure the active pods reflect parallelism in the namespace and delete the job. Job MUST be deleted successfully.
 */
 framework.ConformanceIt("should delete a job", func() {
 ginkgo.By("Creating a job")
@@ -432,7 +432,7 @@ var _ = SIGDescribe("Job", func() {
 job.Spec.Template.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": testNodeName}
 }
-framework.Logf("Creating job %q with a node hostname selector %q wth cpu request %q", job.Name, testNodeName, cpuRequest)
+framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest)
 job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
@@ -471,7 +471,7 @@ var _ = SIGDescribe("Job", func() {
 job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
-ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
+ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
 err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)

View File

@@ -185,7 +185,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 replicas := int32(1)
 // Create a ReplicaSet for a service that serves its hostname.
-// The source for the Docker containter kubernetes/serve_hostname is
+// The source for the Docker container kubernetes/serve_hostname is
 // in contrib/for-demos/serve_hostname
 framework.Logf("Creating ReplicaSet %s", name)
 newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})

View File

@@ -1838,7 +1838,7 @@ func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, ss *appsv1.S
 set := getStatefulSet(c, ss.Namespace, ss.Name)
 setUID := set.GetUID()
 if setUID == "" {
-framework.Failf("Statefulset %s mising UID", ss.Name)
+framework.Failf("Statefulset %s missing UID", ss.Name)
 }
 return wait.PollImmediate(e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, func() (bool, error) {
 pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})

View File

@@ -501,7 +501,7 @@ type podBatch struct {
 // 1. Create replication controllers that eat up all the space that should be
 // empty after setup, making sure they end up on different nodes by specifying
 // conflicting host port
-// 2. Create targer RC that will generate the load on the cluster
+// 2. Create target RC that will generate the load on the cluster
 // 3. Remove the rcs created in 1.
 func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch,
 podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error {

View File

@@ -31,7 +31,7 @@ import (
 "github.com/onsi/ginkgo/v2"
 )
-// TODO: Those tests should be splitted by SIG and moved to SIG-owned directories,
+// TODO: Those tests should be split by SIG and moved to SIG-owned directories,
 //
 // however that involves also splitting the actual upgrade jobs too.
 // Figure out the eventual solution for it.

View File

@@ -136,7 +136,7 @@ const (
 ContainerStateUnknown ContainerState = "Unknown"
 )
-// GetContainerState returns current state the container represents among its lifecyle
+// GetContainerState returns current state the container represents among its lifecycle
 func GetContainerState(state v1.ContainerState) ContainerState {
 if state.Waiting != nil {
 return ContainerStateWaiting

View File

@@ -190,7 +190,7 @@ var _ = SIGDescribe("Probing container", func() {
 /*
 Release: v1.9
 Testname: Pod liveness probe, using http endpoint, multiple restarts (slow)
-Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment everytime health check fails, measure upto 5 restart.
+Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment every time health check fails, measure up to 5 restart.
 */
 framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func() {
 livenessProbe := &v1.Probe{

View File

@@ -551,7 +551,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 })
 // The pod is in pending during volume creation until the configMap objects are available
-// or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
+// or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional.
 // Slow (~5 mins)
 ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
 volumeMountPath := "/etc/configmap-volumes"

View File

@@ -220,8 +220,8 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 /*
 Release: v1.15
 Testname: EmptyDir, Shared volumes between containers
-Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containeres in the pod. The two busybox image containers shoud share the volumes mounted to the pod.
-The main container shoud wait until the sub container drops a file, and main container acess the shared data.
+Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containeres in the pod. The two busybox image containers should share the volumes mounted to the pod.
+The main container should wait until the sub container drops a file, and main container access the shared data.
 */
 framework.ConformanceIt("pod should support shared volumes between containers", func() {
 var (

View File

@@ -457,7 +457,7 @@ var _ = SIGDescribe("Projected configMap", func() {
 })
 //The pod is in pending during volume creation until the configMap objects are available
-//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
+//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional.
 //Slow (~5 mins)
 ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
 volumeMountPath := "/etc/projected-configmap-volumes"

View File

@@ -433,7 +433,7 @@ var _ = SIGDescribe("Secrets", func() {
 })
 // The secret is in pending during volume creation until the secret objects are available
-// or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timout exception unless it is marked optional.
+// or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional.
 // Slow (~5 mins)
 ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
 volumeMountPath := "/etc/secret-volumes"

View File

@@ -162,7 +162,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
 // BeforeEach gets a client and makes a namespace.
 func (f *Framework) BeforeEach() {
-// DeferCleanup, in constrast to AfterEach, triggers execution in
+// DeferCleanup, in contrast to AfterEach, triggers execution in
 // first-in-last-out order. This ensures that the framework instance
 // remains valid as long as possible.
 //

View File

@@ -796,7 +796,7 @@ func (j *TestJig) WaitForIngressToStable() {
 }
 return true, nil
 }); err != nil {
-framework.Failf("error in waiting for ingress to stablize: %v", err)
+framework.Failf("error in waiting for ingress to stabilize: %v", err)
 }
 }

View File

@@ -70,7 +70,7 @@ type TestResult struct {
 Output string
 // Failure is SpecSummary.Failure.Message with varying parts stripped.
 Failure string
-// Stack is a normalized version (just file names, function parametes stripped) of
+// Stack is a normalized version (just file names, function parameters stripped) of
 // Ginkgo's FullStackTrace of a failure. Empty if no failure.
 Stack string
 // Called to normalize the actual output string before comparison if non-nil.

View File

@@ -436,7 +436,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
 // success. If 0, then we return as soon as all endpoints succeed.
 // - There is no logical change to test results if faillures happen AFTER endpoints have succeeded,
 // hence over-padding minTries will NOT reverse a successful result and is thus not very useful yet
-// (See the TODO about checking probability, which isnt implemented yet).
+// (See the TODO about checking probability, which isn't implemented yet).
 // - maxTries is the maximum number of curl/echo attempts before an error is returned. The
 // smaller this number is, the less 'slack' there is for declaring success.
 // - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit.

View File

@@ -24,7 +24,7 @@ import (
 )
 // CurrentKubeletPerfMetricsVersion is the current kubelet performance metrics
-// version. This is used by mutiple perf related data structures. We should
+// version. This is used by multiple perf related data structures. We should
 // bump up the version each time we make an incompatible change to the metrics.
 const CurrentKubeletPerfMetricsVersion = "v2"

View File

@@ -688,7 +688,7 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe
 // handleWaitingAPIErrror handles an error from an API request in the context of a Wait function.
 // If the error is retryable, sleep the recommended delay and ignore the error.
-// If the erorr is terminal, return it.
+// If the error is terminal, return it.
 func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
 taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
 if retryNotFound && apierrors.IsNotFound(err) {

View File

@@ -788,7 +788,7 @@ func (cont *IngressController) CreateStaticIP(name string) string {
 return ip.Address
 }
-// deleteStaticIPs delets all static-ips allocated through calls to
+// deleteStaticIPs deletes all static-ips allocated through calls to
 // CreateStaticIP.
 func (cont *IngressController) deleteStaticIPs() error {
 if cont.staticIPName != "" {

View File

@@ -417,7 +417,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error {
 cache.ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
 if es, ok := obj.(*discoveryv1.EndpointSlice); ok {
-// TODO: currently we only consider addreses in 1 slice, but services with
+// TODO: currently we only consider addresses in 1 slice, but services with
 // a large number of endpoints (>1000) may have multiple slices. Some slices
 // with only a few addresses. We should check the addresses in all slices.
 if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
@@ -427,7 +427,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error {
 },
 UpdateFunc: func(old, cur interface{}) {
 if es, ok := cur.(*discoveryv1.EndpointSlice); ok {
-// TODO: currently we only consider addreses in 1 slice, but services with
+// TODO: currently we only consider addresses in 1 slice, but services with
 // a large number of endpoints (>1000) may have multiple slices. Some slices
 // with only a few addresses. We should check the addresses in all slices.
 if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
@@ -854,12 +854,12 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v
 // If the node's internal address points to localhost, then we are not
 // able to test the service reachability via that address
 if isInvalidOrLocalhostAddress(internalAddr) {
-framework.Logf("skipping testEndpointReachability() for internal adddress %s", internalAddr)
+framework.Logf("skipping testEndpointReachability() for internal address %s", internalAddr)
 continue
 }
 // Check service reachability on the node internalIP which is same family as clusterIP
 if isClusterIPV4 != netutils.IsIPv4String(internalAddr) {
-framework.Logf("skipping testEndpointReachability() for internal adddress %s as it does not match clusterIP (%s) family", internalAddr, clusterIP)
+framework.Logf("skipping testEndpointReachability() for internal address %s as it does not match clusterIP (%s) family", internalAddr, clusterIP)
 continue
 }
@@ -872,7 +872,7 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v
 externalAddrs := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
 for _, externalAddr := range externalAddrs {
 if isClusterIPV4 != netutils.IsIPv4String(externalAddr) {
-framework.Logf("skipping testEndpointReachability() for external adddress %s as it does not match clusterIP (%s) family", externalAddr, clusterIP)
+framework.Logf("skipping testEndpointReachability() for external address %s as it does not match clusterIP (%s) family", externalAddr, clusterIP)
 continue
 }
 err := testEndpointReachability(externalAddr, sp.NodePort, sp.Protocol, pod)

View File

@@ -245,7 +245,7 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin
 return nil
 }
-// udpate updates a statefulset, and it is only used within rest.go
+// update updates a statefulset, and it is only used within rest.go
 func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
 for i := 0; i < 3; i++ {
 ss, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})

View File

@@ -1367,7 +1367,7 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
 // WatchEventSequenceVerifier ...
 // manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
 //
-// testContext cancelation signal across API boundries, e.g: context.TODO()
+// testContext cancellation signal across API boundaries, e.g: context.TODO()
 // dc sets up a client to the API
 // resourceType specify the type of resource
 // namespace select a namespace

View File

@@ -199,7 +199,7 @@ var _ = common.SIGDescribe("Firewall rule", func() {
 framework.ExpectNoError(err)
 }()
-ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags")
+ginkgo.By("Accessing service through the external ip and examine got no response from the node without tags")
 err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15)
 framework.ExpectNoError(err)
 })
@@ -244,12 +244,12 @@ func assertNotReachableHTTPTimeout(ip, path string, port int, timeout time.Durat
 }
 }
-// testHitNodesFromOutside checkes HTTP connectivity from outside.
+// testHitNodesFromOutside checks HTTP connectivity from outside.
 func testHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {
 return testHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)
 }
-// testHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count.
+// testHitNodesFromOutsideWithCount checks HTTP connectivity from outside with count.
 func testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
 countToSucceed int) error {
 framework.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)

View File

@@ -512,7 +512,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
 _, err := jig.Run(nil)
 framework.ExpectNoError(err)
-// Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons.
+// Make sure acceptPod is running. There are certain chances that pod might be terminated due to unexpected reasons.
 acceptPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), acceptPod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name)
 framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning)
@@ -542,7 +542,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
 checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
-// Make sure dropPod is running. There are certain chances that the pod might be teminated due to unexpected reasons.
+// Make sure dropPod is running. There are certain chances that the pod might be terminated due to unexpected reasons.
 dropPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), dropPod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name)
 framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning)

View File

@@ -152,7 +152,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error {
 if f.ScaleTestDeploy != nil {
 f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
 if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil {
-errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
+errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
 }
 }

View File

@@ -3754,7 +3754,7 @@ var _ = common.SIGDescribe("Services", func() {
 })
 // execAffinityTestForSessionAffinityTimeout is a helper function that wrap the logic of
-// affinity test for non-load-balancer services. Session afinity will be
+// affinity test for non-load-balancer services. Session affinity will be
 // enabled when the service is created and a short timeout will be configured so
 // session affinity must change after the timeout expirese.
 func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
@@ -3853,7 +3853,7 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf
 }
 // execAffinityTestForNonLBServiceWithOptionalTransition is a helper function that wrap the logic of
-// affinity test for non-load-balancer services. Session afinity will be
+// affinity test for non-load-balancer services. Session affinity will be
 // enabled when the service is created. If parameter isTransitionTest is true,
 // session affinity will be switched off/on and test if the service converges
 // to a stable affinity state.

View File

@@ -404,7 +404,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 if podName == podGroup+"1" {
 framework.Logf("Noticed Pod %q gets evicted.", podName)
 } else if podName == podGroup+"2" {
-framework.Failf("Unexepected Pod %q gets evicted.", podName)
+framework.Failf("Unexpected Pod %q gets evicted.", podName)
 return
 }
 }

View File

@@ -394,7 +394,7 @@ func (c *MockCSICalls) Get() []MockCSICall {
 return c.calls[:]
 }
-// Add appens one new call at the end.
+// Add appends one new call at the end.
 func (c *MockCSICalls) Add(call MockCSICall) {
 c.mutex.Lock()
 defer c.mutex.Unlock()

View File

@@ -49,7 +49,7 @@ func shredFile(filePath string) {
 framework.Logf("File %v successfully shredded", filePath)
 return
 }
-// Shred failed Try to remove the file for good meausure
+// Shred failed Try to remove the file for good measure
 err = os.Remove(filePath)
 framework.ExpectNoError(err, "Failed to remove service account file %s", filePath)

View File

@@ -141,8 +141,8 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD
 changedRootDirFileOwnership int // Change the ownership of the file in the root directory (/mnt/volume1/file1), as part of the initial pod
 changedSubDirFileOwnership int // Change the ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the initial pod
 secondPodFsGroup int // FsGroup of the second pod
-finalExpectedRootDirFileOwnership int // Final expcted ownership of the file in the root directory (/mnt/volume1/file1), as part of the second pod
-finalExpectedSubDirFileOwnership int // Final expcted ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the second pod
+finalExpectedRootDirFileOwnership int // Final expected ownership of the file in the root directory (/mnt/volume1/file1), as part of the second pod
+finalExpectedSubDirFileOwnership int // Final expected ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the second pod
 // Whether the test can run for drivers that support volumeMountGroup capability.
 // For CSI drivers that support volumeMountGroup:
 // * OnRootMismatch policy is not supported.

View File

@@ -670,7 +670,7 @@ func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.
 return pv, err
 }
-// checkProvisioning verifies that the claim is bound and has the correct properities
+// checkProvisioning verifies that the claim is bound and has the correct properties
 func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
 err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
 framework.ExpectNoError(err)

View File

@@ -225,7 +225,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err)
 }
-// TODO: write data per pod and validate it everytime
+// TODO: write data per pod and validate it every time
 err = e2epod.DeletePodWithWait(f.ClientSet, pod)
 if err != nil {

View File

@@ -283,7 +283,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
 err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision)
 // Events are unreliable, don't depend on the event. It's used only to speed up the test.
 if err != nil {
-framework.Logf("Warning: did not get event about provisioing failed")
+framework.Logf("Warning: did not get event about provisioning failed")
 }
 // Check the pvc is still pending

View File

@@ -316,7 +316,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
 case *rbacv1.RoleRef:
 // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
 // which contains all role names that are defined cluster-wide before the test starts?
-// All those names are excempt from renaming. That list could be populated by querying
+// All those names are exempt from renaming. That list could be populated by querying
 // and get extended by tests.
 if item.Name != "e2e-test-privileged-psp" {
 PatchName(f, &item.Name)

View File

@@ -306,7 +306,7 @@ func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]stri
 return pod
 }
-// func to get pod spec with given volume paths, node selector lables and container commands
+// func to get pod spec with given volume paths, node selector labels and container commands
 func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
 var volumeMounts []v1.VolumeMount
 var volumes []v1.Volume
@@ -613,7 +613,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
 return vmxPath
 }
-// verify ready node count. Try upto 3 minutes. Return true if count is expected count
+// verify ready node count. Try up to 3 minutes. Return true if count is expected count
 func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
 numNodes := 0
 for i := 0; i < 36; i++ {

View File

@@ -159,7 +159,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 }
 func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
-ginkgo.By("Verifing disk format")
+ginkgo.By("Verifying disk format")
 eagerlyScrub := false
 thinProvisioned := false
 diskFound := false

View File

@@ -184,7 +184,7 @@ roleRef:
 apiGroup: rbac.authorization.k8s.io
 ---
-# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:

View File

@@ -1,4 +1,4 @@
-The files in this directory are exact copys of "kubernetes-latest" in
+The files in this directory are exact copies of "kubernetes-latest" in
 https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/
 Do not edit manually. Run ./update-hostpath.sh to refresh the content.

View File

@@ -1,4 +1,4 @@
-# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:

View File

@@ -46,7 +46,7 @@ roleRef:
 apiGroup: rbac.authorization.k8s.io
 ---
-# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:

View File

@@ -47,14 +47,14 @@ trap "rm -rf csi-driver-host-path" EXIT
 # Main YAML files.
 mkdir hostpath
 cat >hostpath/README.md <<EOF
-The files in this directory are exact copys of "kubernetes-latest" in
+The files in this directory are exact copies of "kubernetes-latest" in
 https://github.com/kubernetes-csi/csi-driver-host-path/tree/$hostpath_version/deploy/
 Do not edit manually. Run $script to refresh the content.
 EOF
 cp -r csi-driver-host-path/deploy/kubernetes-latest/hostpath hostpath/
 cat >hostpath/hostpath/e2e-test-rbac.yaml <<EOF
-# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
+# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:

View File

@@ -127,7 +127,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
 })
 framework.Logf("Result of curling the kubernetes service... (Failure ok, only testing for the sake of DNS resolution) %v ... error = %v", stdout, err)
-// curl returns an error if the host isnt resolved, otherwise, it will return a passing result.
+// curl returns an error if the host isn't resolved, otherwise, it will return a passing result.
 if err != nil {
 framework.ExpectNoError(err)
 }

View File

@@ -151,7 +151,7 @@ Conformance tests.
 ## Testing images
-Once the image has been built and pushed to an accesible registry, you can run the tests using that image
+Once the image has been built and pushed to an accessible registry, you can run the tests using that image
 by having the environment variable `KUBE_TEST_REPO_LIST` set before running the tests that are using the
 image: