Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 18:31:15 +00:00)

fix typos

This commit is contained in:
parent 10066243df
commit e530336b18
@@ -2199,7 +2199,7 @@ func testSlowWebhookTimeoutNoError(f *framework.Framework) {
 }
 
 // createAdmissionWebhookMultiVersionTestCRDWithV1Storage creates a new CRD specifically
-// for the admissin webhook calling test.
+// for the admission webhook calling test.
 func createAdmissionWebhookMultiVersionTestCRDWithV1Storage(f *framework.Framework, opts ...crd.Option) (*crd.TestCrd, error) {
 group := fmt.Sprintf("%s.example.com", f.BaseName)
 return crd.CreateMultiVersionTestCRD(f, group, append([]crd.Option{func(crd *apiextensionsv1.CustomResourceDefinition) {
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Security Context", func() {
 /*
 Release: v1.15
 Testname: Security Context, runAsUser=0
-Description: Container is created with runAsUser option by passing uid 0 to run as root priviledged user. Pod MUST be in Succeeded phase.
+Description: Container is created with runAsUser option by passing uid 0 to run as root privileged user. Pod MUST be in Succeeded phase.
 This e2e can not be promoted to Conformance because a Conformant platform may not allow to run containers with 'uid 0' or running privileged operations.
 [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
 */
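The fixed description above covers a pod that asks to run as uid 0. Below is a minimal sketch of that shape using the core v1 API; the pod name, container name, image, and command are assumptions for illustration, and only SecurityContext.RunAsUser (a *int64) comes from the described behavior.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// rootUserPod sketches the pod shape the test description talks about:
// a single container asked to run as uid 0 via SecurityContext.RunAsUser.
func rootUserPod() *v1.Pod {
	uid := int64(0) // root; a Conformant platform may refuse to run this
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "security-context-root"}, // assumed name
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:            "test-container", // assumed
				Image:           "busybox",        // assumed image
				Command:         []string{"id"},   // assumed command
				SecurityContext: &v1.SecurityContext{RunAsUser: &uid},
			}},
		},
	}
}

func main() { _ = rootUserPod() }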
@@ -22,7 +22,7 @@ limitations under the License.
 * The test creates a server pod, exporting simple 'index.html' file.
 * Then it uses appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
-* into web server root and loadind the index.html from it.
+* into web server root and loading the index.html from it.
 *
 * These tests work only when privileged containers are allowed, exporting
 * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
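The comment block above describes the server/client pattern: a server pod exports index.html and a client pod imports it through some VolumeSource. Below is a hedged sketch of such a client pod using an NFS VolumeSource; the server address, export path, names, and image are assumptions, not values from this test.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nfsClientPod mounts the server pod's export and reads index.html from it,
// mirroring the import-and-check flow described in the comment block.
func nfsClientPod(serverIP string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "volume-client"}, // assumed name
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:         "client",
				Image:        "busybox", // assumed image
				Command:      []string{"cat", "/mnt/test/index.html"},
				VolumeMounts: []v1.VolumeMount{{Name: "test-volume", MountPath: "/mnt/test"}},
			}},
			Volumes: []v1.Volume{{
				Name: "test-volume",
				VolumeSource: v1.VolumeSource{
					// NFS is just one of the VolumeSources the comment alludes to.
					NFS: &v1.NFSVolumeSource{Server: serverIP, Path: "/", ReadOnly: true},
				},
			}},
		},
	}
}

func main() { _ = nfsClientPod("10.0.0.1") } // assumed server address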
@@ -176,7 +176,7 @@ func (r *VolumeResource) CleanupResource() error {
 ginkgo.By("Deleting pvc")
 // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
 if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
-framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
+framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isn't %v",
 r.Pv.Name, v1.PersistentVolumeReclaimDelete)
 }
 if r.Pvc != nil {
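The cleanup comment above relies on the Delete reclaim policy: removing only the PVC lets the dynamic provisioner reclaim the PV and its disk. Below is a minimal client-go sketch of that deletion, with a hypothetical namespace and claim name standing in for the test's own values.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePVC removes only the claim; with the Delete reclaim policy the
// dynamic provisioner is expected to clean up the bound PV and its disk.
func deletePVC(client kubernetes.Interface, namespace, name string) error {
	return client.CoreV1().
		PersistentVolumeClaims(namespace).
		Delete(context.TODO(), name, metav1.DeleteOptions{})
}

// In the real cleanup path the clientset and names come from the test framework.
func main() {}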
@@ -158,12 +158,12 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 
-ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
+ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 framework.ExpectEqual(isVolumeAttached, true)
 framework.ExpectNoError(verifyDiskAttachedError)
 
-ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
+ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
 verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
 
 ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
@@ -58,7 +58,7 @@ const (
 Test to verify if an invalid fstype specified in storage class fails pod creation.
 
 Steps
-1. Create StorageClass with inavlid.
+1. Create StorageClass with invalid.
 2. Create PVC which uses the StorageClass created in step 1.
 3. Wait for PV to be provisioned.
 4. Wait for PVC's status to become Bound.
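For step 1 above, here is a hedged sketch of a StorageClass carrying an invalid fstype parameter; the class name, provisioner, and the bogus fstype value are assumptions for illustration, not taken from this test file.

package main

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// invalidFstypeStorageClass builds a StorageClass whose fstype parameter is not
// a real filesystem, so a pod using the provisioned volume should fail to mount it.
func invalidFstypeStorageClass() *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "invalid-fstype-sc"}, // assumed name
		Provisioner: "kubernetes.io/vsphere-volume",               // assumed provisioner
		Parameters:  map[string]string{"fstype": "ext10"},         // deliberately invalid value
	}
}

func main() { _ = invalidFstypeStorageClass() }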
@@ -45,7 +45,7 @@ func TestWebhookLoopback(t *testing.T) {
 ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
 },
 ModifyServerConfig: func(config *controlplane.Config) {
-// Avoid resolveable kubernetes service
+// Avoid resolvable kubernetes service
 config.ExtraConfig.EndpointReconcilerType = reconcilers.NoneEndpointReconcilerType
 
 // Hook into audit to watch requests
@@ -211,7 +211,7 @@ func assertStorageVersionEntries(t *testing.T, client kubernetes.Interface,
 return false, nil
 }
 if sv.Status.StorageVersions[0].APIServerID != firstID {
-lastErr = fmt.Errorf("unexpected fisrt storage version entry id, expected %v, got: %v",
+lastErr = fmt.Errorf("unexpected first storage version entry id, expected %v, got: %v",
 firstID, sv.Status.StorageVersions[0].APIServerID)
 return false, nil
 }
@@ -17,7 +17,7 @@ limitations under the License.
 /*
 This soak tests places a specified number of pods on each node and then
 repeatedly sends queries to a service running on these pods via
-a serivce
+a service
 */
 
 package main
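The header comment above describes the soak pattern: pods on every node fronted by a service that is queried over and over. Below is a rough sketch of such a query loop in plain Go; the URL, iteration count, pacing, and timeout are assumptions, not values from this tool.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// soakService repeatedly queries a service endpoint and counts failed responses,
// the same shape of loop the file comment describes.
func soakService(url string, iterations int) int {
	client := &http.Client{Timeout: 5 * time.Second} // assumed timeout
	failures := 0
	for i := 0; i < iterations; i++ {
		resp, err := client.Get(url)
		if err != nil || resp.StatusCode != http.StatusOK {
			failures++
		}
		if resp != nil {
			resp.Body.Close()
		}
		time.Sleep(time.Second) // assumed pacing between queries
	}
	return failures
}

func main() {
	// Assumed service URL and iteration count, for illustration only.
	fmt.Println("failures:", soakService("http://example-service:8080", 10))
}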