Merge pull request #85469 from hwdef/fix-staticcheck12

test/e2e/apimachinery: fix staticcheck warning
Kubernetes Prow Robot 2019-11-26 12:17:34 -08:00 committed by GitHub
commit a82f7cdd5d
6 changed files with 36 additions and 83 deletions

@@ -65,7 +65,6 @@ pkg/volume/util/fsquota/common
pkg/volume/util/operationexecutor
pkg/volume/util/subpath
pkg/volume/vsphere_volume
test/e2e/apimachinery
test/e2e/apps
test/e2e/auth
test/e2e/autoscaling
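
This first hunk appears to touch the repository's staticcheck exception list (the surrounding entries suggest hack/.staticcheck_failures): packages listed there are tolerated to have outstanding staticcheck findings, and once a package is clean its entry is removed so the verify job enforces it again. A minimal sketch of what removing test/e2e/apimachinery implies, assuming the staticcheck binary is on the PATH; the test file and name below are hypothetical and not the actual hack/verify-staticcheck.sh logic:

    // verify_clean_test.go (illustrative only): once a package is off the
    // exception list, staticcheck has to exit cleanly for it.
    package verify

    import (
        "os/exec"
        "testing"
    )

    func TestApimachineryE2EIsStaticcheckClean(t *testing.T) {
        cmd := exec.Command("staticcheck", "./test/e2e/apimachinery/...")
        out, err := cmd.CombinedOutput()
        if err != nil {
            // staticcheck exits non-zero whenever it reports findings.
            t.Fatalf("staticcheck still reports findings:\n%s", out)
        }
    }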

@@ -32,7 +32,6 @@ import (
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
clientset "k8s.io/client-go/kubernetes"
@@ -49,8 +48,6 @@ import (
"github.com/onsi/ginkgo"
)
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.10.0")
const (
aggregatorServicePort = 7443
)
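
In the aggregator test file, the deleted serverAggregatorVersion variable was no longer read anywhere, which is the kind of unused declaration staticcheck's unused check (U1000) reports; deleting it also orphans the utilversion import removed in the previous hunk, and since an unused import is a compile error in Go the two have to go together. A condensed sketch of the removed pattern (package name assumed):

    package aggregator

    import utilversion "k8s.io/apimachinery/pkg/util/version"

    // Nothing in the package reads this any more, so staticcheck flags it as
    // unused; dropping it leaves the utilversion import dangling, hence both
    // declarations disappear in the same commit.
    var serverAggregatorVersion = utilversion.MustParseSemantic("v1.10.0")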
@@ -405,7 +402,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
}
pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
framework.ExpectNoError(result.Error(), "getting pods for flunders service")
framework.ExpectNoError(err, "getting pods for flunders service")
// kubectl get flunders -v 9
// curl -k -v -XGET https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
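
In the hunk above, the first ExpectNoError line reads as the old code and the second as its replacement: the err returned by the Pods List call was assigned but never inspected, while the assertion looked at an unrelated result value (presumably copied from a neighbouring RESTClient check). That is the "value assigned but never used" finding (staticcheck SA4006), and the fix is to assert on the right error. A self-contained illustration of the same pattern, with os.ReadFile and illustrative file names standing in for the List call:

    package main

    import (
        "fmt"
        "os"
    )

    // buggy mirrors the old code: the second err is assigned but never read,
    // because the check inspects an unrelated value (staticcheck SA4006).
    func buggy() {
        raw, err := os.ReadFile("flunders.json")
        if err != nil {
            return
        }
        pods, err := os.ReadFile("pods.json")
        if raw == nil { // wrong value inspected; err from the second read is ignored
            return
        }
        fmt.Println(len(pods))
    }

    // fixed mirrors the new code: assert on the error the call actually returned.
    func fixed() {
        pods, err := os.ReadFile("pods.json")
        if err != nil {
            fmt.Println("listing pods failed:", err)
            return
        }
        fmt.Println(len(pods))
    }

    func main() {
        buggy()
        fixed()
    }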
@@ -461,7 +458,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
unstruct, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
_, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
// kubectl get flunders
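
Same check, different shape: the object returned by dynamicClient.Create was assigned back to unstruct but never read afterwards, so the new code discards it with the blank identifier and keeps only err, which is what the following assertion uses. A small runnable sketch of that cleanup (the strings.Reader here is just a stand-in for a call whose first return value is not needed):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        r := strings.NewReader("flunder")
        buf := make([]byte, 4)

        // Before-style: the returned count was stored in a variable nothing
        // read afterwards, which staticcheck flags as an unused assignment.
        //   n, err := r.Read(buf)

        // After-style: discard the unneeded value and keep only the error.
        _, err := r.Read(buf)
        if err != nil {
            fmt.Println("read failed:", err)
            return
        }
        fmt.Printf("read %q\n", buf)
    }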

@@ -326,7 +326,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
return false, fmt.Errorf("failed to list pods: %v", err)
}
// We intentionally don't wait the number of pods to reach
// rc.Spec.Replicas. We want to see if the garbage collector and the
@@ -384,7 +384,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %v", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@@ -411,7 +411,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
rcs, err := rcClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rcs: %v", err)
return false, fmt.Errorf("failed to list rcs: %v", err)
}
if len(rcs.Items) != 0 {
return false, nil
@@ -450,7 +450,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %v", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@@ -499,7 +499,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
rsList, err := rsClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
return false, fmt.Errorf("failed to list rs: %v", err)
}
return len(rsList.Items) > 0, nil
@@ -558,7 +558,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
rsList, err := rsClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
return false, fmt.Errorf("failed to list rs: %v", err)
}
return len(rsList.Items) > 0, nil
@@ -631,7 +631,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %v", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@@ -726,7 +726,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc1, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %v", err)
}
if rc1.Status.Replicas == *rc1.Spec.Replicas {
return true, nil
@@ -854,7 +854,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) {
pods, err2 = podClient.List(metav1.ListOptions{})
if err2 != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
return false, fmt.Errorf("failed to list pods: %v", err)
}
if len(pods.Items) == 0 {
return true, nil
@@ -1057,7 +1057,7 @@ var _ = SIGDescribe("Garbage collector", func() {
return false, nil
}
if err != nil && !errors.IsNotFound(err) {
return false, fmt.Errorf("Failed to get owner: %v", err)
return false, fmt.Errorf("failed to get owner: %v", err)
}
return true, nil
}); err != nil {
@@ -1086,7 +1086,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list jobs: %v", err)
return false, fmt.Errorf("failed to list jobs: %v", err)
}
return len(jobs.Items) > 0, nil
})
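
Every garbage-collector hunk above is the same mechanical change: error strings passed to fmt.Errorf are lowercased. That is staticcheck's error-string style rule (ST1005): error strings should not be capitalized because they are routinely wrapped and end up mid-sentence in logs. A self-contained before/after showing why the capitalized form reads badly once wrapped:

    package main

    import (
        "errors"
        "fmt"
    )

    func listPodsOld() error {
        // ST1005: capitalized error strings look wrong once wrapped,
        // e.g. "polling gave up: Failed to list pods: connection refused".
        return fmt.Errorf("Failed to list pods: %v", errors.New("connection refused"))
    }

    func listPodsNew() error {
        return fmt.Errorf("failed to list pods: %v", errors.New("connection refused"))
    }

    func main() {
        fmt.Println(fmt.Errorf("polling gave up: %v", listPodsOld()))
        fmt.Println(fmt.Errorf("polling gave up: %v", listPodsNew()))
    }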

@@ -67,7 +67,7 @@ func testingPod(name, value string) v1.Pod {
func observeCreation(w watch.Interface) {
select {
case event, _ := <-w.ResultChan():
case event := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe the creation: %v", event)
}
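
In observeCreation (and again in observerUpdate below), the comma-blank form of the channel receive is dropped: `case event, _ := <-w.ResultChan():` becomes `case event := <-w.ResultChan():`. The blank identifier on a receive adds nothing unless the second, ok value is actually examined, which is the redundancy staticcheck flags (presumably its S1005 "drop unnecessary blank identifier" check). A minimal runnable version of the pattern:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ch := make(chan string, 1)
        ch <- "ADDED"

        select {
        // Before-style (flagged): case event, _ := <-ch:
        // After-style: the blank identifier is simply dropped.
        case event := <-ch:
            fmt.Println("observed event:", event)
        case <-time.After(time.Second):
            fmt.Println("timed out waiting for an event")
        }
    }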
@@ -82,7 +82,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
timeout := false
for !updated && !timeout {
select {
case event, _ := <-w.ResultChan():
case event := <-w.ResultChan():
if event.Type == watch.Modified {
if expectedUpdate(event.Object) {
updated = true
@@ -95,7 +95,6 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
if !updated {
framework.Failf("Failed to observe pod update")
}
return
}
var _ = SIGDescribe("Generated clientset", func() {
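
The other change in this file removes a bare return at the end of observerUpdate; a trailing return in a function with no result values is redundant control flow (staticcheck S1023) and can simply be deleted. Sketch:

    package main

    import "fmt"

    // Before-style (flagged by S1023): a bare return as the last statement.
    //
    //    func report(updated bool) {
    //        if !updated {
    //            fmt.Println("Failed to observe pod update")
    //        }
    //        return
    //    }

    // After-style: just let the function end.
    func report(updated bool) {
        if !updated {
            fmt.Println("Failed to observe pod update")
        }
    }

    func main() {
        report(false)
        report(true)
    }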

@@ -64,7 +64,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
@@ -243,7 +243,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceCPU] = resource.MustParse("600m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectError(err)
ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)")
@@ -255,7 +255,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectError(err)
ginkgo.By("Ensuring a pod cannot update its resource requirements")
@@ -679,7 +679,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
@@ -718,7 +718,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
@@ -995,7 +995,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
@@ -1034,7 +1034,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
@@ -1144,7 +1144,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating 2nd pod with priority class should fail")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
framework.ExpectError(err)
ginkgo.By("Deleting first pod")
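
All of the resource-quota hunks are one repeated fix: the object returned by Create was assigned back into a variable (resourceQuota, pod, pod2) that the test never reads again, so the result is now discarded with the blank identifier and only err is kept for the assertion. A condensed, self-contained version of the pattern; createPod below is a hypothetical stand-in for the clientset call:

    package main

    import (
        "errors"
        "fmt"
    )

    type Pod struct{ Name string }

    // createPod stands in for f.ClientSet.CoreV1().Pods(ns).Create(pod):
    // it returns the stored object and an error.
    func createPod(p *Pod) (*Pod, error) {
        if p.Name == "" {
            return nil, errors.New("pod name must not be empty")
        }
        return p, nil
    }

    func main() {
        pod := &Pod{Name: "fail-pod"}

        // Before-style: pod is reassigned but never read again (flagged).
        //   pod, err := createPod(pod)

        // After-style: only the error matters to the test assertion.
        _, err := createPod(pod)
        if err != nil {
            fmt.Println("create rejected:", err)
            return
        }
        fmt.Println("created", pod.Name)
    }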

@@ -59,20 +59,19 @@ const (
serviceName = "e2e-test-webhook"
roleBindingName = "webhook-auth-reader"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
nonDeletableConfigmapName = "nondeletable-configmap"
allowedConfigMapName = "allowed-configmap"
failNamespaceLabelKey = "fail-closed-webhook"
failNamespaceLabelValue = "yes"
failNamespaceName = "fail-closed-namesapce"
addedLabelKey = "added-label"
addedLabelValue = "yes"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
failNamespaceLabelKey = "fail-closed-webhook"
failNamespaceLabelValue = "yes"
failNamespaceName = "fail-closed-namesapce"
addedLabelKey = "added-label"
addedLabelValue = "yes"
)
var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
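
The two constant blocks above are the old and the new version of the same declaration: nonDeletableConfigmapName goes away because its only users, the blocking-deletion helpers deleted further down, are removed in the same commit. The neighbouring constants show up in the hunk as well, presumably only because gofmt re-aligns the = column of a grouped const block once its longest identifier disappears, as in this illustrative fragment:

    package webhookconsts

    // With the longest name (nonDeletableConfigmapName) gone, gofmt narrows the
    // alignment column for the whole group, so untouched lines are reflowed too.
    const (
        disallowedConfigMapName = "disallowed-configmap"
        allowedConfigMapName    = "allowed-configmap"
        failNamespaceLabelKey   = "fail-closed-webhook"
        failNamespaceLabelValue = "yes"
    )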
@@ -1187,36 +1186,6 @@ func testWebhook(f *framework.Framework) {
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}
func testBlockingConfigmapDeletion(f *framework.Framework) {
ginkgo.By("create a configmap that should be denied by the webhook when deleting")
client := f.ClientSet
configmap := nonDeletableConfigmap(f)
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
ginkgo.By("deleting the configmap should be denied by the webhook")
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configmap.Name, &metav1.DeleteOptions{})
framework.ExpectError(err, "deleting configmap %s in namespace: %s should be denied", configmap.Name, f.Namespace.Name)
expectedErrMsg1 := "the configmap cannot be deleted because it contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}
ginkgo.By("remove the offending key and value from the configmap data")
toCompliantFn := func(cm *v1.ConfigMap) {
if cm.Data == nil {
cm.Data = map[string]string{}
}
cm.Data["webhook-e2e-test"] = "webhook-allow"
}
_, err = updateConfigMap(client, f.Namespace.Name, configmap.Name, toCompliantFn)
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
ginkgo.By("deleting the updated configmap should be successful")
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configmap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
}
func testAttachingPodWebhook(f *framework.Framework) {
ginkgo.By("create a pod")
client := f.ClientSet
@@ -1659,17 +1628,6 @@ func namedNonCompliantConfigMap(name string, f *framework.Framework) *v1.ConfigM
}
}
func nonDeletableConfigmap(f *framework.Framework) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: nonDeletableConfigmapName,
},
Data: map[string]string{
"webhook-e2e-test": "webhook-nondeletable",
},
}
}
func toBeMutatedConfigMap(f *framework.Framework) *v1.ConfigMap {
return namedToBeMutatedConfigMap("to-be-mutated", f)
}
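
The final hunks delete testBlockingConfigmapDeletion and its helper nonDeletableConfigmap outright. The diff does not show their call sites, but unexported identifiers with no remaining references are exactly what staticcheck's unused check (U1000) reports, and the usual fix is to delete the dead code (together with the nonDeletableConfigmapName constant above) rather than keep it around. A tiny self-contained illustration of that rule; all names below are hypothetical:

    package webhook

    import "fmt"

    // used is reachable from an exported entry point, so it is not reported.
    func used() string { return "allowed-configmap" }

    // unusedHelper has no callers anywhere in the package; staticcheck's unused
    // check reports it, and deleting it is the usual fix.
    func unusedHelper() string { return "nondeletable-configmap" }

    // Run is a stand-in exported entry point.
    func Run() { fmt.Println(used()) }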