Remove unused variables (only assigned to, never read) from test code.
These are revealed by the go/types package, which is stricter than the Go compiler about unused variables: gc treats a variable captured by a closure as used even when it is only written to, while go/types requires an actual read. See also: golang/go#8560
commit e04b91facf
parent b79fe10730
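The pattern behind every hunk in this diff is the same: a test declares a variable, assigns to it inside a closure (an updateFn, BeforeEach, or It body), and never reads it. Below is a minimal, self-contained sketch of the discrepancy — the names (demo, newThing, cloud) are illustrative, not taken from this commit — that feeds such a snippet through go/types to surface the diagnostic gc suppresses. Exact error wording varies by Go version.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

// src mirrors the pattern removed by this commit: `cloud` is declared in
// the enclosing function and assigned only inside a closure, never read.
// gc compiles this without complaint because the closure capture counts
// as a use; go/types is stricter and reports the variable as unused.
const src = `package demo

func newThing() (int, int) { return 1, 2 }

func run() {
	var thing int
	var cloud int // only assigned to, never read

	update := func() {
		thing, cloud = newThing()
	}
	update()
	_ = thing
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		// Collect every diagnostic instead of stopping at the first one.
		Error: func(err error) { fmt.Println(err) },
	}
	conf.Check("demo", fset, []*ast.File{file}, nil)
	// Expected output, roughly (wording varies by Go version):
	//   demo.go:7:6: cloud declared but not used
}
```

The fix, as in every hunk below, is the same: drop the dead declaration and receive the unwanted return value with the blank identifier `_`.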
@@ -308,7 +308,6 @@ func TestGetNodeConditionPredicate(t *testing.T) {
 func TestProcessServiceUpdate(t *testing.T) {
 
 	var controller *ServiceController
-	var cloud *fakecloud.FakeCloud
 
 	//A pair of old and new loadbalancer IP address
 	oldLBIP := "192.168.1.1"
@@ -327,7 +326,7 @@ func TestProcessServiceUpdate(t *testing.T) {
 			svc: defaultExternalService(),
 			updateFn: func(svc *v1.Service) *v1.Service {
 
-				controller, cloud, _ = newController()
+				controller, _, _ = newController()
 				controller.cache.getOrCreate("validKey")
 				return svc
 
@@ -398,7 +397,6 @@ func TestProcessServiceUpdate(t *testing.T) {
 func TestSyncService(t *testing.T) {
 
 	var controller *ServiceController
-	var cloud *fakecloud.FakeCloud
 
 	testCases := []struct {
 		testName string
@@ -410,7 +408,7 @@ func TestSyncService(t *testing.T) {
 			testName: "if an invalid service name is synced",
 			key: "invalid/key/string",
 			updateFn: func() {
-				controller, cloud, _ = newController()
+				controller, _, _ = newController()
 
 			},
 			expectedFn: func(e error) error {
@@ -429,7 +427,7 @@ func TestSyncService(t *testing.T) {
 			testName: "if an invalid service is synced",
 			key: "somethingelse",
 			updateFn: func() {
-				controller, cloud, _ = newController()
+				controller, _, _ = newController()
 				srv := controller.cache.getOrCreate("external-balancer")
 				srv.state = defaultExternalService()
 			},
@@ -443,7 +441,7 @@ func TestSyncService(t *testing.T) {
 			key: "external-balancer",
 			updateFn: func() {
 				testSvc := defaultExternalService()
-				controller, cloud, _ = newController()
+				controller, _, _ = newController()
 				controller.enqueueService(testSvc)
 				svc := controller.cache.getOrCreate("external-balancer")
 				svc.state = testSvc
@@ -45,9 +45,7 @@ func TestNewWithDelegate(t *testing.T) {
 		t.Fatal("unable to create fake client set")
 	}
 
-	delegateHealthzCalled := false
 	delegateConfig.HealthzChecks = append(delegateConfig.HealthzChecks, healthz.NamedCheck("delegate-health", func(r *http.Request) error {
-		delegateHealthzCalled = true
 		return fmt.Errorf("delegate failed healthcheck")
 	}))
 
@@ -74,9 +72,7 @@ func TestNewWithDelegate(t *testing.T) {
 	wrappingConfig.LoopbackClientConfig = &rest.Config{}
 	wrappingConfig.SwaggerConfig = DefaultSwaggerConfig()
 
-	wrappingHealthzCalled := false
 	wrappingConfig.HealthzChecks = append(wrappingConfig.HealthzChecks, healthz.NamedCheck("wrapping-health", func(r *http.Request) error {
-		wrappingHealthzCalled = true
 		return fmt.Errorf("wrapping failed healthcheck")
 	}))
 
@@ -104,25 +104,20 @@ func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) er
 
 var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 	f := framework.NewDefaultFramework("network-partition")
-	var systemPodsNo int32
 	var c clientset.Interface
 	var ns string
 	ignoreLabels := framework.ImagePullerLabels
 	var group string
 
 	BeforeEach(func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
+		_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
-		systemPodsNo = int32(len(systemPods))
 
 		// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 		framework.SkipUnlessProviderIs("gke", "aws")
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		} else {
 			group = framework.TestContext.CloudConfig.NodeInstanceGroup
 		}
 	})
 
@@ -41,7 +41,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/discovery:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
         "//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
     ],
 )
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/client-go/discovery"
-	kubeaggrcs "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"
 	customclient "k8s.io/metrics/pkg/client/custom_metrics"
 )
@@ -50,13 +49,11 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 
 	f := framework.NewDefaultFramework("stackdriver-monitoring")
 	var kubeClient clientset.Interface
-	var kubeAggrClient kubeaggrcs.Interface
 	var customMetricsClient customclient.CustomMetricsClient
 	var discoveryClient *discovery.DiscoveryClient
 
 	It("should run Custom Metrics - Stackdriver Adapter [Feature:StackdriverCustomMetrics]", func() {
 		kubeClient = f.ClientSet
-		kubeAggrClient = f.AggregatorClient
 		config, err := framework.LoadConfig()
 		if err != nil {
 			framework.Failf("Failed to load config: %s", err)
@@ -26,7 +26,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
@@ -40,14 +39,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 	f := framework.NewDefaultFramework("services")
 
 	var cs clientset.Interface
-	var internalClientset internalclientset.Interface
 	serviceLBNames := []string{}
 
 	BeforeEach(func() {
 		// This test suite requires the GCE environment.
 		framework.SkipUnlessProviderIs("gce")
 		cs = f.ClientSet
-		internalClientset = f.InternalClientset
 	})
 
 	AfterEach(func() {
@@ -372,7 +372,6 @@ var _ = SIGDescribe("kubelet", func() {
 	var (
 		nfsServerPod *v1.Pod
 		nfsIP        string
-		NFSconfig    framework.VolumeTestConfig
 		pod          *v1.Pod // client pod
 	)
 
@@ -390,7 +389,7 @@ var _ = SIGDescribe("kubelet", func() {
 
 	BeforeEach(func() {
 		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		NFSconfig, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+		_, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
 	})
 
 	AfterEach(func() {
@@ -31,7 +31,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/sets"
 	utiluuid "k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
@@ -349,7 +348,6 @@ var _ = SIGDescribe("Density", func() {
 	var nodeCpuCapacity int64
 	var nodeMemCapacity int64
 	var nodes *v1.NodeList
-	var masters sets.String
 
 	testCaseBaseName := "density"
 	missingMeasurements := 0
@@ -417,7 +415,7 @@ var _ = SIGDescribe("Density", func() {
 		ns = f.Namespace.Name
 		testPhaseDurations = timer.NewTestPhaseTimer()
 
-		masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
+		_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 
@@ -29,7 +29,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@@ -62,7 +61,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 	var nodeList *v1.NodeList
 	var systemPodsNo int
 	var ns string
-	var masterNodes sets.String
 	f := framework.NewDefaultFramework("sched-priority")
 	ignoreLabels := framework.ImagePullerLabels
 
@@ -75,7 +73,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		nodeList = &v1.NodeList{}
 
 		framework.WaitForAllNodesHealthy(cs, time.Minute)
-		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
+		_, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
 
 		err := framework.CheckTestingNSDeletedExcept(cs, ns)
 		framework.ExpectNoError(err)
@@ -147,14 +147,12 @@ const (
 // - lack of eviction of short-tolerating pod after taint removal.
 var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
 	var cs clientset.Interface
-	var nodeList *v1.NodeList
 	var ns string
 	f := framework.NewDefaultFramework("taint-control")
 
 	BeforeEach(func() {
 		cs = f.ClientSet
 		ns = f.Namespace.Name
-		nodeList = &v1.NodeList{}
 
 		framework.WaitForAllNodesHealthy(cs, time.Minute)
 
@@ -182,7 +182,6 @@ var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
 		ns     *v1.Namespace
 		node   v1.Node
 		config framework.VolumeTestConfig
-		suffix string
 	)
 
 	BeforeEach(func() {
@@ -197,7 +196,6 @@ var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
 			ServerNodeName:    node.Name,
 			WaitForCompletion: true,
 		}
-		suffix = ns.Name
 	})
 
 	// Create one of these for each of the drivers to be tested
@@ -202,7 +202,7 @@ func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.Ku
 }
 
 func runCPUManagerTests(f *framework.Framework) {
-	var cpuCap, cpuAlloc, cpuRes int64
+	var cpuCap, cpuAlloc int64
 	var oldCfg *kubeletconfig.KubeletConfiguration
 	var cpuListString, expAllowedCPUsListRegex string
 	var cpuList []int
@@ -213,7 +213,7 @@ func runCPUManagerTests(f *framework.Framework) {
 	var pod, pod1, pod2 *v1.Pod
 
 	It("should assign CPUs as expected based on the Pod spec", func() {
-		cpuCap, cpuAlloc, cpuRes = getLocalNodeCPUDetails(f)
+		cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)
 
 		// Skip CPU Manager tests altogether if the CPU capacity < 2.
 		if cpuCap < 2 {