drop unused code pointed out by linter

Signed-off-by: Davanum Srinivas <davanum@gmail.com>
Davanum Srinivas 2024-04-24 19:55:26 -04:00
parent bf268f02a3
commit efdb2961b6
4 changed files with 1 addition and 460 deletions

pkg/controller/nodeipam/ipam/range_allocator.go

@@ -61,20 +61,8 @@ const (
// The no. of NodeSpec updates NC can process concurrently.
cidrUpdateWorkers = 30
// The max no. of NodeSpec updates that can be enqueued.
cidrUpdateQueueSize = 5000
// cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it.
cidrUpdateRetries = 3
// updateRetryTimeout is the time to wait before requeuing a failed node for retry
updateRetryTimeout = 250 * time.Millisecond
// maxUpdateRetryTimeout is the maximum amount of time between timeouts.
maxUpdateRetryTimeout = 5 * time.Second
// updateMaxRetries is the max retries for a failed node
updateMaxRetries = 10
)
// nodePollInterval is used when listing nodes
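
These dropped retry knobs belonged to the standard controller pattern: a failed NodeSpec update is requeued with per-item exponential backoff and abandoned once the retry budget is spent. A minimal sketch of that pattern, assuming only k8s.io/client-go/util/workqueue — an illustration, not the allocator's actual loop:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

const (
	updateRetryTimeout    = 250 * time.Millisecond // base delay between retries
	maxUpdateRetryTimeout = 5 * time.Second        // cap on the per-item backoff
	updateMaxRetries      = 10                     // retry budget per node
)

// updateNodeSpec stands in for the real node update.
func updateNodeSpec(name string) error { return nil }

func main() {
	// Per-item exponential backoff: 250ms, 500ms, 1s, ... capped at 5s.
	queue := workqueue.NewRateLimitingQueue(
		workqueue.NewItemExponentialFailureRateLimiter(updateRetryTimeout, maxUpdateRetryTimeout))
	queue.Add("node-a")

	key, _ := queue.Get()
	defer queue.Done(key)
	if err := updateNodeSpec(key.(string)); err != nil {
		if queue.NumRequeues(key) < updateMaxRetries {
			queue.AddRateLimited(key) // requeue with backoff
		} else {
			queue.Forget(key) // out of retries: drop this update
		}
		return
	}
	queue.Forget(key) // success: reset the item's failure count
	fmt.Println("updated", key)
}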

pkg/controller/nodeipam/node_ipam_controller.go

@@ -19,9 +19,6 @@ package nodeipam
import (
"context"
"fmt"
"net"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -33,18 +30,7 @@ import (
 	controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
-)
-
-const (
-	// ipamResyncInterval is the amount of time between when the cloud and node
-	// CIDR range assignments are synchronized.
-	ipamResyncInterval = 30 * time.Second
-	// ipamMaxBackoff is the maximum backoff for retrying synchronization of a
-	// given node in the error state.
-	ipamMaxBackoff = 10 * time.Second
-	// ipamInitialBackoff is the initial retry interval for retrying synchronization of a
-	// given node in the error state.
-	ipamInitialBackoff = 250 * time.Millisecond
+	"net"
 )
// ipamController is an interface abstracting an IPAM controller for testing purposes.
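
The removed backoff constants are the usual inputs to an apimachinery wait.Backoff. A hypothetical sketch of how such constants drive a sync retry (the controller's real sync loop is outside this diff):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 250 * time.Millisecond, // cf. ipamInitialBackoff
		Factor:   2.0,                    // double the delay after each failure
		Steps:    6,                      // give up after six attempts
		Cap:      10 * time.Second,       // cf. ipamMaxBackoff
	}
	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		return attempt >= 3, nil // pretend the third sync succeeds
	})
	fmt.Printf("synced after %d attempts, err=%v\n", attempt, err)
}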

test/e2e/network/service.go

@@ -23,7 +23,6 @@ import (
"fmt"
"math/rand"
"net"
"net/http"
"sort"
"strconv"
"strings"
@@ -40,7 +39,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@@ -52,7 +50,6 @@ import (
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/retry"
cloudprovider "k8s.io/cloud-provider"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
@@ -429,21 +426,6 @@ func verifyServeHostnameServiceDown(ctx context.Context, c clientset.Interface,
return fmt.Errorf("waiting for service to be down timed out")
}
// testNotReachableHTTP tests that an HTTP request doesn't connect to the given host and port.
func testNotReachableHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := e2enetwork.PokeHTTP(host, port, "/", nil)
if result.Code == 0 {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
// UDPPokeParams is a struct for UDP poke parameters.
type UDPPokeParams struct {
Timeout time.Duration
@@ -474,241 +456,6 @@ const (
// Any time we add new errors, we should audit all callers of this.
)
// pokeUDP tries to connect to a host on a port and send the given request. Callers
// can specify additional success parameters, if desired.
//
// The result status will be characterized as precisely as possible, given the
// known users of this.
//
// The result error will be populated for any status other than Success.
//
// The result response will be populated if the UDP transaction was completed, even
// if the other test params make this a failure.
func pokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPokeResult {
hostPort := net.JoinHostPort(host, strconv.Itoa(port))
url := fmt.Sprintf("udp://%s", hostPort)
ret := UDPPokeResult{}
// Sanity check inputs, because bad inputs have happened. These are the only things
// that should hard fail the test - they are basically ASSERT()s.
if host == "" {
framework.Failf("Got empty host for UDP poke (%s)", url)
return ret
}
if port == 0 {
framework.Failf("Got port==0 for UDP poke (%s)", url)
return ret
}
// Set default params.
if params == nil {
params = &UDPPokeParams{}
}
framework.Logf("Poking %v", url)
con, err := net.Dial("udp", hostPort)
if err != nil {
ret.Status = UDPError
ret.Error = err
framework.Logf("Poke(%q): %v", url, err)
return ret
}
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
if err != nil {
ret.Error = err
neterr, ok := err.(net.Error)
if ok && neterr.Timeout() {
ret.Status = UDPTimeout
} else if strings.Contains(err.Error(), "connection refused") {
ret.Status = UDPRefused
} else {
ret.Status = UDPError
}
framework.Logf("Poke(%q): %v", url, err)
return ret
}
if params.Timeout != 0 {
err = con.SetDeadline(time.Now().Add(params.Timeout))
if err != nil {
ret.Status = UDPError
ret.Error = err
framework.Logf("Poke(%q): %v", url, err)
return ret
}
}
bufsize := len(params.Response) + 1
if bufsize == 1 { // len(params.Response) == 0: no expected response, use a general-purpose buffer
bufsize = 4096
}
var buf = make([]byte, bufsize)
n, err := con.Read(buf)
if err != nil {
ret.Error = err
neterr, ok := err.(net.Error)
if ok && neterr.Timeout() {
ret.Status = UDPTimeout
} else if strings.Contains(err.Error(), "connection refused") {
ret.Status = UDPRefused
} else {
ret.Status = UDPError
}
framework.Logf("Poke(%q): %v", url, err)
return ret
}
ret.Response = buf[0:n]
if params.Response != "" && string(ret.Response) != params.Response {
ret.Status = UDPBadResponse
ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response))
framework.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Status = UDPSuccess
framework.Logf("Poke(%q): success", url)
return ret
}
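
A short usage sketch for pokeUDP, showing how a caller in this package can interpret the status values from the const block above (host and port are hypothetical):

func exampleUDPPoke() {
	result := pokeUDP("203.0.113.10", 30080, "echo hello", &UDPPokeParams{
		Timeout:  3 * time.Second,
		Response: "hello",
	})
	switch result.Status {
	case UDPSuccess:
		// the endpoint echoed the expected payload
	case UDPRefused:
		// connection refused: nothing is listening on the port
	case UDPTimeout:
		// no reply before the deadline; packets may be silently dropped
	default:
		// UDPError or UDPBadResponse; result.Error carries the detail
	}
}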
// testReachableUDP tests that the given host serves UDP on the given port.
func testReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := pokeUDP(host, port, "echo hello", &UDPPokeParams{
Timeout: 3 * time.Second,
Response: "hello",
})
if result.Status == UDPSuccess {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
// testNotReachableUDP tests that the given host doesn't serve UDP on the given port.
func testNotReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := pokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
if result.Status != UDPSuccess && result.Status != UDPError {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
// testRejectedUDP tests that the given host rejects a UDP request on the given port.
func testRejectedUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := pokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
if result.Status == UDPRefused {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("UDP service %v:%v not rejected: %v", host, port, err)
}
}
// TestHTTPHealthCheckNodePort tests an HTTP connection by the given request to the given host and port.
func TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error {
count := 0
condition := func() (bool, error) {
success, _ := testHTTPHealthCheckNodePort(host, port, request)
if success && expectSucceed ||
!success && !expectSucceed {
count++
}
if count >= threshold {
return true, nil
}
return false, nil
}
if err := wait.PollImmediate(time.Second, timeout, condition); err != nil {
return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v%v, got %d", threshold, expectSucceed, host, port, count)
}
return nil
}
func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) {
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
url := fmt.Sprintf("http://%s%s", ipPort, request)
if ip == "" || port == 0 {
framework.Failf("Got empty IP for reachability check (%s)", url)
return false, fmt.Errorf("invalid input ip or port")
}
framework.Logf("Testing HTTP health check on %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
if err != nil {
framework.Logf("Got error testing for reachability of %s: %v", url, err)
return false, err
}
defer resp.Body.Close()
// HealthCheck responder returns 503 for no local endpoints
if resp.StatusCode == 503 {
return false, nil
}
// HealthCheck responder returns 200 for non-zero local endpoints
if resp.StatusCode == 200 {
return true, nil
}
return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
}
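
For orientation: the port probed here is the Service's spec.healthCheckNodePort, which the apiserver allocates for type LoadBalancer Services with externalTrafficPolicy: Local; kube-proxy answers /healthz on it with 200 when the node has a ready local endpoint and 503 when it has none. A hypothetical Service of that shape (names and ports are illustrative, reusing this file's v1 and metav1 imports):

func buildLocalTrafficLBService() *v1.Service {
	// type: LoadBalancer plus externalTrafficPolicy: Local is what makes the
	// apiserver allocate spec.healthCheckNodePort for checks like the ones above.
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "example-lb"},
		Spec: v1.ServiceSpec{
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
			Selector:              map[string]string{"app": "example"},
			Ports:                 []v1.ServicePort{{Port: 80}},
		},
	}
}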
func testHTTPHealthCheckNodePortFromTestContainer(ctx context.Context, config *e2enetwork.NetworkingTestConfig, host string, port int, timeout time.Duration, expectSucceed bool, threshold int) error {
count := 0
pollFn := func() (bool, error) {
statusCode, err := config.GetHTTPCodeFromTestContainer(ctx,
"/healthz",
host,
port)
if err != nil {
framework.Logf("Got error reading status code from http://%s:%d/healthz via test container: %v", host, port, err)
return false, nil
}
framework.Logf("Got status code from http://%s:%d/healthz via test container: %d", host, port, statusCode)
success := statusCode == 200
if (success && expectSucceed) ||
(!success && !expectSucceed) {
count++
}
return count >= threshold, nil
}
err := wait.PollImmediate(time.Second, timeout, pollFn)
if err != nil {
return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v:%v/healthz, got %d", threshold, expectSucceed, host, port, count)
}
return nil
}
// httpGetNoConnectionPoolTimeout does an HTTP GET, but does not reuse TCP connections.
// Connection reuse can mask problems where an iptables rule has changed, because
// established connections keep working even after the rules no longer match them.
func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
tr := utilnet.SetTransportDefaults(&http.Transport{
DisableKeepAlives: true,
})
client := &http.Client{
Transport: tr,
Timeout: timeout,
}
return client.Get(url)
}
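
Because keep-alives are disabled, every GET opens a fresh TCP connection and so traverses the dataplane rules as they currently stand; a pooled connection could keep succeeding long after the iptables rule that admitted it was removed.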
func getServeHostnameService(name string) *v1.Service {
svc := defaultServeHostnameService.DeepCopy()
svc.ObjectMeta.Name = name
@@ -727,23 +474,6 @@ func waitForAPIServerUp(ctx context.Context, c clientset.Interface) error {
return fmt.Errorf("waiting for apiserver timed out")
}
// getEndpointNodesWithInternalIP returns a map of nodenames:internal-ip on which the
// endpoints of the Service are running.
func getEndpointNodesWithInternalIP(ctx context.Context, jig *e2eservice.TestJig) (map[string]string, error) {
nodesWithIPs, err := jig.GetEndpointNodesWithIP(ctx, v1.NodeInternalIP)
if err != nil {
return nil, err
}
endpointsNodeMap := make(map[string]string, len(nodesWithIPs))
for nodeName, internalIPs := range nodesWithIPs {
if len(internalIPs) < 1 {
return nil, fmt.Errorf("no internal ip found for node %s", nodeName)
}
endpointsNodeMap[nodeName] = internalIPs[0]
}
return endpointsNodeMap, nil
}
var _ = common.SIGDescribe("Services", func() {
f := framework.NewDefaultFramework("services")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -4026,62 +3756,6 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(ctx context.Context,
}
}
func execAffinityTestForLBServiceWithTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
execAffinityTestForLBServiceWithOptionalTransition(ctx, f, cs, svc, true)
}
func execAffinityTestForLBService(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
execAffinityTestForLBServiceWithOptionalTransition(ctx, f, cs, svc, false)
}
// execAffinityTestForLBServiceWithOptionalTransition is a helper function that wraps the logic of
// the affinity test for load balancer services, similar to
// execAffinityTestForNonLBServiceWithOptionalTransition.
func execAffinityTestForLBServiceWithOptionalTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name
ginkgo.By("creating service in namespace " + ns)
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
svc, err = jig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs))
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
podNodePairs, err := e2enode.PodNodePairs(ctx, cs, ns)
framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
_ = StopServeHostnameService(ctx, cs, ns, serviceName)
lb := cloudprovider.DefaultLoadBalancerName(svc)
framework.Logf("cleaning load balancer resource for %s", lb)
e2eservice.CleanupServiceResources(ctx, cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
})
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := int(svc.Spec.Ports[0].Port)
if !isTransitionTest {
if !checkAffinity(ctx, cs, nil, ingressIP, port, true) {
framework.Failf("Failed to verify affinity for loadbalance service %s/%s", ns, serviceName)
}
}
if isTransitionTest {
svc, err = jig.UpdateService(ctx, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
if !checkAffinity(ctx, cs, nil, ingressIP, port, false) {
framework.Failf("Failed to verify affinity for loadbalance service %s/%s without session affinity ", ns, serviceName)
}
svc, err = jig.UpdateService(ctx, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
if !checkAffinity(ctx, cs, nil, ingressIP, port, true) {
framework.Failf("Failed to verify affinity for loadbalance service %s/%s with session affinity ", ns, serviceName)
}
}
}
func createAndGetExternalServiceFQDN(ctx context.Context, cs clientset.Interface, ns, serviceName string) string {
_, _, err := StartServeHostnameService(ctx, cs, getServeHostnameService(serviceName), ns, 2)
framework.ExpectNoError(err, "Expected Service %s to be running", serviceName)
@@ -4133,31 +3807,6 @@ func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name
return pod
}
// checkReachabilityFromPod checks reachability from the specified pod.
func checkReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
_, err := e2eoutput.RunHostCmd(namespace, pod, cmd)
if expectToBeReachable && err != nil {
framework.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
return false, nil
}
if !expectToBeReachable && err == nil {
framework.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
// enableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer
// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others.
func enableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) {
return framework.TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
}
func validatePorts(ep, expectedEndpoints portsByPodUID) error {
if len(ep) != len(expectedEndpoints) {
// should not happen because we check this condition before

test/integration/ipamperf/util.go

@@ -1,82 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipamperf
import (
"context"
"time"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
const (
maxCreateRetries = 10
retryDelay = 10 * time.Second
)
var (
baseNodeTemplate = &v1.Node{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sample-node-",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: v1.NodeRunning,
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
}
)
func deleteNodes(clientSet *clientset.Clientset) {
klog.Info("Deleting nodes")
noGrace := int64(0)
if err := clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil {
klog.Errorf("Error deleting node: %v", err)
}
}
func createNodes(clientSet *clientset.Clientset, config *Config) error {
klog.Infof("Creating %d nodes", config.NumNodes)
for i := 0; i < config.NumNodes; i++ {
var err error
for j := 0; j < maxCreateRetries; j++ {
if _, err = clientSet.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate, metav1.CreateOptions{}); err != nil && apierrors.IsServerTimeout(err) {
klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay)
time.Sleep(retryDelay)
continue
}
break
}
if err != nil {
klog.Errorf("Error creating nodes: %v", err)
return err
}
}
klog.Infof("%d nodes created", config.NumNodes)
return nil
}
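
A hypothetical driver for the helpers above; it assumes only that the package's Config carries NumNodes, since its other fields are not part of this diff:

// runNodeSetup is a hypothetical driver for createNodes/deleteNodes.
func runNodeSetup(clientSet *clientset.Clientset) error {
	config := &Config{NumNodes: 100} // illustrative size
	defer deleteNodes(clientSet)     // clean up even if the run fails
	if err := createNodes(clientSet, config); err != nil {
		return err
	}
	// ... exercise the IPAM allocators against the created nodes ...
	return nil
}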