Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
Merge pull request #81771 from RainbowMango/pr_fix_static_error_for_gce

Cleanup gce static analysis issues

This commit is contained in: ec344a1c77
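
Taken together, the hunks below are mechanical cleanups for static-analysis findings in the legacy GCE cloud provider: the deprecated oauth2.NoContext becomes context.Background(), two unused helpers (getProjectsAPIEndpointBeta, alphaFeatureEnabled) are deleted, ctx parameters that were being overwritten before first use are renamed to timeoutCtx, tests gain require.NoError checks for errors that were previously assigned but never read, and the gce package is dropped from what appears to be the repository's static-analysis exception list (first hunk).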
@@ -251,7 +251,6 @@ vendor/k8s.io/kubectl/pkg/describe/versioned
 vendor/k8s.io/kubectl/pkg/scale
 vendor/k8s.io/legacy-cloud-providers/aws
 vendor/k8s.io/legacy-cloud-providers/azure
-vendor/k8s.io/legacy-cloud-providers/gce
 vendor/k8s.io/legacy-cloud-providers/vsphere
 vendor/k8s.io/metrics/pkg/client/custom_metrics
 vendor/k8s.io/sample-controller
@@ -863,7 +863,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
 	if tokenSource == nil {
 		var err error
 		tokenSource, err = google.DefaultTokenSource(
-			oauth2.NoContext,
+			context.Background(),
 			compute.CloudPlatformScope,
 			compute.ComputeScope)
 		klog.Infof("Using DefaultTokenSource %#v", tokenSource)
@@ -890,7 +890,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
 		return nil, err
 	}

-	return oauth2.NewClient(oauth2.NoContext, tokenSource), nil
+	return oauth2.NewClient(context.Background(), tokenSource), nil
 }

 func (manager *gceServiceManager) getProjectsAPIEndpoint() string {
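
Both hunks above make the same substitution: oauth2.NoContext is documented as deprecated in golang.org/x/oauth2 in favor of passing a real context, so staticcheck flags its use (the deprecation check is likely SA1019). A minimal sketch of the migrated call, with a static token source standing in for google.DefaultTokenSource:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	// A static token stands in for real GCE credentials here.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "fake-token"})

	// Before: oauth2.NewClient(oauth2.NoContext, ts)
	// After:  a real context; NoContext is a deprecated alias.
	client := oauth2.NewClient(context.Background(), ts)
	fmt.Println(client != nil) // true
}

The same substitution appears once more in the NewAltTokenSource hunk near the end of this diff.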
@@ -901,12 +901,3 @@ func (manager *gceServiceManager) getProjectsAPIEndpoint() string {

 	return projectsAPIEndpoint
 }
-
-func (manager *gceServiceManager) getProjectsAPIEndpointBeta() string {
-	projectsAPIEndpoint := gceComputeAPIEndpointBeta + "projects/"
-	if manager.gce.service != nil {
-		projectsAPIEndpoint = manager.gce.serviceBeta.BasePath
-	}
-
-	return projectsAPIEndpoint
-}
@@ -18,10 +18,6 @@ limitations under the License.

 package gce

 import (
 	"fmt"
 )

 const (
 	// AlphaFeatureNetworkTiers allows Services backed by a GCP load balancer to choose
 	// what network tier to use. Currently supports "Standard" and "Premium" (default).
@@ -54,10 +50,3 @@ func NewAlphaFeatureGate(features []string) *AlphaFeatureGate {
 	}
 	return &AlphaFeatureGate{featureMap}
 }
-
-func (g *Cloud) alphaFeatureEnabled(feature string) error {
-	if !g.AlphaFeatureGate.Enabled(feature) {
-		return fmt.Errorf("alpha feature %q is not enabled", feature)
-	}
-	return nil
-}
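
The two deletions above (getProjectsAPIEndpointBeta and alphaFeatureEnabled) are plain dead-code removals: once nothing calls an unexported function, staticcheck reports it as unused (check U1000). Assuming staticcheck is installed, with the package path taken from the hunks in this diff, findings of this class can be reproduced locally with:

	go install honnef.co/go/tools/cmd/staticcheck@latest
	staticcheck ./vendor/k8s.io/legacy-cloud-providers/gce/...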
@@ -111,7 +111,7 @@ func (g *Cloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAdd
 // NodeAddressesByProviderID will not be called from the node that is requesting this ID.
 // i.e. metadata service and other local methods cannot be used here
 func (g *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
-	ctx, cancel := cloud.ContextWithCallTimeout()
+	timeoutCtx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	_, zone, name, err := splitProviderID(providerID)
@@ -119,7 +119,7 @@ func (g *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string
 		return []v1.NodeAddress{}, err
 	}

-	instance, err := g.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone))
+	instance, err := g.c.Instances().Get(timeoutCtx, meta.ZonalKey(canonicalizeInstanceName(name), zone))
 	if err != nil {
 		return []v1.NodeAddress{}, fmt.Errorf("error while querying for providerID %q: %v", providerID, err)
 	}
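
The change in NodeAddressesByProviderID is a rename, not a behavior change: assigning the call-timeout context to ctx overwrote the function's own parameter before it was ever read, which staticcheck flags (likely SA4009) and which also hides the caller's context for the rest of the function. A minimal sketch of the pattern, with newCallTimeoutCtx standing in for cloud.ContextWithCallTimeout (illustrative names, not from this diff):

package main

import (
	"context"
	"fmt"
	"time"
)

// newCallTimeoutCtx is a stand-in for cloud.ContextWithCallTimeout.
func newCallTimeoutCtx() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), 50*time.Millisecond)
}

// Before: "ctx, cancel := newCallTimeoutCtx()" overwrote the parameter
// before its first use. After: the derived context gets its own name,
// so both contexts stay visible.
func lookup(ctx context.Context) error {
	timeoutCtx, cancel := newCallTimeoutCtx()
	defer cancel()
	_ = ctx // the caller's context remains in scope if ever needed
	<-timeoutCtx.Done()
	return timeoutCtx.Err() // context.DeadlineExceeded after 50ms
}

func main() {
	fmt.Println(lookup(context.Background()))
}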
@@ -571,6 +571,7 @@ func TestTargetPoolNeedsRecreation(t *testing.T) {
 	require.NoError(t, err)
 	hostNames := nodeNames(nodes)
 	hosts, err := gce.getInstancesByNames(hostNames)
+	require.NoError(t, err)

 	var instances []string
 	for _, host := range hosts {
@@ -762,6 +763,7 @@ func TestFirewallNeedsUpdate(t *testing.T) {
 	fw.Allowed[0].IPProtocol = "tcp"
 	fw.SourceRanges[0] = trueSourceRange
 	fw, err = gce.GetFirewall(MakeFirewallName(tc.lbName))
+	require.NoError(t, err)
 	require.Equal(t, fw.Allowed[0].IPProtocol, "tcp")
 	require.Equal(t, fw.SourceRanges[0], trueSourceRange)
@@ -801,6 +803,7 @@ func TestEnsureTargetPoolAndHealthCheck(t *testing.T) {

 	hostNames := nodeNames(nodes)
 	hosts, err := gce.getInstancesByNames(hostNames)
+	require.NoError(t, err)
 	clusterID := vals.ClusterID

 	ipAddr := status.Ingress[0].IP
@@ -813,15 +816,19 @@ func TestEnsureTargetPoolAndHealthCheck(t *testing.T) {
 	// Apply a tag on the target pool. By verifying the change of the tag, target pool update can be ensured.
 	tag := "A Tag"
 	pool, err := gce.GetTargetPool(lbName, region)
+	require.NoError(t, err)
 	pool.CreationTimestamp = tag
 	pool, err = gce.GetTargetPool(lbName, region)
+	require.NoError(t, err)
 	require.Equal(t, tag, pool.CreationTimestamp)
 	err = gce.ensureTargetPoolAndHealthCheck(true, true, svc, lbName, clusterID, ipAddr, hosts, hcToCreate, hcToDelete)
 	assert.NoError(t, err)
 	pool, err = gce.GetTargetPool(lbName, region)
+	require.NoError(t, err)
 	assert.NotEqual(t, pool.CreationTimestamp, tag)

 	pool, err = gce.GetTargetPool(lbName, region)
+	require.NoError(t, err)
 	assert.Equal(t, 1, len(pool.Instances))
 	var manyNodeName [maxTargetPoolCreateInstances + 1]string
 	for i := 0; i < maxTargetPoolCreateInstances+1; i++ {
@@ -831,15 +838,18 @@ func TestEnsureTargetPoolAndHealthCheck(t *testing.T) {
 	require.NoError(t, err)
 	manyHostNames := nodeNames(manyNodes)
 	manyHosts, err := gce.getInstancesByNames(manyHostNames)
+	require.NoError(t, err)
 	err = gce.ensureTargetPoolAndHealthCheck(true, true, svc, lbName, clusterID, ipAddr, manyHosts, hcToCreate, hcToDelete)
 	assert.NoError(t, err)

 	pool, err = gce.GetTargetPool(lbName, region)
+	require.NoError(t, err)
 	assert.Equal(t, maxTargetPoolCreateInstances+1, len(pool.Instances))

 	err = gce.ensureTargetPoolAndHealthCheck(true, false, svc, lbName, clusterID, ipAddr, hosts, hcToCreate, hcToDelete)
 	assert.NoError(t, err)
 	pool, err = gce.GetTargetPool(lbName, region)
+	require.NoError(t, err)
 	assert.Equal(t, 1, len(pool.Instances))
 }
@@ -1011,6 +1021,7 @@ func TestEnsureExternalLoadBalancerErrors(t *testing.T) {
 	} {
 		t.Run(desc, func(t *testing.T) {
 			gce, err := fakeGCECloud(DefaultTestClusterValues())
+			require.NoError(t, err)
 			nodes, err := createAndInsertNodes(gce, []string{"test-node-1"}, vals.ZoneName)
 			require.NoError(t, err)
 			svc := fakeLoadbalancerService("")
@@ -64,6 +64,7 @@ func TestEnsureInternalBackendServiceUpdates(t *testing.T) {
 	svc := fakeLoadbalancerService(string(LBTypeInternal))
 	lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
 	nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
 	require.NoError(t, err)
 	igName := makeInstanceGroupName(vals.ClusterID)
 	igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
+	require.NoError(t, err)
@@ -110,6 +111,7 @@ func TestEnsureInternalBackendServiceGroups(t *testing.T) {
 	svc := fakeLoadbalancerService(string(LBTypeInternal))
 	lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
 	nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
 	require.NoError(t, err)
 	igName := makeInstanceGroupName(vals.ClusterID)
 	igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
+	require.NoError(t, err)
@@ -180,6 +182,7 @@ func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) {
 	require.NoError(t, err)

 	nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
 	require.NoError(t, err)
 	igName := makeInstanceGroupName(vals.ClusterID)
 	igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
+	require.NoError(t, err)
@@ -499,6 +502,7 @@ func TestClearPreviousInternalResources(t *testing.T) {
 	err = gce.ensureInternalBackendService(svc.ObjectMeta.Name, "", svc.Spec.SessionAffinity, cloud.SchemeInternal, v1.ProtocolTCP, []string{}, "")
 	require.NoError(t, err)
 	backendSvc, err := gce.GetRegionBackendService(svc.ObjectMeta.Name, gce.region)
+	require.NoError(t, err)
 	backendSvc.HealthChecks = []string{hc1.SelfLink, hc2.SelfLink}

 	c.MockRegionBackendServices.DeleteHook = mock.DeleteRegionBackendServicesErrHook
@@ -747,6 +751,7 @@ func TestEnsureInternalLoadBalancerErrors(t *testing.T) {
 	} {
 		t.Run(desc, func(t *testing.T) {
 			gce, err := fakeGCECloud(DefaultTestClusterValues())
+			require.NoError(t, err)
 			nodes, err := createAndInsertNodes(gce, []string{"test-node-1"}, vals.ZoneName)
 			require.NoError(t, err)
 			params = newEnsureILBParams(nodes)
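
All of the test hunks in this commit share one shape: a helper returns (value, err), the err was assigned but never read (which staticcheck reports, likely as SA4006), and the fix inserts require.NoError immediately after the call so the test both checks the error and stops early on failure. A minimal sketch, with a hypothetical helper in place of gce.getInstancesByNames:

package gce_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// getInstancesByNames is a hypothetical stand-in for the helper used
// in the real tests.
func getInstancesByNames(names []string) ([]string, error) {
	if len(names) == 0 {
		return nil, fmt.Errorf("no instance names given")
	}
	return names, nil
}

func TestGetInstances(t *testing.T) {
	hosts, err := getInstancesByNames([]string{"test-node-1"})
	// Without this assertion, err is assigned but never read.
	require.NoError(t, err)
	require.Len(t, hosts, 1)
}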
@@ -40,13 +40,13 @@ func newRoutesMetricContext(request string) *metricContext {

 // ListRoutes in the cloud environment.
 func (g *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
-	ctx, cancel := cloud.ContextWithCallTimeout()
+	timeoutCtx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	mc := newRoutesMetricContext("list")
 	prefix := truncateClusterName(clusterName)
 	f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", g.NetworkURL()).AndRegexp("description", k8sNodeRouteTag)
-	routes, err := g.c.Routes().List(ctx, f)
+	routes, err := g.c.Routes().List(timeoutCtx, f)
 	if err != nil {
 		return nil, mc.Observe(err)
 	}
@@ -66,7 +66,7 @@ func (g *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpro

 // CreateRoute in the cloud environment.
 func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error {
-	ctx, cancel := cloud.ContextWithCallTimeout()
+	timeoutCtx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	mc := newRoutesMetricContext("create")
@@ -84,7 +84,7 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st
 		Priority:    1000,
 		Description: k8sNodeRouteTag,
 	}
-	err = g.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr)
+	err = g.c.Routes().Insert(timeoutCtx, meta.GlobalKey(cr.Name), cr)
 	if isHTTPErrorCode(err, http.StatusConflict) {
 		klog.Infof("Route %q already exists.", cr.Name)
 		err = nil
@@ -94,11 +94,11 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st

 // DeleteRoute from the cloud environment.
 func (g *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
-	ctx, cancel := cloud.ContextWithCallTimeout()
+	timeoutCtx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	mc := newRoutesMetricContext("delete")
-	return mc.Observe(g.c.Routes().Delete(ctx, meta.GlobalKey(route.Name)))
+	return mc.Observe(g.c.Routes().Delete(timeoutCtx, meta.GlobalKey(route.Name)))
 }

 func truncateClusterName(clusterName string) string {
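
The three renames above (ListRoutes, CreateRoute, DeleteRoute) apply the same timeoutCtx fix already sketched for NodeAddressesByProviderID: the per-call timeout context gets its own name instead of overwriting the ctx parameter.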
@@ -19,6 +19,7 @@ limitations under the License.
 package gce

 import (
+	"context"
 	"encoding/json"
 	"net/http"
 	"strings"
@@ -106,7 +107,7 @@ func (a *AltTokenSource) token() (*oauth2.Token, error) {

 // NewAltTokenSource constructs a new alternate token source for generating tokens.
 func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource {
-	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
+	client := oauth2.NewClient(context.Background(), google.ComputeTokenSource(""))
 	a := &AltTokenSource{
 		oauthClient: client,
 		tokenURL:    tokenURL,