Merge pull request #48003 from MrHohn/gce-xlb-cleanup

Automatic merge from submit-queue (batch tested with PRs 48139, 48042, 47645, 48054, 48003)

Pipe clusterID into gce_loadbalancer_external.go

**What this PR does / why we need it**: Small cleanup of the GCE external load balancer (ELB) code: resolve the cluster ID once in the caller and pipe it through as a parameter, instead of re-fetching it inside each helper.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #48002

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
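
For context, this is a standard parameter-threading refactor: previously, several helpers on the external load balancer path each called `gce.ClusterID.GetID()` (and each handled its possible error); after this change the caller resolves the cluster ID once and passes it down explicitly. A minimal, self-contained sketch of the pattern, using hypothetical names rather than the provider's real types:

```go
package main

import "fmt"

// getClusterID stands in for gce.ClusterID.GetID(); like the real call it can
// fail, which is why every helper that fetched it had its own error handling.
func getClusterID() (string, error) {
	return "example-cluster-id", nil // hypothetical value
}

// ensureExternalLoadBalancer receives clusterID as a plain parameter instead
// of fetching it itself (simplified signature, not the provider's).
func ensureExternalLoadBalancer(clusterName, clusterID string) error {
	fmt.Printf("ensuring external LB for cluster %q (id %q)\n", clusterName, clusterID)
	return nil
}

func main() {
	// Resolve the cluster ID once, at the top of the call chain...
	clusterID, err := getClusterID()
	if err != nil {
		panic(err)
	}
	// ...then thread it through every downstream helper.
	if err := ensureExternalLoadBalancer("my-cluster", clusterID); err != nil {
		panic(err)
	}
}
```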
Kubernetes Submit Queue, 2017-06-27 14:08:21 -07:00, committed by GitHub
commit a7f16b553b
6 changed files with 26 additions and 38 deletions


@@ -135,7 +135,7 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, svc *v1.Service, nod
 		case schemeInternal:
 			err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc)
 		default:
-			err = gce.ensureExternalLoadBalancerDeleted(clusterName, svc)
+			err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc)
 		}
 		glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme, err)
 		if err != nil {
@@ -149,7 +149,7 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, svc *v1.Service, nod
 	case schemeInternal:
 		status, err = gce.ensureInternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes)
 	default:
-		status, err = gce.ensureExternalLoadBalancer(clusterName, svc, existingFwdRule, nodes)
+		status, err = gce.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes)
 	}
 	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err)
 	return status, err
@@ -191,7 +191,7 @@ func (gce *GCECloud) EnsureLoadBalancerDeleted(clusterName string, svc *v1.Servi
 	case schemeInternal:
 		err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc)
 	default:
-		err = gce.ensureExternalLoadBalancerDeleted(clusterName, svc)
+		err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc)
 	}
 	glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err)
 	return err


@@ -42,7 +42,7 @@ import (
 // Due to an interesting series of design decisions, this handles both creating
 // new load balancers and updating existing load balancers, recognizing when
 // each is needed.
-func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
 	if len(nodes) == 0 {
 		return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts")
 	}
@@ -222,10 +222,6 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, apiService *
 		glog.Infof("Target pool %v for Service %v/%v doesn't exist", loadBalancerName, apiService.Namespace, apiService.Name)
 	}
 
-	clusterID, err := gce.ClusterID.GetID()
-	if err != nil {
-		return nil, fmt.Errorf("error getting cluster ID %s: %v", loadBalancerName, err)
-	}
 	// Check which health check needs to create and which health check needs to delete.
 	// Health check management is coupled with target pool operation to prevent leaking.
 	var hcToCreate, hcToDelete *compute.HttpHealthCheck
@@ -283,7 +279,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, apiService *
 		if hcToDelete != nil {
 			hcNames = append(hcNames, hcToDelete.Name)
 		}
-		if err := gce.DeleteExternalTargetPoolAndChecks(loadBalancerName, gce.region, hcNames...); err != nil {
+		if err := gce.DeleteExternalTargetPoolAndChecks(loadBalancerName, gce.region, clusterID, hcNames...); err != nil {
 			return nil, fmt.Errorf("failed to delete existing target pool %s for load balancer update: %v", loadBalancerName, err)
 		}
 		glog.Infof("EnsureLoadBalancer(%v(%v)): deleted target pool", loadBalancerName, serviceName)
@@ -297,7 +293,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, apiService *
 			createInstances = createInstances[:maxTargetPoolCreateInstances]
 		}
 		// Pass healthchecks to createTargetPool which needs them as health check links in the target pool
-		if err := gce.createTargetPool(loadBalancerName, serviceName.String(), ipAddress, gce.region, createInstances, affinityType, hcToCreate); err != nil {
+		if err := gce.createTargetPool(loadBalancerName, serviceName.String(), ipAddress, gce.region, clusterID, createInstances, affinityType, hcToCreate); err != nil {
 			return nil, fmt.Errorf("failed to create target pool %s: %v", loadBalancerName, err)
 		}
 		if hcToCreate != nil {
@@ -358,7 +354,7 @@ func (gce *GCECloud) updateExternalLoadBalancer(clusterName string, service *v1.
 }
 
 // ensureExternalLoadBalancerDeleted is the external implementation of LoadBalancer.EnsureLoadBalancerDeleted
-func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName string, service *v1.Service) error {
+func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error {
 	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
 
 	var hcNames []string
@@ -370,10 +366,6 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName string, servi
 		}
 		hcNames = append(hcNames, hcToDelete.Name)
 	} else {
-		clusterID, err := gce.ClusterID.GetID()
-		if err != nil {
-			return fmt.Errorf("error getting cluster ID %s: %v", loadBalancerName, err)
-		}
 		// EnsureLoadBalancerDeleted() could be triggered by changing service from
 		// LoadBalancer type to others. In this case we have no idea whether it was
 		// using local traffic health check or nodes health check. Attempt to delete
@@ -394,7 +386,7 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName string, servi
 	if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil {
 		return err
 	}
-	if err := gce.DeleteExternalTargetPoolAndChecks(loadBalancerName, gce.region, hcNames...); err != nil {
+	if err := gce.DeleteExternalTargetPoolAndChecks(loadBalancerName, gce.region, clusterID, hcNames...); err != nil {
 		return err
 	}
 	return nil
@@ -406,7 +398,7 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName string, servi
 	return nil
 }
 
-func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(name, region string, hcNames ...string) error {
+func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(name, region, clusterID string, hcNames ...string) error {
 	if err := gce.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) {
 		glog.Infof("Target pool %s already deleted. Continuing to delete other resources.", name)
 	} else if err != nil {
@@ -444,10 +436,6 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(name, region string, hcNa
 			// We continue to delete the healthcheck firewall to prevent leaking.
 			glog.V(4).Infof("Health check %v is already deleted.", hcName)
 		}
-		clusterID, err := gce.ClusterID.GetID()
-		if err != nil {
-			return fmt.Errorf("error getting cluster ID: %v", err)
-		}
 		// If health check is deleted without error, it means no load-balancer is using it.
 		// So we should delete the health check firewall as well.
 		fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck)
@@ -468,7 +456,7 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(name, region string, hcNa
 	return nil
 }
 
-func (gce *GCECloud) createTargetPool(name, serviceName, ipAddress, region string, hosts []*gceInstance, affinityType v1.ServiceAffinity, hc *compute.HttpHealthCheck) error {
+func (gce *GCECloud) createTargetPool(name, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, affinityType v1.ServiceAffinity, hc *compute.HttpHealthCheck) error {
 	// health check management is coupled with targetPools to prevent leaks. A
 	// target pool is the only thing that requires a health check, so we delete
 	// associated checks on teardown, and ensure checks on setup.
@@ -482,7 +470,7 @@ func (gce *GCECloud) createTargetPool(name, serviceName, ipAddress, region strin
 		defer gce.sharedResourceLock.Unlock()
 	}
 	if !gce.OnXPN() {
-		if err := gce.ensureHttpHealthCheckFirewall(serviceName, ipAddress, region, hosts, hc.Name, int32(hc.Port), isNodesHealthCheck); err != nil {
+		if err := gce.ensureHttpHealthCheckFirewall(serviceName, ipAddress, region, clusterID, hosts, hc.Name, int32(hc.Port), isNodesHealthCheck); err != nil {
			return err
		}
 	}
@@ -776,12 +764,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st
 	return true, false, nil
 }
 
-func (gce *GCECloud) ensureHttpHealthCheckFirewall(serviceName, ipAddress, region string, hosts []*gceInstance, hcName string, hcPort int32, isNodesHealthCheck bool) error {
-	clusterID, err := gce.ClusterID.GetID()
-	if err != nil {
-		return fmt.Errorf("error getting cluster ID: %v", err)
-	}
-
+func (gce *GCECloud) ensureHttpHealthCheckFirewall(serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hcName string, hcPort int32, isNodesHealthCheck bool) error {
 	// Prepare the firewall params for creating / checking.
 	desc := fmt.Sprintf(`{"kubernetes.io/cluster-id":"%s"}`, clusterID)
 	if !isNodesHealthCheck {
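
The signature changes in this file all exist to carry `clusterID` to the places that actually consume it: the health-check firewall name (`MakeHealthCheckFirewallName`) and the firewall description (the `desc` literal above) both embed the cluster ID. A hedged sketch of that consumption; the naming scheme in the stand-in below is an assumption, not the provider's exact format:

```go
package main

import "fmt"

// makeHealthCheckFirewallName is a simplified stand-in for the provider's
// MakeHealthCheckFirewallName; the exact naming scheme is assumed here.
func makeHealthCheckFirewallName(clusterID, hcName string, isNodesHealthCheck bool) string {
	if isNodesHealthCheck {
		// The shared nodes health check is per cluster, hence the clusterID.
		return fmt.Sprintf("k8s-%s-node-http-hc", clusterID) // assumed format
	}
	// Local-traffic health checks are per load balancer.
	return hcName
}

func main() {
	clusterID := "example-id" // hypothetical
	// This description format appears verbatim in the diff above.
	desc := fmt.Sprintf(`{"kubernetes.io/cluster-id":"%s"}`, clusterID)
	fmt.Println(makeHealthCheckFirewallName(clusterID, "a1b2c3", true))
	fmt.Println(desc)
}
```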


@@ -1300,17 +1300,17 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
 	return fmt.Errorf("waiting for service to be down timed out")
 }
 
-func CleanupServiceResources(loadBalancerName, zone string) {
+func CleanupServiceResources(c clientset.Interface, loadBalancerName, zone string) {
 	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
-		CleanupServiceGCEResources(loadBalancerName, zone)
+		CleanupServiceGCEResources(c, loadBalancerName, zone)
 	}
 
 	// TODO: we need to add this function with other cloud providers, if there is a need.
 }
 
-func CleanupServiceGCEResources(loadBalancerName, zone string) {
+func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, zone string) {
 	if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
-		if err := CleanupGCEResources(loadBalancerName, zone); err != nil {
+		if err := CleanupGCEResources(c, loadBalancerName, zone); err != nil {
 			Logf("Still waiting for glbc to cleanup: %v", err)
 			return false, nil
 		}
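
In the framework change above, cleanup runs inside `wait.Poll` so transient failures (e.g. the GCE load balancer controller still holding resources) are retried until `LoadBalancerCleanupTimeout` elapses. A self-contained sketch of that retry shape, with a tiny stub standing in for apimachinery's `wait.Poll`:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// poll is a minimal stand-in for k8s.io/apimachinery wait.Poll: run cond at
// the given interval until it reports done, returns an error, or the timeout
// elapses.
func poll(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if done, err := cond(); err != nil || done {
			return err
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for cleanup")
}

func main() {
	attempt := 0
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		attempt++
		if attempt < 3 { // pretend cleanup fails twice before succeeding
			fmt.Println("Still waiting for glbc to cleanup") // log and retry
			return false, nil
		}
		return true, nil
	})
	fmt.Println("cleanup finished, err:", err)
}
```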


@@ -4505,7 +4505,7 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
 // CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
 // the given name. The name is usually the UUID of the Service prefixed with an
 // alpha-numeric character ('a') to work around cloudprovider rules.
-func CleanupGCEResources(loadBalancerName, zone string) (retErr error) {
+func CleanupGCEResources(c clientset.Interface, loadBalancerName, zone string) (retErr error) {
 	gceCloud, err := GetGCECloud()
 	if err != nil {
 		return err
@@ -4536,7 +4536,12 @@ func CleanupGCEResources(loadBalancerName, zone string) (retErr error) {
 	if hc != nil {
 		hcNames = append(hcNames, hc.Name)
 	}
-	if err := gceCloud.DeleteExternalTargetPoolAndChecks(loadBalancerName, region, hcNames...); err != nil &&
+	clusterID, err := GetClusterID(c)
+	if err != nil {
+		retErr = fmt.Errorf("%v\n%v", retErr, err)
+		return
+	}
+	if err := gceCloud.DeleteExternalTargetPoolAndChecks(loadBalancerName, region, clusterID, hcNames...); err != nil &&
 		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
 		retErr = fmt.Errorf("%v\n%v", retErr, err)
 	}
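
One nuance in `CleanupGCEResources` above: most deletion errors are accumulated into the named return `retErr` so that later resources still get cleaned up (the `GetClusterID` failure is the exception, since nothing after it can proceed without the ID). A minimal sketch of that accumulation pattern; note the first append formats the initial nil as `<nil>`, exactly as the `fmt.Errorf("%v\n%v", retErr, err)` calls in the diff do:

```go
package main

import "fmt"

// cleanup mimics the accumulate-don't-abort error handling used by the e2e
// framework: every step runs, and failures are concatenated into retErr.
func cleanup() (retErr error) {
	steps := []func() error{ // hypothetical cleanup steps
		func() error { return fmt.Errorf("delete target pool: quota error") },
		func() error { return nil }, // still runs despite the earlier failure
		func() error { return fmt.Errorf("delete firewall: permission denied") },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			retErr = fmt.Errorf("%v\n%v", retErr, err)
		}
	}
	return
}

func main() {
	fmt.Println(cleanup())
}
```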


@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		}
 		for _, lb := range serviceLBNames {
 			framework.Logf("cleaning gce resource for %s", lb)
-			framework.CleanupServiceGCEResources(lb, framework.TestContext.CloudConfig.Zone)
+			framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Zone)
 		}
 		//reset serviceLBNames
 		serviceLBNames = []string{}
@@ -1337,7 +1337,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() {
 		}
 		for _, lb := range serviceLBNames {
 			framework.Logf("cleaning gce resource for %s", lb)
-			framework.CleanupServiceGCEResources(lb, framework.TestContext.CloudConfig.Zone)
+			framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Zone)
 		}
 		//reset serviceLBNames
 		serviceLBNames = []string{}


@@ -248,7 +248,7 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
 		zone := fedframework.GetZoneFromClusterName(name)
 		serviceLBName := cloudprovider.GetLoadBalancerName(cSvc)
 		framework.Logf("cleaning cloud provider resource for service %q in namespace %q, in cluster %q", service.Name, namespace, name)
-		framework.CleanupServiceResources(serviceLBName, zone)
+		framework.CleanupServiceResources(c.Clientset, serviceLBName, zone)
 	}
 
 	err = cleanupServiceShard(c.Clientset, name, namespace, cSvc, fedframework.FederatedDefaultTestTimeout)