Add LoadBalancer status to ServiceStatus

This will replace publicIPs
commit 3884d5fc59
parent 1ad4549f5f
Author: Justin Santa Barbara
Date: 2015-05-22 17:33:29 -04:00
21 changed files with 500 additions and 95 deletions
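Note: the diff below switches the service controller from tracking load balancers via endpoint strings in Spec.PublicIPs to the new LoadBalancerStatus on ServiceStatus. For reference, a minimal sketch of the new fields (assumed shape, based on how the v1 Service API ended up; the type definitions themselves are not part of this excerpt):

type ServiceStatus struct {
	// LoadBalancer contains the current status of the load balancer, if any.
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
}

// LoadBalancerStatus represents the status of a load balancer.
type LoadBalancerStatus struct {
	// Ingress is a list of points where the load balancer is reachable.
	Ingress []LoadBalancerIngress `json:"ingress,omitempty"`
}

// LoadBalancerIngress is one reachable point: an IP (e.g. GCE) or a hostname (e.g. AWS ELB).
type LoadBalancerIngress struct {
	IP       string `json:"ip,omitempty"`
	Hostname string `json:"hostname,omitempty"`
}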

@@ -240,20 +240,16 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
 	} else {
 		// If we don't have any cached memory of the load balancer, we have to ask
 		// the cloud provider for what it knows about it.
-		endpoint, exists, err := s.balancer.GetTCPLoadBalancer(s.loadBalancerName(service), s.zone.Region)
+		status, exists, err := s.balancer.GetTCPLoadBalancer(s.loadBalancerName(service), s.zone.Region)
 		if err != nil {
 			return fmt.Errorf("Error getting LB for service %s", namespacedName), retryable
 		}
-		if exists && stringSlicesEqual(service.Spec.PublicIPs, []string{endpoint}) {
-			// TODO: If we could read more of the spec (ports, affinityType) of the
-			// existing load balancer, we could better determine if an update is
-			// necessary in more cases. For now, we optimistically assume that a
-			// matching IP suffices.
-			glog.Infof("LB already exists with endpoint %s for previously uncached service %s", endpoint, namespacedName)
+		if exists && api.LoadBalancerStatusEqual(status, &service.Status.LoadBalancer) {
+			glog.Infof("LB already exists with status %s for previously uncached service %s", status, namespacedName)
 			return nil, notRetryable
 		} else if exists {
 			glog.Infof("Deleting old LB for previously uncached service %s whose endpoint %s doesn't match the service's desired IPs %v",
-				namespacedName, endpoint, service.Spec.PublicIPs)
+				namespacedName, status, service.Spec.PublicIPs)
 			if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil {
 				return err, retryable
 			}
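The hunk above replaces the old string-slice comparison against Spec.PublicIPs with api.LoadBalancerStatusEqual. A minimal sketch of what that helper is assumed to do, using the Ingress-based types sketched earlier (the real helper lives with the API types and is not shown in this diff):

// Sketch (assumed implementation) of the status equality helper.
func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool {
	return ingressSliceEqual(l.Ingress, r.Ingress)
}

func ingressSliceEqual(l, r []LoadBalancerIngress) bool {
	if len(l) != len(r) {
		return false
	}
	for i := range l {
		// An ingress point only carries an IP and/or a hostname, so a
		// field-wise comparison is enough.
		if l[i].IP != r[i].IP || l[i].Hostname != r[i].Hostname {
			return false
		}
	}
	return true
}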
@@ -268,20 +264,24 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
 	glog.V(2).Infof("Creating LB for service %s", namespacedName)
 	// The load balancer doesn't exist yet, so create it.
-	publicIPstring := fmt.Sprint(service.Spec.PublicIPs)
+	// Save the state so we can avoid a write if it doesn't change
+	previousState := api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
 	err := s.createExternalLoadBalancer(service)
 	if err != nil {
 		return fmt.Errorf("failed to create external load balancer for service %s: %v", namespacedName, err), retryable
 	}
-	if publicIPstring == fmt.Sprint(service.Spec.PublicIPs) {
+	// Write the state if changed
+	if api.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) {
 		glog.Infof("Not persisting unchanged service to registry.")
 		return nil, notRetryable
 	}
 	// If creating the load balancer succeeded, persist the updated service.
 	if err = s.persistUpdate(service); err != nil {
-		return fmt.Errorf("Failed to persist updated publicIPs to apiserver, even after retries. Giving up: %v", err), notRetryable
+		return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
 	}
 	return nil, notRetryable
 }
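Instead of snapshotting fmt.Sprint(service.Spec.PublicIPs), the controller now deep-copies the status before calling createExternalLoadBalancer and compares afterwards, skipping the apiserver write when nothing changed. A sketch of the deep-copy helper under the same assumed types (also not part of this excerpt):

// Sketch (assumed implementation) of api.LoadBalancerStatusDeepCopy.
func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus {
	c := &LoadBalancerStatus{}
	c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress))
	// LoadBalancerIngress holds only plain strings, so a value copy is a deep copy.
	copy(c.Ingress, lb.Ingress)
	return c
}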
@@ -301,13 +301,13 @@ func (s *ServiceController) persistUpdate(service *api.Service) error {
 			return nil
 		}
 		// TODO: Try to resolve the conflict if the change was unrelated to load
-		// balancers and public IPs. For now, just rely on the fact that we'll
+		// balancer status. For now, just rely on the fact that we'll
 		// also process the update that caused the resource version to change.
 		if errors.IsConflict(err) {
 			glog.Infof("Not persisting update to service that has been changed since we received it: %v", err)
 			return nil
 		}
-		glog.Warningf("Failed to persist updated PublicIPs to service %s after creating its external load balancer: %v",
+		glog.Warningf("Failed to persist updated LoadBalancerStatus to service %s after creating its external load balancer: %v",
 			service.Name, err)
 		time.Sleep(clientRetryInterval)
 	}
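persistUpdate keeps its retry-until-written shape; only the log wording changes. A simplified sketch of the surrounding loop for orientation (the kubeClient Update signature and the clientRetryCount/clientRetryInterval constants are assumptions taken from the era's client and the lines shown above):

// Simplified sketch of persistUpdate; error cases other than conflict are elided.
func (s *ServiceController) persistUpdate(service *api.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Services(service.Namespace).Update(service)
		if err == nil {
			return nil
		}
		// A conflict means someone else updated the service; that newer version
		// will be processed on its own, so treat it as success here.
		if errors.IsConflict(err) {
			glog.Infof("Not persisting update to service that has been changed since we received it: %v", err)
			return nil
		}
		glog.Warningf("Failed to persist updated LoadBalancerStatus to service %s after creating its external load balancer: %v",
			service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}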
@@ -328,21 +328,23 @@ func (s *ServiceController) createExternalLoadBalancer(service *api.Service) err
 		for _, publicIP := range service.Spec.PublicIPs {
 			// TODO: Make this actually work for multiple IPs by using different
 			// names for each. For now, we'll just create the first and break.
-			endpoint, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
+			status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
 				ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
 			if err != nil {
 				return err
+			} else {
+				service.Status.LoadBalancer = *status
 			}
-			service.Spec.PublicIPs = []string{endpoint}
 			break
 		}
 	} else {
-		endpoint, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
+		status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
 			ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
 		if err != nil {
 			return err
+		} else {
+			service.Status.LoadBalancer = *status
 		}
-		service.Spec.PublicIPs = []string{endpoint}
 	}
 	return nil
 }
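With the spec mutation gone, consumers read the provisioned endpoints from Status.LoadBalancer rather than Spec.PublicIPs. A hypothetical reader-side helper (not part of this commit) to illustrate:

// Hypothetical helper: collect the reachable endpoints from the new status.
func loadBalancerEndpoints(svc *api.Service) []string {
	var endpoints []string
	for _, ing := range svc.Status.LoadBalancer.Ingress {
		if ing.IP != "" {
			endpoints = append(endpoints, ing.IP)
		} else if ing.Hostname != "" {
			endpoints = append(endpoints, ing.Hostname)
		}
	}
	return endpoints
}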