[OpenStack] Remove LBaaS v1 from the OpenStack cloud provider

Neutron LBaaS v1 has been declared obsolete; LBaaS v2 is its replacement. Remove the LBaaS v1 code so that only LBaaS v2 is supported.

Discussed at: #52609
Related to: #52821
Reference OpenStack doc: https://docs.openstack.org/mitaka/networking-guide/config-lbaas.html
commit b308e36819
parent 3ae0b84e0b
@@ -39,10 +39,6 @@ go_library(
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library",
-        "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members:go_default_library",
-        "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors:go_default_library",
-        "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools:go_default_library",
-        "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners:go_default_library",
        "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors:go_default_library",
@@ -78,7 +78,7 @@ type LoadBalancer struct {
 }
 
 type LoadBalancerOpts struct {
-    LBVersion         string `gcfg:"lb-version"`          // overrides autodetection. v1 or v2
+    LBVersion         string `gcfg:"lb-version"`          // overrides autodetection. Only support v2.
     SubnetId          string `gcfg:"subnet-id"`           // overrides autodetection.
     FloatingNetworkId string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip.
     LBMethod          string `gcfg:"lb-method"`           // default to ROUND_ROBIN.
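
For illustration, a cloud provider config that satisfies the new scheme could look like the sketch below. This is an assumption pieced together from the gcfg tags above: the [LoadBalancer] section name and the exact key set should be checked against the provider docs before use.

    [LoadBalancer]
    # Optional; may be left unset. Any value other than "v2" now disables the LoadBalancer.
    lb-version = v2
    # Optional overrides; autodetected/defaulted when omitted.
    subnet-id = <subnet-uuid>
    lb-method = ROUND_ROBIN
    # If specified, a floating IP is created for the load balancer.
    floating-network-id = <external-network-uuid>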
@@ -506,39 +506,17 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
         return nil, false
     }
 
+    // LBaaS v1 is deprecated in the OpenStack Liberty release.
+    // Currently kubernetes OpenStack cloud provider just support LBaaS v2.
     lbVersion := os.lbOpts.LBVersion
-    if lbVersion == "" {
-        // No version specified, try newest supported by server
-        netExts, err := networkExtensions(network)
-        if err != nil {
-            glog.Warningf("Failed to list neutron extensions: %v", err)
-            return nil, false
-        }
-
-        if netExts["lbaasv2"] {
-            lbVersion = "v2"
-        } else if netExts["lbaas"] {
-            lbVersion = "v1"
-        } else {
-            glog.Warningf("Failed to find neutron LBaaS extension (v1 or v2)")
-            return nil, false
-        }
-        glog.V(3).Infof("Using LBaaS extension %v", lbVersion)
+    if lbVersion != "" && lbVersion != "v2" {
+        glog.Warningf("Config error: currently only support LBaaS v2, unrecognised lb-version \"%v\"", lbVersion)
+        return nil, false
     }
 
     glog.V(1).Info("Claiming to support LoadBalancer")
 
-    if lbVersion == "v2" {
-        return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true
-    } else if lbVersion == "v1" {
-        // Since LBaaS v1 is deprecated in the OpenStack Liberty release, so deprecate LBaaSV1 at V1.8, then remove LBaaSV1 after V1.9.
-        // Reference OpenStack doc: https://docs.openstack.org/mitaka/networking-guide/config-lbaas.html
-        glog.Warningf("The LBaaS v1 of OpenStack cloud provider has been deprecated, Please use LBaaS v2")
-        return &LbaasV1{LoadBalancer{network, compute, os.lbOpts}}, true
-    } else {
-        glog.Warningf("Config error: unrecognised lb-version \"%v\"", lbVersion)
-        return nil, false
-    }
+    return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true
 }
 
 func isNotFound(err error) bool {
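
The new gate is small enough to restate as a standalone function. The following is an illustrative sketch with hypothetical names and logging elided, not code from the commit: an unset lb-version falls through to v2, and anything other than "v2" disables the LoadBalancer interface.

package main

import "fmt"

// supportsLBVersion mirrors the simplified check in OpenStack.LoadBalancer():
// only an unset lb-version or an explicit "v2" keeps load balancing enabled.
func supportsLBVersion(lbVersion string) bool {
	return lbVersion == "" || lbVersion == "v2"
}

func main() {
	for _, v := range []string{"", "v2", "v1", "bogus"} {
		fmt.Printf("lb-version=%q supported=%v\n", v, supportsLBVersion(v))
	}
}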
@@ -26,10 +26,6 @@ import (
     "github.com/gophercloud/gophercloud"
     "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions"
     "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
-    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members"
-    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
-    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
-    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
     "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
     "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
     v2monitors "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
@@ -77,13 +73,6 @@ const (
     ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/openstack-internal-load-balancer"
 )
 
-// Deprecated; Since LBaaS v1 is deprecated in the OpenStack Liberty release, Kubernetes deprecated it at V1.8.
-// TODO(FengyunPan): remove LBaaS v1 after kubernetes V1.9.
-// LoadBalancer implementation for LBaaS v1
-type LbaasV1 struct {
-    LoadBalancer
-}
-
 // LoadBalancer implementation for LBaaS v2
 type LbaasV2 struct {
     LoadBalancer
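
For context on the embedding: LbaasV2 reaches the Neutron and Nova clients and the options through the embedded LoadBalancer value, as the constructor LbaasV2{LoadBalancer{network, compute, os.lbOpts}} in the earlier hunk suggests. A minimal self-contained sketch with stand-in types (the real fields are *gophercloud.ServiceClient values and LoadBalancerOpts in this provider's source):

package main

import "fmt"

// Stand-ins for the gophercloud service clients and the provider options.
type serviceClient struct{ endpoint string }
type loadBalancerOpts struct{ LBVersion string }

// LoadBalancer bundles what every LBaaS backend needs.
type LoadBalancer struct {
	network *serviceClient
	compute *serviceClient
	opts    loadBalancerOpts
}

// LbaasV2 gains network, compute, and opts via field promotion.
type LbaasV2 struct {
	LoadBalancer
}

func main() {
	lb := LbaasV2{LoadBalancer{
		network: &serviceClient{endpoint: "neutron"},
		compute: &serviceClient{endpoint: "nova"},
		opts:    loadBalancerOpts{LBVersion: "v2"},
	}}
	fmt.Println(lb.network.endpoint, lb.opts.LBVersion) // promoted fields
}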
@@ -144,76 +133,6 @@ func getFloatingIPByPortID(client *gophercloud.ServiceClient, portID string) (*f
     return &floatingIPList[0], nil
 }
 
-func getPoolByName(client *gophercloud.ServiceClient, name string) (*pools.Pool, error) {
-    opts := pools.ListOpts{
-        Name: name,
-    }
-    pager := pools.List(client, opts)
-
-    poolList := make([]pools.Pool, 0, 1)
-
-    err := pager.EachPage(func(page pagination.Page) (bool, error) {
-        p, err := pools.ExtractPools(page)
-        if err != nil {
-            return false, err
-        }
-        poolList = append(poolList, p...)
-        if len(poolList) > 1 {
-            return false, ErrMultipleResults
-        }
-        return true, nil
-    })
-    if err != nil {
-        if isNotFound(err) {
-            return nil, ErrNotFound
-        }
-        return nil, err
-    }
-
-    if len(poolList) == 0 {
-        return nil, ErrNotFound
-    } else if len(poolList) > 1 {
-        return nil, ErrMultipleResults
-    }
-
-    return &poolList[0], nil
-}
-
-func getVipByName(client *gophercloud.ServiceClient, name string) (*vips.VirtualIP, error) {
-    opts := vips.ListOpts{
-        Name: name,
-    }
-    pager := vips.List(client, opts)
-
-    vipList := make([]vips.VirtualIP, 0, 1)
-
-    err := pager.EachPage(func(page pagination.Page) (bool, error) {
-        v, err := vips.ExtractVIPs(page)
-        if err != nil {
-            return false, err
-        }
-        vipList = append(vipList, v...)
-        if len(vipList) > 1 {
-            return false, ErrMultipleResults
-        }
-        return true, nil
-    })
-    if err != nil {
-        if isNotFound(err) {
-            return nil, ErrNotFound
-        }
-        return nil, err
-    }
-
-    if len(vipList) == 0 {
-        return nil, ErrNotFound
-    } else if len(vipList) > 1 {
-        return nil, ErrMultipleResults
-    }
-
-    return &vipList[0], nil
-}
-
 func getLoadbalancerByName(client *gophercloud.ServiceClient, name string) (*loadbalancers.LoadBalancer, error) {
     opts := loadbalancers.ListOpts{
         Name: name,
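
The removed v1 helpers above both follow gophercloud's name-filtered List/EachPage pattern, and the surviving v2 helper getLoadbalancerByName evidently opens the same way. Below is a sketch of how such a helper plausibly continues, modeled on the deleted code: the body past the ListOpts is an assumption, and the error variables and isNotFound are redefined here only to keep the sketch self-contained.

// Sketch only: modeled on the removed getPoolByName/getVipByName helpers;
// the real v2 helper in openstack_loadbalancer.go may differ in detail.
package openstack

import (
	"errors"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
	"github.com/gophercloud/gophercloud/pagination"
)

// Redefined for self-containment; the provider declares its own equivalents.
var (
	errNotFound        = errors.New("failed to find object")
	errMultipleResults = errors.New("multiple results where only one expected")
)

// isNotFound reports whether err is a Neutron 404.
func isNotFound(err error) bool {
	_, ok := err.(gophercloud.ErrDefault404)
	return ok
}

func getLoadbalancerByName(client *gophercloud.ServiceClient, name string) (*loadbalancers.LoadBalancer, error) {
	opts := loadbalancers.ListOpts{
		Name: name,
	}
	pager := loadbalancers.List(client, opts)

	lbList := make([]loadbalancers.LoadBalancer, 0, 1)

	err := pager.EachPage(func(page pagination.Page) (bool, error) {
		lbs, err := loadbalancers.ExtractLoadBalancers(page)
		if err != nil {
			return false, err
		}
		lbList = append(lbList, lbs...)
		if len(lbList) > 1 {
			// Stop paging early: load balancer names are expected to be unique.
			return false, errMultipleResults
		}
		return true, nil
	})
	if err != nil {
		if isNotFound(err) {
			return nil, errNotFound
		}
		return nil, err
	}

	if len(lbList) == 0 {
		return nil, errNotFound
	} else if len(lbList) > 1 {
		return nil, errMultipleResults
	}

	return &lbList[0], nil
}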
@@ -1500,354 +1419,3 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
     return nil
 }
 
-func (lb *LbaasV1) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
-    loadBalancerName := cloudprovider.GetLoadBalancerName(service)
-    vip, err := getVipByName(lb.network, loadBalancerName)
-    if err == ErrNotFound {
-        return nil, false, nil
-    }
-    if vip == nil {
-        return nil, false, err
-    }
-
-    status := &v1.LoadBalancerStatus{}
-
-    if vip.PortID != "" {
-        floatingIP, err := getFloatingIPByPortID(lb.network, vip.PortID)
-        if err != nil {
-            return nil, false, fmt.Errorf("error getting floating ip for port %s: %v", vip.PortID, err)
-        }
-        status.Ingress = []v1.LoadBalancerIngress{{IP: floatingIP.FloatingIP}}
-    } else {
-        status.Ingress = []v1.LoadBalancerIngress{{IP: vip.Address}}
-    }
-
-    return status, true, err
-}
-
-// TODO: This code currently ignores 'region' and always creates a
-// loadbalancer in only the current OpenStack region. We should take
-// a list of regions (from config) and query/create loadbalancers in
-// each region.
-
-func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
-    glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations)
-
-    if len(nodes) == 0 {
-        return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name)
-    }
-
-    if len(lb.opts.SubnetId) == 0 {
-        // Get SubnetId automatically.
-        // The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node.
-        subnetID, err := getSubnetIDForLB(lb.compute, *nodes[0])
-        if err != nil {
-            glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
-            return nil, fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+
-                "and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err)
-        }
-        lb.opts.SubnetId = subnetID
-    }
-
-    floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkId, lb.opts.FloatingNetworkId)
-    glog.V(4).Infof("EnsureLoadBalancer using floatingPool: %v", floatingPool)
-
-    var internalAnnotation bool
-    internal := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerInternal, "false")
-    switch internal {
-    case "true":
-        glog.V(4).Infof("Ensure an internal loadbalancer service.")
-        internalAnnotation = true
-    case "false":
-        if len(floatingPool) != 0 {
-            glog.V(4).Infof("Ensure an external loadbalancer service.")
-            internalAnnotation = false
-        } else {
-            return nil, fmt.Errorf("floating-network-id or loadbalancer.openstack.org/floating-network-id should be specified when ensuring an external loadbalancer service")
-        }
-    default:
-        return nil, fmt.Errorf("unknown service.beta.kubernetes.io/openstack-internal-load-balancer annotation: %v, specify \"true\" or \"false\" ",
-            internal)
-    }
-
-    ports := apiService.Spec.Ports
-    if len(ports) > 1 {
-        return nil, fmt.Errorf("multiple ports are not supported in openstack v1 load balancers")
-    } else if len(ports) == 0 {
-        return nil, fmt.Errorf("no ports provided to openstack load balancer")
-    }
-
-    // The service controller verified all the protocols match on the ports, just check and use the first one
-    // TODO: Convert all error messages to use an event recorder
-    if ports[0].Protocol != v1.ProtocolTCP {
-        return nil, fmt.Errorf("only TCP LoadBalancer is supported for openstack load balancers")
-    }
-
-    affinity := apiService.Spec.SessionAffinity
-    var persistence *vips.SessionPersistence
-    switch affinity {
-    case v1.ServiceAffinityNone:
-        persistence = nil
-    case v1.ServiceAffinityClientIP:
-        persistence = &vips.SessionPersistence{Type: "SOURCE_IP"}
-    default:
-        return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
-    }
-
-    sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService)
-    if err != nil {
-        return nil, err
-    }
-
-    if !service.IsAllowAll(sourceRanges) {
-        return nil, fmt.Errorf("source range restrictions are not supported for openstack load balancers")
-    }
-
-    glog.V(2).Infof("Checking if openstack load balancer already exists: %s", cloudprovider.GetLoadBalancerName(apiService))
-    _, exists, err := lb.GetLoadBalancer(clusterName, apiService)
-    if err != nil {
-        return nil, fmt.Errorf("error checking if openstack load balancer already exists: %v", err)
-    }
-
-    // TODO: Implement a more efficient update strategy for common changes than delete & create
-    // In particular, if we implement hosts update, we can get rid of UpdateHosts
-    if exists {
-        err := lb.EnsureLoadBalancerDeleted(clusterName, apiService)
-        if err != nil {
-            return nil, fmt.Errorf("error deleting existing openstack load balancer: %v", err)
-        }
-    }
-
-    lbmethod := pools.LBMethod(lb.opts.LBMethod)
-    if lbmethod == "" {
-        lbmethod = pools.LBMethodRoundRobin
-    }
-    name := cloudprovider.GetLoadBalancerName(apiService)
-    pool, err := pools.Create(lb.network, pools.CreateOpts{
-        Name:     name,
-        Protocol: pools.ProtocolTCP,
-        SubnetID: lb.opts.SubnetId,
-        LBMethod: lbmethod,
-    }).Extract()
-    if err != nil {
-        return nil, fmt.Errorf("error creating pool for openstack load balancer %s: %v", name, err)
-    }
-
-    for _, node := range nodes {
-        addr, err := nodeAddressForLB(node)
-        if err != nil {
-            return nil, err
-        }
-
-        _, err = members.Create(lb.network, members.CreateOpts{
-            PoolID:       pool.ID,
-            ProtocolPort: int(ports[0].NodePort), //Note: only handles single port
-            Address:      addr,
-        }).Extract()
-        if err != nil {
-            return nil, fmt.Errorf("error creating member for the pool(%s) of openstack load balancer %s: %v",
-                pool.ID, name, err)
-        }
-    }
-
-    var mon *monitors.Monitor
-    if lb.opts.CreateMonitor {
-        mon, err = monitors.Create(lb.network, monitors.CreateOpts{
-            Type:       monitors.TypeTCP,
-            Delay:      int(lb.opts.MonitorDelay.Duration.Seconds()),
-            Timeout:    int(lb.opts.MonitorTimeout.Duration.Seconds()),
-            MaxRetries: int(lb.opts.MonitorMaxRetries),
-        }).Extract()
-        if err != nil {
-            return nil, fmt.Errorf("error creating monitor for openstack load balancer %s: %v", name, err)
-        }
-
-        _, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract()
-        if err != nil {
-            return nil, fmt.Errorf("error associating monitor(%s) with pool(%s) for"+
-                "openstack load balancer %s: %v", mon.ID, pool.ID, name, err)
-        }
-    }
-
-    createOpts := vips.CreateOpts{
-        Name:         name,
-        Description:  fmt.Sprintf("Kubernetes external service %s", name),
-        Protocol:     "TCP",
-        ProtocolPort: int(ports[0].Port), //TODO: need to handle multi-port
-        PoolID:       pool.ID,
-        SubnetID:     lb.opts.SubnetId,
-        Persistence:  persistence,
-    }
-
-    loadBalancerIP := apiService.Spec.LoadBalancerIP
-    if loadBalancerIP != "" && internalAnnotation {
-        createOpts.Address = loadBalancerIP
-    }
-
-    vip, err := vips.Create(lb.network, createOpts).Extract()
-    if err != nil {
-        return nil, fmt.Errorf("error creating vip for openstack load balancer %s: %v", name, err)
-    }
-
-    status := &v1.LoadBalancerStatus{}
-    if floatingPool != "" && !internalAnnotation {
-        floatIPOpts := floatingips.CreateOpts{
-            FloatingNetworkID: floatingPool,
-            PortID:            vip.PortID,
-        }
-
-        loadBalancerIP := apiService.Spec.LoadBalancerIP
-        if loadBalancerIP != "" {
-            floatIPOpts.FloatingIP = loadBalancerIP
-        }
-
-        floatIP, err := floatingips.Create(lb.network, floatIPOpts).Extract()
-        if err != nil {
-            return nil, fmt.Errorf("error creating floatingip for openstack load balancer %s: %v", name, err)
-        }
-
-        status.Ingress = []v1.LoadBalancerIngress{{IP: floatIP.FloatingIP}}
-    } else {
-        status.Ingress = []v1.LoadBalancerIngress{{IP: vip.Address}}
-    }
-
-    return status, nil
-
-}
-
-func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
-    loadBalancerName := cloudprovider.GetLoadBalancerName(service)
-    glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes)
-
-    vip, err := getVipByName(lb.network, loadBalancerName)
-    if err != nil {
-        return err
-    }
-
-    // Set of member (addresses) that _should_ exist
-    addrs := map[string]bool{}
-    for _, node := range nodes {
-        addr, err := nodeAddressForLB(node)
-        if err != nil {
-            return err
-        }
-
-        addrs[addr] = true
-    }
-
-    // Iterate over members that _do_ exist
-    pager := members.List(lb.network, members.ListOpts{PoolID: vip.PoolID})
-    err = pager.EachPage(func(page pagination.Page) (bool, error) {
-        memList, err := members.ExtractMembers(page)
-        if err != nil {
-            return false, err
-        }
-
-        for _, member := range memList {
-            if _, found := addrs[member.Address]; found {
-                // Member already exists
-                delete(addrs, member.Address)
-            } else {
-                // Member needs to be deleted
-                err = members.Delete(lb.network, member.ID).ExtractErr()
-                if err != nil {
-                    return false, err
-                }
-            }
-        }
-
-        return true, nil
-    })
-    if err != nil {
-        return err
-    }
-
-    // Anything left in addrs is a new member that needs to be added
-    for addr := range addrs {
-        _, err := members.Create(lb.network, members.CreateOpts{
-            PoolID:       vip.PoolID,
-            Address:      addr,
-            ProtocolPort: vip.ProtocolPort,
-        }).Extract()
-        if err != nil {
-            return err
-        }
-    }
-
-    return nil
-}
-
-func (lb *LbaasV1) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
-    loadBalancerName := cloudprovider.GetLoadBalancerName(service)
-    glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName)
-
-    vip, err := getVipByName(lb.network, loadBalancerName)
-    if err != nil && err != ErrNotFound {
-        return err
-    }
-
-    if vip != nil && vip.PortID != "" {
-        floatingIP, err := getFloatingIPByPortID(lb.network, vip.PortID)
-        if err != nil && !isNotFound(err) {
-            return err
-        }
-        if floatingIP != nil {
-            err = floatingips.Delete(lb.network, floatingIP.ID).ExtractErr()
-            if err != nil && !isNotFound(err) {
-                return err
-            }
-        }
-    }
-
-    // We have to delete the VIP before the pool can be deleted,
-    // so no point continuing if this fails.
-    if vip != nil {
-        err := vips.Delete(lb.network, vip.ID).ExtractErr()
-        if err != nil && !isNotFound(err) {
-            return err
-        }
-    }
-
-    var pool *pools.Pool
-    if vip != nil {
-        pool, err = pools.Get(lb.network, vip.PoolID).Extract()
-        if err != nil && !isNotFound(err) {
-            return err
-        }
-    } else {
-        // The VIP is gone, but it is conceivable that a Pool
-        // still exists that we failed to delete on some
-        // previous occasion. Make a best effort attempt to
-        // cleanup any pools with the same name as the VIP.
-        pool, err = getPoolByName(lb.network, service.Name)
-        if err != nil && err != ErrNotFound {
-            return err
-        }
-    }
-
-    if pool != nil {
-        for _, monId := range pool.MonitorIDs {
-            _, err = pools.DisassociateMonitor(lb.network, pool.ID, monId).Extract()
-            if err != nil {
-                return err
-            }
-
-            err = monitors.Delete(lb.network, monId).ExtractErr()
-            if err != nil && !isNotFound(err) {
-                return err
-            }
-        }
-        for _, memberId := range pool.MemberIDs {
-            err = members.Delete(lb.network, memberId).ExtractErr()
-            if err != nil && !isNotFound(err) {
-                return err
-            }
-        }
-        err = pools.Delete(lb.network, pool.ID).ExtractErr()
-        if err != nil && !isNotFound(err) {
-            return err
-        }
-    }
-
-    return nil
-}
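
With the v1 methods gone, LbaasV2 is the only type left implementing the load balancer hooks. The sketch below restates the method set visible in this diff as a standalone interface; the types are simplified stand-ins, since the real signatures use *v1.Service, *v1.Node, and *v1.LoadBalancerStatus from the Kubernetes API packages.

package main

import "fmt"

// Simplified stand-ins for the Kubernetes API types in the real signatures.
type service struct{ Name string }
type node struct{ Addr string }
type status struct{ IngressIP string }

// loadBalancer mirrors the four cloudprovider.LoadBalancer hooks that the
// diff shows LbaasV1 losing; after this commit only LbaasV2 implements them.
type loadBalancer interface {
	GetLoadBalancer(clusterName string, svc *service) (*status, bool, error)
	EnsureLoadBalancer(clusterName string, svc *service, nodes []*node) (*status, error)
	UpdateLoadBalancer(clusterName string, svc *service, nodes []*node) error
	EnsureLoadBalancerDeleted(clusterName string, svc *service) error
}

// noopLB is a stub implementation used only to show the contract compiles.
type noopLB struct{}

func (noopLB) GetLoadBalancer(string, *service) (*status, bool, error) { return nil, false, nil }
func (noopLB) EnsureLoadBalancer(string, *service, []*node) (*status, error) {
	return &status{IngressIP: "203.0.113.10"}, nil // TEST-NET-3 placeholder address
}
func (noopLB) UpdateLoadBalancer(string, *service, []*node) error { return nil }
func (noopLB) EnsureLoadBalancerDeleted(string, *service) error   { return nil }

func main() {
	var lb loadBalancer = noopLB{}
	st, _ := lb.EnsureLoadBalancer("cluster", &service{Name: "web"}, []*node{{Addr: "10.0.0.5"}})
	fmt.Println(st.IngressIP)
}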