Merge pull request #30649 from dagnello/openstack-lbaasv2-multiport
Automatic merge from submit-queue

Fix the OpenStack provider to allow more than one service port for LBaaS v2. This resolves bug #30477, where a service that defines multiple ports for its load balancer causes the plugin to fail with "multiple ports are not supported".

@anguslees @jianhuiz
Commit e427ab0baa
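The change matters for Services that expose more than one port through a load balancer: with LBaaS v2 the provider now creates one listener/pool pair per port instead of rejecting the Service. Below is a minimal sketch of such a multi-port spec using the in-tree api types this file already references; the import path, port values, and load balancer name are illustrative, not taken from the PR.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
)

func main() {
    // Two TCP ports on one Service; before this PR the LBaaS v2 path
    // rejected len(ports) > 1 with "multiple ports are not yet supported".
    ports := []api.ServicePort{
        {Name: "http", Protocol: api.ProtocolTCP, Port: 80, NodePort: 30080},
        {Name: "https", Protocol: api.ProtocolTCP, Port: 443, NodePort: 30443},
    }

    name := "kube_example_lb" // illustrative load balancer name

    // After this change the v2 provider loops over every port, creating one
    // listener/pool pair per port (see the EnsureLoadBalancer hunks below).
    for portIndex, port := range ports {
        fmt.Printf("listener_%s_%d -> %s %d (node port %d)\n",
            name, portIndex, port.Protocol, port.Port, port.NodePort)
    }
}

Note that the LBaaS v1 path further down still accepts only a single port; the last two hunks only reword its error message and comment.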
@@ -56,6 +56,8 @@ type LbaasV2 struct {
     LoadBalancer
 }
 
+type empty struct{}
+
 func networkExtensions(client *gophercloud.ServiceClient) (map[string]bool, error) {
     seen := make(map[string]bool)
 
@@ -238,22 +240,23 @@ func getLoadbalancerByName(client *gophercloud.ServiceClient, name string) (*loa
     return &loadbalancerList[0], nil
 }
 
-func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, loadbalancerID string) error {
+func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, loadbalancerID string) (string, error) {
     start := time.Now().Second()
     for {
         loadbalancer, err := loadbalancers.Get(client, loadbalancerID).Extract()
         if err != nil {
-            return err
+            return "", err
         }
         if loadbalancer.ProvisioningStatus == "ACTIVE" {
-            return nil
+            return "ACTIVE", nil
+        } else if loadbalancer.ProvisioningStatus == "ERROR" {
+            return "ERROR", fmt.Errorf("Loadbalancer has gone into ERROR state")
         }
 
         time.Sleep(1 * time.Second)
 
         if time.Now().Second()-start >= loadbalancerActiveTimeoutSeconds {
-            return fmt.Errorf("Loadbalancer failed to go into ACTIVE provisioning status within alloted time")
-
+            return loadbalancer.ProvisioningStatus, fmt.Errorf("Loadbalancer failed to go into ACTIVE provisioning status within alloted time")
         }
     }
 }
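The call sites in this diff ignore the new return values and invoke the function only for its blocking side effect. The following self-contained sketch mirrors the wait-for-ACTIVE shape so a reader can see how a caller could consume the returned status; waitActive, getStatus, and the deadline handling are assumptions for illustration, not code from the PR (the real function polls loadbalancers.Get and compares time.Now().Second() against loadbalancerActiveTimeoutSeconds).

package main

import (
    "errors"
    "fmt"
    "time"
)

// waitActive mirrors the shape of the new waitLoadbalancerActiveProvisioningStatus:
// it returns the last observed provisioning status alongside any error.
// getStatus stands in for the loadbalancers.Get(...).Extract() call.
func waitActive(getStatus func() (string, error), timeout time.Duration) (string, error) {
    deadline := time.Now().Add(timeout)
    for {
        status, err := getStatus()
        if err != nil {
            return "", err
        }
        switch status {
        case "ACTIVE":
            return status, nil
        case "ERROR":
            return status, errors.New("loadbalancer has gone into ERROR state")
        }
        if time.Now().After(deadline) {
            return status, errors.New("loadbalancer failed to go ACTIVE within the allotted time")
        }
        time.Sleep(1 * time.Second)
    }
}

func main() {
    // Toy status source that reports ACTIVE on the third poll.
    polls := 0
    status, err := waitActive(func() (string, error) {
        polls++
        if polls < 3 {
            return "PENDING_UPDATE", nil
        }
        return "ACTIVE", nil
    }, 30*time.Second)
    fmt.Println(status, err)
}

Using a deadline rather than second-of-minute arithmetic is a simplification in this sketch, not a change proposed by the PR.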
@@ -304,17 +307,17 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
     glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, apiService.Annotations)
 
     ports := apiService.Spec.Ports
-    if len(ports) > 1 {
-        return nil, fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
-    } else if len(ports) == 0 {
+    if len(ports) == 0 {
         return nil, fmt.Errorf("no ports provided to openstack load balancer")
     }
 
-    // The service controller verified all the protocols match on the ports, just check and use the first one
+    // Check for TCP protocol on each port
     // TODO: Convert all error messages to use an event recorder
-    if ports[0].Protocol != api.ProtocolTCP {
-        return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load balancers")
-    }
+    for _, port := range ports {
+        if port.Protocol != api.ProtocolTCP {
+            return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load balancers")
+        }
+    }
 
     affinity := api.ServiceAffinityNone //apiService.Spec.SessionAffinity
     var persistence *v2_pools.SessionPersistence
@@ -377,10 +380,11 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
 
     waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
 
+    for portIndex, port := range ports {
         listener, err := listeners.Create(lbaas.network, listeners.CreateOpts{
-            Name:           name,
-            Protocol:       listeners.ProtocolTCP,
-            ProtocolPort:   (int)(ports[0].Port), //TODO: need to handle multi-port
+            Name:           fmt.Sprintf("listener_%s_%d", name, portIndex),
+            Protocol:       listeners.Protocol(port.Protocol),
+            ProtocolPort:   int(port.Port),
             LoadbalancerID: loadbalancer.ID,
         }).Extract()
         if err != nil {
@@ -392,8 +396,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
         waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
 
         pool, err := v2_pools.Create(lbaas.network, v2_pools.CreateOpts{
-            Name:        name,
-            Protocol:    v2_pools.ProtocolTCP,
+            Name:        fmt.Sprintf("pool_%s_%d", name, portIndex),
+            Protocol:    v2_pools.Protocol(port.Protocol),
             LBMethod:    lbmethod,
             ListenerID:  listener.ID,
             Persistence: persistence,
@@ -404,16 +408,18 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
             return nil, err
         }
 
+        waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
+
         for _, host := range hosts {
             addr, err := getAddressByName(lbaas.compute, host)
             if err != nil {
+                // cleanup what was created so far
+                _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
                 return nil, err
             }
 
-            waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
-
             _, err = v2_pools.CreateAssociateMember(lbaas.network, pool.ID, v2_pools.MemberCreateOpts{
-                ProtocolPort: int(ports[0].NodePort), //TODO: need to handle multi-port
+                ProtocolPort: int(port.NodePort),
                 Address:      addr,
                 SubnetID:     lbaas.opts.SubnetId,
             }).Extract()
@@ -422,14 +428,14 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
                 _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
                 return nil, err
             }
+
+            waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
         }
 
         if lbaas.opts.CreateMonitor {
-            waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
-
             _, err = v2_monitors.Create(lbaas.network, v2_monitors.CreateOpts{
                 PoolID:     pool.ID,
-                Type:       monitors.TypeTCP,
+                Type:       string(port.Protocol),
                 Delay:      int(lbaas.opts.MonitorDelay.Duration.Seconds()),
                 Timeout:    int(lbaas.opts.MonitorTimeout.Duration.Seconds()),
                 MaxRetries: int(lbaas.opts.MonitorMaxRetries),
@@ -439,6 +445,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *api.Ser
                 _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
                 return nil, err
             }
+            waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
+        }
     }
 
     status := &api.LoadBalancerStatus{}
@@ -475,9 +483,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Servic
     glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, hosts)
 
     ports := service.Spec.Ports
-    if len(ports) > 1 {
-        return fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
-    } else if len(ports) == 0 {
+    if len(ports) == 0 {
         return fmt.Errorf("no ports provided to openstack load balancer")
     }
 
@@ -489,50 +495,37 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Servic
         return fmt.Errorf("Loadbalancer %s does not exist", loadBalancerName)
     }
 
-    // Set of member (addresses) that _should_ exist
-    addrs := map[string]bool{}
-    for _, host := range hosts {
-        addr, err := getAddressByName(lbaas.compute, host)
+    // Get all listeners for this loadbalancer, by "port key".
+    type portKey struct {
+        Protocol string
+        Port     int
+    }
+    lbListeners := make(map[portKey]listeners.Listener)
+    err = listeners.List(lbaas.network, listeners.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
+        listenersList, err := listeners.ExtractListeners(page)
+        if err != nil {
+            return false, err
+        }
+        for _, l := range listenersList {
+            key := portKey{Protocol: l.Protocol, Port: l.ProtocolPort}
+            lbListeners[key] = l
+        }
+        return true, nil
+    })
     if err != nil {
         return err
     }
-        addrs[addr] = true
-    }
 
-    // Iterate over members in each pool that _do_ exist
-    var poolID string
+    // Get all pools for this loadbalancer, by listener ID.
+    lbPools := make(map[string]v2_pools.Pool)
     err = v2_pools.List(lbaas.network, v2_pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
         poolsList, err := v2_pools.ExtractPools(page)
         if err != nil {
             return false, err
         }
-        for _, pool := range poolsList {
-            poolID = pool.ID
-            err := v2_pools.ListAssociateMembers(lbaas.network, poolID, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
-                membersList, err := v2_pools.ExtractMembers(page)
-                if err != nil {
-                    return false, err
-                }
-
-                for _, member := range membersList {
-                    if _, found := addrs[member.Address]; found {
-                        // Member already exists, remove from update list
-                        delete(addrs, member.Address)
-                    } else {
-                        // Member needs to be deleted
-                        err = v2_pools.DeleteMember(lbaas.network, poolID, member.ID).ExtractErr()
-                        if err != nil {
-                            return false, err
-                        }
-                        waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
-                    }
-                }
-
-                return true, nil
-            })
-            if err != nil {
-                return false, err
-            }
+        for _, p := range poolsList {
+            for _, l := range p.Listeners {
+                lbPools[l.ID] = p
+            }
         }
         return true, nil
@@ -541,11 +534,58 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Servic
         return err
     }
 
-    // Anything left in addrs is a new member that needs to be added to a pool
+    // Compose Set of member (addresses) that _should_ exist
+    addrs := map[string]empty{}
+    for _, host := range hosts {
+        addr, err := getAddressByName(lbaas.compute, host)
+        if err != nil {
+            return err
+        }
+        addrs[addr] = empty{}
+    }
+
+    // Check for adding/removing members associated with each port
+    for _, port := range ports {
+        // Get listener associated with this port
+        listener, ok := lbListeners[portKey{
+            Protocol: string(port.Protocol),
+            Port:     int(port.Port),
+        }]
+        if !ok {
+            return fmt.Errorf("Loadbalancer %s does not contain required listener for port %d and protocol %s", loadBalancerName, port.Port, port.Protocol)
+        }
+
+        // Get pool associated with this listener
+        pool, ok := lbPools[listener.ID]
+        if !ok {
+            return fmt.Errorf("Loadbalancer %s does not contain required pool for listener %s", loadBalancerName, listener.ID)
+        }
+
+        // Find existing pool members (by address) for this port
+        members := make(map[string]v2_pools.Member)
+        err := v2_pools.ListAssociateMembers(lbaas.network, pool.ID, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+            membersList, err := v2_pools.ExtractMembers(page)
+            if err != nil {
+                return false, err
+            }
+            for _, member := range membersList {
+                members[member.Address] = member
+            }
+            return true, nil
+        })
+        if err != nil {
+            return err
+        }
+
+        // Add any new members for this port
         for addr := range addrs {
-            _, err := v2_pools.CreateAssociateMember(lbaas.network, poolID, v2_pools.MemberCreateOpts{
+            if _, ok := members[addr]; ok {
+                // Already exists, do not create member
+                continue
+            }
+            _, err := v2_pools.CreateAssociateMember(lbaas.network, pool.ID, v2_pools.MemberCreateOpts{
                 Address:      addr,
-                ProtocolPort: int(ports[0].NodePort),
+                ProtocolPort: int(port.NodePort),
                 SubnetID:     lbaas.opts.SubnetId,
             }).Extract()
             if err != nil {
@@ -554,6 +594,19 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *api.Servic
             waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
         }
 
+        // Remove any old members for this port
+        for _, member := range members {
+            if _, ok := addrs[member.Address]; ok {
+                // Still present, do not delete member
+                continue
+            }
+            err = v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
+            if err != nil && !isNotFound(err) {
+                return err
+            }
+            waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
+        }
+    }
     return nil
 }
 
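Taken together, the UpdateLoadBalancer hunks perform a per-port set reconciliation: build the set of node addresses that should be members, list the members that currently exist, create the missing ones, and delete the stale ones. Below is a stripped-down, self-contained sketch of that pattern with plain strings in place of the gophercloud types; reconcileMembers and its callbacks are hypothetical names used only for illustration.

package main

import "fmt"

// reconcileMembers applies the add/remove logic used per port in
// UpdateLoadBalancer: desired is the set of node addresses that should be
// pool members, existing is what the pool currently contains.
func reconcileMembers(desired, existing map[string]struct{},
    create func(addr string) error, remove func(addr string) error) error {
    // Add any new members.
    for addr := range desired {
        if _, ok := existing[addr]; ok {
            continue // already present
        }
        if err := create(addr); err != nil {
            return err
        }
    }
    // Remove any old members.
    for addr := range existing {
        if _, ok := desired[addr]; ok {
            continue // still wanted
        }
        if err := remove(addr); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    desired := map[string]struct{}{"10.0.0.1": {}, "10.0.0.3": {}}
    existing := map[string]struct{}{"10.0.0.1": {}, "10.0.0.2": {}}
    _ = reconcileMembers(desired, existing,
        func(addr string) error { fmt.Println("create member", addr); return nil },
        func(addr string) error { fmt.Println("delete member", addr); return nil },
    )
    // Creates 10.0.0.3 and deletes 10.0.0.2 (map iteration order may vary).
}

In the real code the create and delete callbacks correspond to v2_pools.CreateAssociateMember and v2_pools.DeleteMember, each followed by a wait for the load balancer to return to ACTIVE.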
@@ -735,7 +788,7 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Servic
 
     ports := apiService.Spec.Ports
     if len(ports) > 1 {
-        return nil, fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
+        return nil, fmt.Errorf("multiple ports are not supported in openstack v1 load balancers")
     } else if len(ports) == 0 {
         return nil, fmt.Errorf("no ports provided to openstack load balancer")
     }
@@ -804,7 +857,7 @@ func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *api.Servic
 
     _, err = members.Create(lb.network, members.CreateOpts{
         PoolID:       pool.ID,
-        ProtocolPort: int(ports[0].NodePort), //TODO: need to handle multi-port
+        ProtocolPort: int(ports[0].NodePort), //Note: only handles single port
         Address:      addr,
     }).Extract()
     if err != nil {