mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-09-17 15:13:08 +00:00
Set names for OpenStack loadbalancer members and monitors
Health monitors will be named 'monitor_<lb_name>_<index>' and members will be named 'member_<lb_name>_<index>_<node_name>'.
This commit is contained in:
@@ -817,6 +817,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string
|
|||||||
if !memberExists(members, addr, int(port.NodePort)) {
|
if !memberExists(members, addr, int(port.NodePort)) {
|
||||||
glog.V(4).Infof("Creating member for pool %s", pool.ID)
|
glog.V(4).Infof("Creating member for pool %s", pool.ID)
|
||||||
_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
|
_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
|
||||||
|
Name: fmt.Sprintf("member_%s_%d_%s", name, portIndex, node.Name),
|
||||||
ProtocolPort: int(port.NodePort),
|
ProtocolPort: int(port.NodePort),
|
||||||
Address: addr,
|
Address: addr,
|
||||||
SubnetID: lbaas.opts.SubnetID,
|
SubnetID: lbaas.opts.SubnetID,
|
||||||
@@ -854,6 +855,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string
|
|||||||
if monitorID == "" && lbaas.opts.CreateMonitor {
|
if monitorID == "" && lbaas.opts.CreateMonitor {
|
||||||
glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
|
glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
|
||||||
monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{
|
monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{
|
||||||
|
Name: fmt.Sprintf("monitor_%s_%d", name, portIndex),
|
||||||
PoolID: pool.ID,
|
PoolID: pool.ID,
|
||||||
Type: string(port.Protocol),
|
Type: string(port.Protocol),
|
||||||
Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()),
|
Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()),
|
||||||
@@ -1214,17 +1216,17 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Compose Set of member (addresses) that _should_ exist
|
// Compose Set of member (addresses) that _should_ exist
|
||||||
addrs := map[string]empty{}
|
addrs := make(map[string]*v1.Node)
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
addr, err := nodeAddressForLB(node)
|
addr, err := nodeAddressForLB(node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
addrs[addr] = empty{}
|
addrs[addr] = node
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for adding/removing members associated with each port
|
// Check for adding/removing members associated with each port
|
||||||
for _, port := range ports {
|
for portIndex, port := range ports {
|
||||||
// Get listener associated with this port
|
// Get listener associated with this port
|
||||||
listener, ok := lbListeners[portKey{
|
listener, ok := lbListeners[portKey{
|
||||||
Protocol: toListenersProtocol(port.Protocol),
|
Protocol: toListenersProtocol(port.Protocol),
|
||||||
@@ -1251,12 +1253,13 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add any new members for this port
|
// Add any new members for this port
|
||||||
for addr := range addrs {
|
for addr, node := range addrs {
|
||||||
if _, ok := members[addr]; ok && members[addr].ProtocolPort == int(port.NodePort) {
|
if _, ok := members[addr]; ok && members[addr].ProtocolPort == int(port.NodePort) {
|
||||||
// Already exists, do not create member
|
// Already exists, do not create member
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
|
_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
|
||||||
|
Name: fmt.Sprintf("member_%s_%d_%s", loadbalancer.Name, portIndex, node.Name),
|
||||||
Address: addr,
|
Address: addr,
|
||||||
ProtocolPort: int(port.NodePort),
|
ProtocolPort: int(port.NodePort),
|
||||||
SubnetID: lbaas.opts.SubnetID,
|
SubnetID: lbaas.opts.SubnetID,
|
||||||
|
Reference in New Issue
Block a user