Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-25 19:43:22 +00:00)
azure: Update LB API hosts->nodes
Update EnsureLoadBalancer/UpdateLoadBalancer API to use node objects.
This commit is contained in:
parent 398c62d1ff
commit a718f78a5d
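The change replaces the hosts-as-strings parameter with node objects across EnsureLoadBalancer, UpdateLoadBalancer, and the internal reconcileLoadBalancer, reading node.Name where only the name is needed. Below is a minimal sketch of the before/after shape, not the provider's actual file; the nodeNamesOf helper is hypothetical (not part of this commit), and the v1 import path shown is the current one rather than the path used at the time of this change.

// Sketch only: illustrates the signature change carried by this commit.
package azure

// Assumes the core API types are imported as "v1"; the exact import path
// differs across Kubernetes releases.
import v1 "k8s.io/api/core/v1"

// Old shape (removed by this commit):
//   EnsureLoadBalancer(clusterName string, service *v1.Service, nodeNames []string) (*v1.LoadBalancerStatus, error)
//   UpdateLoadBalancer(clusterName string, service *v1.Service, nodeNames []string) error
//
// New shape (added by this commit):
//   EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
//   UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error

// nodeNamesOf is a hypothetical helper showing how the old []string view can
// be recovered from node objects when only the names are needed, mirroring
// the node.Name access in the backend-pool loop below.
func nodeNamesOf(nodes []*v1.Node) []string {
	names := make([]string, 0, len(nodes))
	for _, node := range nodes {
		names = append(names, node.Name)
	}
	return names
}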
@@ -62,7 +62,7 @@ func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (statu
 }
 
 // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
-func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodeNames []string) (*v1.LoadBalancerStatus, error) {
+func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
 	lbName := getLoadBalancerName(clusterName)
 	pipName := getPublicIPName(clusterName, service)
 	serviceName := getServiceName(service)
@@ -101,7 +101,7 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
 		}
 	}
 
-	lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodeNames)
+	lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodes)
 	if err != nil {
 		return nil, err
 	}
@@ -116,9 +116,9 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
 	// Add the machines to the backend pool if they're not already
 	lbBackendName := getBackendPoolName(clusterName)
 	lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendName)
-	hostUpdates := make([]func() error, len(nodeNames))
-	for i, nodeName := range nodeNames {
-		localNodeName := nodeName
+	hostUpdates := make([]func() error, len(nodes))
+	for i, node := range nodes {
+		localNodeName := node.Name
 		f := func() error {
 			err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID)
 			if err != nil {
@@ -141,8 +141,8 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
 }
 
 // UpdateLoadBalancer updates hosts under the specified load balancer.
-func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodeNames []string) error {
-	_, err := az.EnsureLoadBalancer(clusterName, service, nodeNames)
+func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
+	_, err := az.EnsureLoadBalancer(clusterName, service, nodes)
 	return err
 }
 
@@ -167,7 +167,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi
 		return err
 	}
 	if existsLb {
-		lb, lbNeedsUpdate, reconcileErr := az.reconcileLoadBalancer(lb, nil, clusterName, service, []string{})
+		lb, lbNeedsUpdate, reconcileErr := az.reconcileLoadBalancer(lb, nil, clusterName, service, []*v1.Node{})
 		if reconcileErr != nil {
 			return reconcileErr
 		}
@@ -259,7 +259,7 @@ func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error {
 // This ensures load balancer exists and the frontend ip config is setup.
 // This also reconciles the Service's Ports with the LoadBalancer config.
 // This entails adding rules/probes for expected Ports and removing stale rules/ports.
-func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *v1.Service, nodeNames []string) (network.LoadBalancer, bool, error) {
+func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *v1.Service, nodes []*v1.Node) (network.LoadBalancer, bool, error) {
 	lbName := getLoadBalancerName(clusterName)
 	serviceName := getServiceName(service)
 	lbFrontendIPConfigName := getFrontendIPConfigName(service)

@@ -38,9 +38,9 @@ func TestReconcileLoadBalancerAddPort(t *testing.T) {
 	svc := getTestService("servicea", 80)
 	pip := getTestPublicIP()
 	lb := getTestLoadBalancer()
-	hosts := []string{}
+	nodes := []*v1.Node{}
 
-	lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, hosts)
+	lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
@@ -67,9 +67,9 @@ func TestReconcileLoadBalancerNodeHealth(t *testing.T) {
 	pip := getTestPublicIP()
 	lb := getTestLoadBalancer()
 
-	hosts := []string{}
+	nodes := []*v1.Node{}
 
-	lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, hosts)
+	lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
@@ -92,15 +92,15 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T)
 	svc := getTestService("servicea", 80)
 	lb := getTestLoadBalancer()
 	pip := getTestPublicIP()
-	hosts := []string{}
+	nodes := []*v1.Node{}
 
-	lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, hosts)
+	lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
 
 	svcUpdated := getTestService("servicea")
-	lb, updated, err = az.reconcileLoadBalancer(lb, nil, testClusterName, &svcUpdated, hosts)
+	lb, updated, err = az.reconcileLoadBalancer(lb, nil, testClusterName, &svcUpdated, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
@@ -122,12 +122,12 @@ func TestReconcileLoadBalancerRemovesPort(t *testing.T) {
 	az := getTestCloud()
 	svc := getTestService("servicea", 80, 443)
 	pip := getTestPublicIP()
-	hosts := []string{}
+	nodes := []*v1.Node{}
 
 	existingLoadBalancer := getTestLoadBalancer(svc)
 
 	svcUpdated := getTestService("servicea", 80)
-	updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svcUpdated, hosts)
+	updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svcUpdated, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
@@ -141,16 +141,16 @@ func TestReconcileLoadBalancerMultipleServices(t *testing.T) {
 	svc1 := getTestService("servicea", 80, 443)
 	svc2 := getTestService("serviceb", 80)
 	pip := getTestPublicIP()
-	hosts := []string{}
+	nodes := []*v1.Node{}
 
 	existingLoadBalancer := getTestLoadBalancer()
 
-	updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svc1, hosts)
+	updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svc1, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
 
-	updatedLoadBalancer, _, err = az.reconcileLoadBalancer(updatedLoadBalancer, &pip, testClusterName, &svc2, hosts)
+	updatedLoadBalancer, _, err = az.reconcileLoadBalancer(updatedLoadBalancer, &pip, testClusterName, &svc2, nodes)
 	if err != nil {
 		t.Errorf("Unexpected error: %q", err)
 	}
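The hunk at -116,9 above builds one update closure per node and copies the node name into a loop-local variable before the closure is created, so each closure holds its own value rather than the final loop value. A minimal standalone sketch of that capture pattern, using illustrative stand-in types and helpers rather than the provider's real ones:

package main

import "fmt"

// node and addToBackendPool are illustrative stand-ins, not the provider's types.
type node struct{ Name string }

func addToBackendPool(name string) error {
	fmt.Println("ensuring", name, "is in the backend pool")
	return nil
}

func main() {
	nodes := []*node{{Name: "node-0"}, {Name: "node-1"}}

	// One update function per node, mirroring hostUpdates in the diff.
	hostUpdates := make([]func() error, len(nodes))
	for i, n := range nodes {
		localNodeName := n.Name // capture the per-iteration value
		hostUpdates[i] = func() error {
			return addToBackendPool(localNodeName)
		}
	}

	// The real code runs these updates concurrently; running them serially
	// here keeps the sketch simple.
	for _, f := range hostUpdates {
		if err := f(); err != nil {
			fmt.Println("update failed:", err)
		}
	}
}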