Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #60459 from agau4779/gce-internal-lb-tests
Automatic merge from submit-queue (batch tested with PRs 57871, 61094, 60459, 61089, 61105). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

[GCE] Internal Loadbalancer Tests

**What this PR does / why we need it**: Adds unit tests for the GCE Internal Loadbalancer. According to the test coverage tool, this PR brings test coverage of `gce_loadbalancer_internal.go` from 0% to 74.0%.

```release-note
NONE
```

/assign nicksardo bowei
Commit: 22a040a1b9
pkg/cloudprovider/providers/gce/BUILD:

@@ -101,14 +101,18 @@ go_test(
         "gce_disks_test.go",
         "gce_healthchecks_test.go",
         "gce_loadbalancer_external_test.go",
+        "gce_loadbalancer_internal_test.go",
+        "gce_loadbalancer_utils_test.go",
         "gce_test.go",
         "gce_util_test.go",
         "metrics_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/api/v1/service:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/cloudprovider/providers/gce/cloud:go_default_library",
+        "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
         "//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
pkg/cloudprovider/providers/gce/cloud/BUILD:

@@ -7,6 +7,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/cloudprovider/providers/gce/cloud:go_default_library",
+        "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
         "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
         "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
         "//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
pkg/cloudprovider/providers/gce/cloud/mock/mock.go:

@@ -28,12 +28,14 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
+    "sync"

     alpha "google.golang.org/api/compute/v0.alpha"
     beta "google.golang.org/api/compute/v0.beta"
     ga "google.golang.org/api/compute/v1"
     "google.golang.org/api/googleapi"
-    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
+    cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
+    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
 )

@@ -226,3 +228,183 @@ func InsertAlphaAddressHook(ctx context.Context, key *meta.Key, obj *alpha.Addre
     projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses")
     return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionAlpha, projectID)
 }
+
+// InstanceGroupAttributes maps from InstanceGroup key to a map of Instances
+type InstanceGroupAttributes struct {
+    InstanceMap map[meta.Key]map[string]*ga.InstanceWithNamedPorts
+    Lock        *sync.Mutex
+}
+
+// AddInstances adds a list of Instances passed by InstanceReference
+func (igAttrs *InstanceGroupAttributes) AddInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error {
+    igAttrs.Lock.Lock()
+    defer igAttrs.Lock.Unlock()
+
+    instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key]
+    if !ok {
+        instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts)
+    }
+
+    for _, instance := range instanceRefs {
+        iWithPort := &ga.InstanceWithNamedPorts{
+            Instance: instance.Instance,
+        }
+
+        instancesWithNamedPorts[instance.Instance] = iWithPort
+    }
+
+    igAttrs.InstanceMap[*key] = instancesWithNamedPorts
+    return nil
+}
+
+// RemoveInstances removes a list of Instances passed by InstanceReference
+func (igAttrs *InstanceGroupAttributes) RemoveInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error {
+    igAttrs.Lock.Lock()
+    defer igAttrs.Lock.Unlock()
+
+    instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key]
+    if !ok {
+        instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts)
+    }
+
+    for _, instanceToRemove := range instanceRefs {
+        if _, ok := instancesWithNamedPorts[instanceToRemove.Instance]; ok {
+            delete(instancesWithNamedPorts, instanceToRemove.Instance)
+        } else {
+            return &googleapi.Error{
+                Code:    http.StatusBadRequest,
+                Message: fmt.Sprintf("%s is not a member of %s", instanceToRemove.Instance, key.String()),
+            }
+        }
+    }
+
+    igAttrs.InstanceMap[*key] = instancesWithNamedPorts
+    return nil
+}
+
+// List gets a list of InstanceWithNamedPorts
+func (igAttrs *InstanceGroupAttributes) List(key *meta.Key) []*ga.InstanceWithNamedPorts {
+    igAttrs.Lock.Lock()
+    defer igAttrs.Lock.Unlock()
+
+    instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key]
+    if !ok {
+        instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts)
+    }
+
+    var instanceList []*ga.InstanceWithNamedPorts
+    for _, val := range instancesWithNamedPorts {
+        instanceList = append(instanceList, val)
+    }
+
+    return instanceList
+}
+
+// AddInstancesHook mocks adding instances from an InstanceGroup
+func AddInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsAddInstancesRequest, m *cloud.MockInstanceGroups) error {
+    _, err := m.Get(ctx, key)
+    if err != nil {
+        return &googleapi.Error{
+            Code:    http.StatusNotFound,
+            Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()),
+        }
+    }
+
+    var attrs InstanceGroupAttributes
+    attrs = m.X.(InstanceGroupAttributes)
+    attrs.AddInstances(key, req.Instances)
+    m.X = attrs
+    return nil
+}
+
+// ListInstancesHook mocks listing instances from an InstanceGroup
+func ListInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsListInstancesRequest, filter *filter.F, m *cloud.MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) {
+    _, err := m.Get(ctx, key)
+    if err != nil {
+        return nil, &googleapi.Error{
+            Code:    http.StatusNotFound,
+            Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()),
+        }
+    }
+
+    var attrs InstanceGroupAttributes
+    attrs = m.X.(InstanceGroupAttributes)
+    instances := attrs.List(key)
+
+    return instances, nil
+}
+
+// RemoveInstancesHook mocks removing instances from an InstanceGroup
+func RemoveInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsRemoveInstancesRequest, m *cloud.MockInstanceGroups) error {
+    _, err := m.Get(ctx, key)
+    if err != nil {
+        return &googleapi.Error{
+            Code:    http.StatusNotFound,
+            Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()),
+        }
+    }
+
+    var attrs InstanceGroupAttributes
+    attrs = m.X.(InstanceGroupAttributes)
+    attrs.RemoveInstances(key, req.Instances)
+    m.X = attrs
+    return nil
+}
+
+// UpdateFirewallHook defines the hook for updating a Firewall. It replaces the
+// object with the same key in the mock with the updated object.
+func UpdateFirewallHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error {
+    _, err := m.Get(ctx, key)
+    if err != nil {
+        return &googleapi.Error{
+            Code:    http.StatusNotFound,
+            Message: fmt.Sprintf("Key: %s was not found in Firewalls", key.String()),
+        }
+    }
+
+    obj.Name = key.Name
+    projectID := m.ProjectRouter.ProjectID(ctx, "ga", "firewalls")
+    obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "firewalls", key)
+
+    m.Objects[*key] = &cloud.MockFirewallsObj{Obj: obj}
+    return nil
+}
+
+// UpdateHealthCheckHook defines the hook for updating a HealthCheck. It
+// replaces the object with the same key in the mock with the updated object.
+func UpdateHealthCheckHook(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *cloud.MockHealthChecks) error {
+    _, err := m.Get(ctx, key)
+    if err != nil {
+        return &googleapi.Error{
+            Code:    http.StatusNotFound,
+            Message: fmt.Sprintf("Key: %s was not found in HealthChecks", key.String()),
+        }
+    }
+
+    obj.Name = key.Name
+    projectID := m.ProjectRouter.ProjectID(ctx, "ga", "healthChecks")
+    obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "healthChecks", key)
+
+    m.Objects[*key] = &cloud.MockHealthChecksObj{Obj: obj}
+    return nil
+}
+
+// UpdateRegionBackendServiceHook defines the hook for updating a Region
+// BackendsService. It replaces the object with the same key in the mock with
+// the updated object.
+func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockRegionBackendServices) error {
+    _, err := m.Get(ctx, key)
+    if err != nil {
+        return &googleapi.Error{
+            Code:    http.StatusNotFound,
+            Message: fmt.Sprintf("Key: %s was not found in RegionBackendServices", key.String()),
+        }
+    }
+
+    obj.Name = key.Name
+    projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices")
+    obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key)
+
+    m.Objects[*key] = &cloud.MockRegionBackendServicesObj{Obj: obj}
+    return nil
+}
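All of these hooks share one pattern: per-resource state that the generated mock does not model (here, instance-group membership) rides on the mock's untyped `X` field and is recovered with a type assertion on every call. A minimal sketch of how a test might wire the new hooks up, assuming the imports from the hunk above; the helper name and the way the `ProjectRouter` is obtained are illustrative, not part of this diff:

```go
import (
    "sync"

    ga "google.golang.org/api/compute/v1"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
)

// newMockGCEWithInstanceGroups is a hypothetical helper: it seeds
// MockInstanceGroups' X field with empty InstanceGroupAttributes and
// installs the hooks, so Add/List/Remove calls all share that state.
func newMockGCEWithInstanceGroups(pr cloud.ProjectRouter) *cloud.MockGCE {
    m := cloud.NewMockGCE(pr)
    m.MockInstanceGroups.X = mock.InstanceGroupAttributes{
        InstanceMap: make(map[meta.Key]map[string]*ga.InstanceWithNamedPorts),
        Lock:        &sync.Mutex{},
    }
    m.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook
    m.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook
    m.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook
    return m
}
```

Because each hook reads `m.X` back out with a type assertion and writes the updated value back, the membership map survives across calls while the generated mock itself stays stateless.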
pkg/cloudprovider/providers/gce/gce_loadbalancer_external_test.go:

@@ -18,112 +18,45 @@ package gce

 import (
     "fmt"
-    "net/http"
     "testing"

     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     computealpha "google.golang.org/api/compute/v0.alpha"
-    compute "google.golang.org/api/compute/v1"

     "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
-    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
-    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

-const (
-    projectID   = "test-project"
-    region      = "us-central1"
-    zoneName    = "us-central1-b"
-    nodeName    = "test-node-1"
-    clusterName = "Test Cluster Name"
-    clusterID   = "test-cluster-id"
-    serviceName = ""
-)
-
-var apiService = &v1.Service{
-    Spec: v1.ServiceSpec{
-        SessionAffinity: v1.ServiceAffinityClientIP,
-        Type:            v1.ServiceTypeClusterIP,
-        Ports:           []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: int32(123)}},
-    },
-}
-
-type fakeRoundTripper struct{}
-
-func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
-    return nil, fmt.Errorf("err: test used fake http client")
-}
-
-func fakeGCECloud() (*GCECloud, error) {
-    c := &http.Client{Transport: &fakeRoundTripper{}}
-
-    service, err := compute.New(c)
-    if err != nil {
-        return nil, err
-    }
-
-    // Used in disk unit tests
-    fakeManager := newFakeManager(projectID, region)
-    zonesWithNodes := createNodeZones([]string{zoneName})
-
-    alphaFeatureGate, err := NewAlphaFeatureGate([]string{})
-    if err != nil {
-        return nil, err
-    }
-
-    gce := &GCECloud{
-        region:             region,
-        service:            service,
-        manager:            fakeManager,
-        managedZones:       []string{zoneName},
-        projectID:          projectID,
-        networkProjectID:   projectID,
-        AlphaFeatureGate:   alphaFeatureGate,
-        nodeZones:          zonesWithNodes,
-        nodeInformerSynced: func() bool { return true },
-    }
-
-    cloud := cloud.NewMockGCE(&gceProjectRouter{gce})
-    cloud.MockTargetPools.AddInstanceHook = mock.AddInstanceHook
-    cloud.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook
-    cloud.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook
-    cloud.MockAddresses.InsertHook = mock.InsertAddressHook
-    cloud.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook
-
-    gce.c = cloud
-
-    return gce, nil
-}
-
 func TestEnsureStaticIP(t *testing.T) {
-    gce, err := fakeGCECloud()
+    gce, err := fakeGCECloud(DefaultTestClusterValues())
     require.NoError(t, err)

     ipName := "some-static-ip"
+    serviceName := "some-service"

     // First ensure call
-    ip, existed, err := ensureStaticIP(gce, ipName, serviceName, region, "", cloud.NetworkTierDefault)
+    ip, existed, err := ensureStaticIP(gce, ipName, serviceName, gce.region, "", cloud.NetworkTierDefault)
     if err != nil || existed {
-        t.Fatalf(`ensureStaticIP(%v, %v, %v, %v, "") = %v, %v, %v; want valid ip, false, nil`, gce, ipName, serviceName, region, ip, existed, err)
+        t.Fatalf(`ensureStaticIP(%v, %v, %v, %v, "") = %v, %v, %v; want valid ip, false, nil`, gce, ipName, serviceName, gce.region, ip, existed, err)
     }

     // Second ensure call
     var ipPrime string
-    ipPrime, existed, err = ensureStaticIP(gce, ipName, serviceName, region, ip, cloud.NetworkTierDefault)
+    ipPrime, existed, err = ensureStaticIP(gce, ipName, serviceName, gce.region, ip, cloud.NetworkTierDefault)
     if err != nil || !existed || ip != ipPrime {
-        t.Fatalf(`ensureStaticIP(%v, %v, %v, %v, %v) = %v, %v, %v; want %v, true, nil`, gce, ipName, serviceName, region, ip, ipPrime, existed, err, ip)
+        t.Fatalf(`ensureStaticIP(%v, %v, %v, %v, %v) = %v, %v, %v; want %v, true, nil`, gce, ipName, serviceName, gce.region, ip, ipPrime, existed, err, ip)
     }
 }

 func TestEnsureStaticIPWithTier(t *testing.T) {
-    s, err := fakeGCECloud()
+    s, err := fakeGCECloud(DefaultTestClusterValues())
     require.NoError(t, err)

+    serviceName := "some-service"
+
     for desc, tc := range map[string]struct {
         name    string
         netTier cloud.NetworkTier
@@ -141,13 +74,13 @@ func TestEnsureStaticIPWithTier(t *testing.T) {
         },
     } {
         t.Run(desc, func(t *testing.T) {
-            ip, existed, err := ensureStaticIP(s, tc.name, serviceName, region, "", tc.netTier)
+            ip, existed, err := ensureStaticIP(s, tc.name, serviceName, s.region, "", tc.netTier)
             assert.NoError(t, err)
             assert.False(t, existed)
             assert.NotEqual(t, ip, "")
             // Get the Address from the fake address service and verify that the tier
             // is set correctly.
-            alphaAddr, err := s.GetAlphaRegionAddress(tc.name, region)
+            alphaAddr, err := s.GetAlphaRegionAddress(tc.name, s.region)
             require.NoError(t, err)
             assert.Equal(t, tc.expected, alphaAddr.NetworkTier)
         })
@@ -196,13 +129,13 @@ func TestVerifyRequestedIP(t *testing.T) {
         },
     } {
         t.Run(desc, func(t *testing.T) {
-            s, err := fakeGCECloud()
+            s, err := fakeGCECloud(DefaultTestClusterValues())
             require.NoError(t, err)

             for _, addr := range tc.addrList {
-                s.ReserveAlphaRegionAddress(addr, region)
+                s.ReserveAlphaRegionAddress(addr, s.region)
             }
-            isUserOwnedIP, err := verifyUserRequestedIP(s, region, tc.requestedIP, tc.fwdRuleIP, lbRef, tc.netTier)
+            isUserOwnedIP, err := verifyUserRequestedIP(s, s.region, tc.requestedIP, tc.fwdRuleIP, lbRef, tc.netTier)
             assert.Equal(t, tc.expectErr, err != nil, fmt.Sprintf("err: %v", err))
             assert.Equal(t, tc.expectUserOwned, isUserOwnedIP)
         })
@@ -213,7 +146,9 @@ func TestCreateForwardingRuleWithTier(t *testing.T) {
     // Common variables among the tests.
     ports := []v1.ServicePort{{Name: "foo", Protocol: v1.ProtocolTCP, Port: int32(123)}}
     target := "test-target-pool"
-    svcName := "foo-svc"
+    vals := DefaultTestClusterValues()
+    serviceName := "foo-svc"

     baseLinkUrl := "https://www.googleapis.com/compute/%v/projects/%v/regions/%v/forwardingRules/%v"

     for desc, tc := range map[string]struct {
@@ -230,7 +165,7 @@ func TestCreateForwardingRuleWithTier(t *testing.T) {
                 PortRange:   "123-123",
                 Target:      target,
                 NetworkTier: "PREMIUM",
-                SelfLink:    fmt.Sprintf(baseLinkUrl, "v1", projectID, region, "lb-1"),
+                SelfLink:    fmt.Sprintf(baseLinkUrl, "v1", vals.ProjectID, vals.Region, "lb-1"),
             },
         },
         "Standard tier": {
@@ -243,21 +178,21 @@ func TestCreateForwardingRuleWithTier(t *testing.T) {
                 PortRange:   "123-123",
                 Target:      target,
                 NetworkTier: "STANDARD",
-                SelfLink:    fmt.Sprintf(baseLinkUrl, "alpha", projectID, region, "lb-2"),
+                SelfLink:    fmt.Sprintf(baseLinkUrl, "alpha", vals.ProjectID, vals.Region, "lb-2"),
             },
         },
     } {
         t.Run(desc, func(t *testing.T) {
-            s, err := fakeGCECloud()
+            s, err := fakeGCECloud(vals)
             require.NoError(t, err)

             lbName := tc.expectedRule.Name
             ipAddr := tc.expectedRule.IPAddress

-            err = createForwardingRule(s, lbName, svcName, region, ipAddr, target, ports, tc.netTier)
+            err = createForwardingRule(s, lbName, serviceName, s.region, ipAddr, target, ports, tc.netTier)
             assert.NoError(t, err)

-            alphaRule, err := s.GetAlphaRegionForwardingRule(lbName, region)
+            alphaRule, err := s.GetAlphaRegionForwardingRule(lbName, s.region)
             assert.NoError(t, err)
             assert.Equal(t, tc.expectedRule, alphaRule)
         })
@@ -267,7 +202,7 @@ func TestCreateForwardingRuleWithTier(t *testing.T) {
 func TestDeleteAddressWithWrongTier(t *testing.T) {
     lbRef := "test-lb"

-    s, err := fakeGCECloud()
+    s, err := fakeGCECloud(DefaultTestClusterValues())
     require.NoError(t, err)

     // Enable the cloud.NetworkTiers feature
@@ -304,17 +239,17 @@ func TestDeleteAddressWithWrongTier(t *testing.T) {
     } {
         t.Run(desc, func(t *testing.T) {
             for _, addr := range tc.addrList {
-                s.ReserveAlphaRegionAddress(addr, region)
+                s.ReserveAlphaRegionAddress(addr, s.region)
             }

             // Sanity check to ensure we inject the right address.
-            _, err = s.GetRegionAddress(tc.addrName, region)
+            _, err = s.GetRegionAddress(tc.addrName, s.region)
             require.NoError(t, err)

-            err = deleteAddressWithWrongTier(s, region, tc.addrName, lbRef, tc.netTier)
+            err = deleteAddressWithWrongTier(s, s.region, tc.addrName, lbRef, tc.netTier)
             assert.NoError(t, err)
             // Check whether the address still exists.
-            _, err = s.GetRegionAddress(tc.addrName, region)
+            _, err = s.GetRegionAddress(tc.addrName, s.region)
             if tc.expectDelete {
                 assert.True(t, isNotFound(err))
             } else {
@@ -324,56 +259,8 @@ func TestDeleteAddressWithWrongTier(t *testing.T) {
             }
         })
     }
 }

-func createAndInsertNodes(gce *GCECloud, nodeNames []string) ([]*v1.Node, error) {
-    nodes := []*v1.Node{}
-
-    for _, name := range nodeNames {
-        // Inserting the same node name twice causes an error - here we check if
-        // the instance exists already before insertion.
-        // TestUpdateExternalLoadBalancer inserts a new node, and relies on an older
-        // node to already have been inserted.
-        instance, _ := gce.getInstanceByName(name)
-
-        if instance == nil {
-            err := gce.InsertInstance(
-                projectID,
-                zoneName,
-                &compute.Instance{
-                    Name: name,
-                    Tags: &compute.Tags{
-                        Items: []string{name},
-                    },
-                },
-            )
-            if err != nil {
-                return nodes, err
-            }
-        }
-
-        nodes = append(
-            nodes,
-            &v1.Node{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: name,
-                    Labels: map[string]string{
-                        kubeletapis.LabelHostname: name,
-                    },
-                },
-                Status: v1.NodeStatus{
-                    NodeInfo: v1.NodeSystemInfo{
-                        KubeProxyVersion: "v1.7.2",
-                    },
-                },
-            },
-        )
-
-    }
-
-    return nodes, nil
-}
-
-func createExternalLoadBalancer(gce *GCECloud) (*v1.LoadBalancerStatus, error) {
-    nodes, err := createAndInsertNodes(gce, []string{nodeName})
+func createExternalLoadBalancer(gce *GCECloud, nodeNames []string, clusterName, clusterID, zoneName string) (*v1.LoadBalancerStatus, error) {
+    nodes, err := createAndInsertNodes(gce, nodeNames, zoneName)
     if err != nil {
         return nil, err
     }
@@ -381,27 +268,30 @@ func createExternalLoadBalancer(gce *GCECloud) (*v1.LoadBalancerStatus, error) {
     return gce.ensureExternalLoadBalancer(
         clusterName,
         clusterID,
-        apiService,
+        fakeApiService,
         nil,
         nodes,
     )
 }

 func TestEnsureExternalLoadBalancer(t *testing.T) {
-    gce, err := fakeGCECloud()
+    vals := DefaultTestClusterValues()
+    nodeName := "test-node-1"
+
+    gce, err := fakeGCECloud(vals)
     require.NoError(t, err)

-    status, err := createExternalLoadBalancer(gce)
+    status, err := createExternalLoadBalancer(gce, []string{nodeName}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
     assert.NoError(t, err)
     assert.NotEmpty(t, status.Ingress)

-    lbName := cloudprovider.GetLoadBalancerName(apiService)
-    hcName := MakeNodesHealthCheckName(clusterID)
+    lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
+    hcName := MakeNodesHealthCheckName(vals.ClusterID)

     // Check that Firewalls are created for the LoadBalancer and the HealthCheck
     fwNames := []string{
         MakeFirewallName(lbName),
-        MakeHealthCheckFirewallName(clusterID, hcName, true),
+        MakeHealthCheckFirewallName(vals.ClusterID, hcName, true),
     }

     for _, fwName := range fwNames {
@@ -412,7 +302,7 @@ func TestEnsureExternalLoadBalancer(t *testing.T) {
     }

     // Check that TargetPool is Created
-    pool, err := gce.GetTargetPool(lbName, region)
+    pool, err := gce.GetTargetPool(lbName, gce.region)
     require.NoError(t, err)
     assert.Equal(t, lbName, pool.Name)
     assert.NotEmpty(t, pool.HealthChecks)
@@ -424,7 +314,7 @@ func TestEnsureExternalLoadBalancer(t *testing.T) {
     assert.Equal(t, hcName, healthcheck.Name)

     // Check that ForwardingRule is created
-    fwdRule, err := gce.GetRegionForwardingRule(lbName, region)
+    fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
     require.NoError(t, err)
     assert.Equal(t, lbName, fwdRule.Name)
     assert.Equal(t, "TCP", fwdRule.IPProtocol)
@@ -432,74 +322,78 @@ func TestEnsureExternalLoadBalancer(t *testing.T) {
 }

 func TestUpdateExternalLoadBalancer(t *testing.T) {
-    gce, err := fakeGCECloud()
+    vals := DefaultTestClusterValues()
+    nodeName := "test-node-1"
+
+    gce, err := fakeGCECloud((DefaultTestClusterValues()))
     require.NoError(t, err)

-    _, err = createExternalLoadBalancer(gce)
+    _, err = createExternalLoadBalancer(gce, []string{nodeName}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
     assert.NoError(t, err)

     newNodeName := "test-node-2"
-    newNodes, err := createAndInsertNodes(gce, []string{nodeName, newNodeName})
+    newNodes, err := createAndInsertNodes(gce, []string{nodeName, newNodeName}, vals.ZoneName)
     assert.NoError(t, err)

     // Add the new node, then check that it is properly added to the TargetPool
-    err = gce.updateExternalLoadBalancer(clusterName, apiService, newNodes)
+    err = gce.updateExternalLoadBalancer("", fakeApiService, newNodes)
     assert.NoError(t, err)

-    lbName := cloudprovider.GetLoadBalancerName(apiService)
+    lbName := cloudprovider.GetLoadBalancerName(fakeApiService)

-    pool, err := gce.GetTargetPool(lbName, region)
+    pool, err := gce.GetTargetPool(lbName, gce.region)
     require.NoError(t, err)

     // TODO: when testify is updated to v1.2.0+, use ElementsMatch instead
     assert.Contains(
         t,
         pool.Instances,
-        fmt.Sprintf("/zones/%s/instances/%s", zoneName, nodeName),
+        fmt.Sprintf("/zones/%s/instances/%s", vals.ZoneName, nodeName),
     )

     assert.Contains(
         t,
         pool.Instances,
-        fmt.Sprintf("/zones/%s/instances/%s", zoneName, newNodeName),
+        fmt.Sprintf("/zones/%s/instances/%s", vals.ZoneName, newNodeName),
     )

-    newNodes, err = createAndInsertNodes(gce, []string{nodeName})
+    newNodes, err = createAndInsertNodes(gce, []string{nodeName}, vals.ZoneName)
     assert.NoError(t, err)

     // Remove the new node by calling updateExternalLoadBalancer with a list
     // only containing the old node, and test that the TargetPool no longer
     // contains the new node.
-    err = gce.updateExternalLoadBalancer(clusterName, apiService, newNodes)
+    err = gce.updateExternalLoadBalancer(vals.ClusterName, fakeApiService, newNodes)
     assert.NoError(t, err)

-    pool, err = gce.GetTargetPool(lbName, region)
+    pool, err = gce.GetTargetPool(lbName, gce.region)
     require.NoError(t, err)

     assert.Equal(
         t,
-        []string{fmt.Sprintf("/zones/%s/instances/%s", zoneName, nodeName)},
+        []string{fmt.Sprintf("/zones/%s/instances/%s", vals.ZoneName, nodeName)},
         pool.Instances,
     )
 }

 func TestEnsureExternalLoadBalancerDeleted(t *testing.T) {
-    gce, err := fakeGCECloud()
+    vals := DefaultTestClusterValues()
+    gce, err := fakeGCECloud(vals)
     require.NoError(t, err)

-    _, err = createExternalLoadBalancer(gce)
+    _, err = createExternalLoadBalancer(gce, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
     assert.NoError(t, err)

-    err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, apiService)
+    err = gce.ensureExternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, fakeApiService)
     assert.NoError(t, err)

-    lbName := cloudprovider.GetLoadBalancerName(apiService)
-    hcName := MakeNodesHealthCheckName(clusterID)
+    lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
+    hcName := MakeNodesHealthCheckName(vals.ClusterID)

     // Check that Firewalls are deleted for the LoadBalancer and the HealthCheck
     fwNames := []string{
         MakeFirewallName(lbName),
-        MakeHealthCheckFirewallName(clusterID, hcName, true),
+        MakeHealthCheckFirewallName(vals.ClusterID, hcName, true),
     }

     for _, fwName := range fwNames {
@@ -509,7 +403,7 @@ func TestEnsureExternalLoadBalancerDeleted(t *testing.T) {
     }

     // Check that TargetPool is deleted
-    pool, err := gce.GetTargetPool(lbName, region)
+    pool, err := gce.GetTargetPool(lbName, gce.region)
     require.Error(t, err)
     assert.Nil(t, pool)

@@ -519,36 +413,37 @@ func TestEnsureExternalLoadBalancerDeleted(t *testing.T) {
     assert.Nil(t, healthcheck)

     // Check forwarding rule is deleted
-    fwdRule, err := gce.GetRegionForwardingRule(lbName, region)
+    fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
     require.Error(t, err)
     assert.Nil(t, fwdRule)
 }

 func TestLoadBalancerWrongTierResourceDeletion(t *testing.T) {
-    gce, err := fakeGCECloud()
+    vals := DefaultTestClusterValues()
+    gce, err := fakeGCECloud(vals)
     require.NoError(t, err)

     // Enable the cloud.NetworkTiers feature
     gce.AlphaFeatureGate.features[AlphaFeatureNetworkTiers] = true
-    apiService.Annotations = map[string]string{NetworkTierAnnotationKey: "Premium"}
+    fakeApiService.Annotations = map[string]string{NetworkTierAnnotationKey: "Premium"}

     // cloud.NetworkTier defaults to Premium
-    desiredTier, err := gce.getServiceNetworkTier(apiService)
+    desiredTier, err := gce.getServiceNetworkTier(fakeApiService)
     require.NoError(t, err)
     assert.Equal(t, cloud.NetworkTierPremium, desiredTier)

-    lbName := cloudprovider.GetLoadBalancerName(apiService)
-    serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
+    lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
+    serviceName := types.NamespacedName{Namespace: fakeApiService.Namespace, Name: fakeApiService.Name}

     // create ForwardingRule and Address with the wrong tier
     err = createForwardingRule(
         gce,
         lbName,
         serviceName.String(),
-        region,
+        gce.region,
         "",
         gce.targetPoolURL(lbName),
-        apiService.Spec.Ports,
+        fakeApiService.Spec.Ports,
         cloud.NetworkTierStandard,
     )
     require.NoError(t, err)
@@ -559,18 +454,18 @@ func TestLoadBalancerWrongTierResourceDeletion(t *testing.T) {
         NetworkTier: cloud.NetworkTierStandard.ToGCEValue(),
     }

-    err = gce.ReserveAlphaRegionAddress(addressObj, region)
+    err = gce.ReserveAlphaRegionAddress(addressObj, gce.region)
     require.NoError(t, err)

-    _, err = createExternalLoadBalancer(gce)
+    _, err = createExternalLoadBalancer(gce, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
     require.NoError(t, err)

     // Expect forwarding rule tier to not be Standard
-    tier, err := gce.getNetworkTierFromForwardingRule(lbName, region)
+    tier, err := gce.getNetworkTierFromForwardingRule(lbName, gce.region)
     assert.NoError(t, err)
     assert.Equal(t, cloud.NetworkTierDefault.ToGCEValue(), tier)

     // Expect address to be deleted
-    _, err = gce.GetRegionAddress(lbName, region)
+    _, err = gce.GetRegionAddress(lbName, gce.region)
     assert.True(t, isNotFound(err))
 }
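Throughout the rewritten tests above, the constants deleted from this file resurface as fields on the value returned by `DefaultTestClusterValues()`, which is defined in the new `gce_loadbalancer_utils_test.go` (its diff appears at the end of this page, truncated). A hedged reconstruction from the deleted `const` block and the `vals.ProjectID`, `vals.Region`, `vals.ZoneName`, `vals.ClusterName`, and `vals.ClusterID` call sites — the struct name is an assumption, not the literal committed definition:

```go
// Reconstructed from the deleted const block and the vals.* usages in
// this diff; the real definition lives in the new utils file below.
type TestClusterValues struct {
    ProjectID   string
    Region      string
    ZoneName    string
    ClusterID   string
    ClusterName string
}

func DefaultTestClusterValues() TestClusterValues {
    return TestClusterValues{
        ProjectID:   "test-project",
        Region:      "us-central1",
        ZoneName:    "us-central1-b",
        ClusterID:   "test-cluster-id",
        ClusterName: "Test Cluster Name",
    }
}
```

Passing these values explicitly (rather than reading package-level constants) is what lets the same fixtures serve both the external and the new internal load-balancer tests.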
@ -0,0 +1,419 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package gce
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
compute "google.golang.org/api/compute/v1"
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
|
||||||
|
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||||
|
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createInternalLoadBalancer(gce *GCECloud, existingFwdRule *compute.ForwardingRule, nodeNames []string, clusterName, clusterID, zoneName string) (*v1.LoadBalancerStatus, error) {
|
||||||
|
nodes, err := createAndInsertNodes(gce, nodeNames, zoneName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return gce.ensureInternalLoadBalancer(
|
||||||
|
clusterName,
|
||||||
|
clusterID,
|
||||||
|
fakeApiService,
|
||||||
|
existingFwdRule,
|
||||||
|
nodes,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnsureInternalBackendServiceUpdates(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
nodeNames := []string{"test-node-1"}
|
||||||
|
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
|
||||||
|
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||||
|
igName := makeInstanceGroupName(vals.ClusterID)
|
||||||
|
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sharedBackend := shareBackendService(fakeApiService)
|
||||||
|
bsName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", fakeApiService.Spec.SessionAffinity)
|
||||||
|
err = gce.ensureInternalBackendService(bsName, "description", fakeApiService.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, "")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Update the Internal Backend Service with a new ServiceAffinity
|
||||||
|
err = gce.ensureInternalBackendService(bsName, "description", v1.ServiceAffinityNone, cloud.SchemeInternal, "TCP", igLinks, "")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bs, err := gce.GetRegionBackendService(bsName, gce.region)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, bs.SessionAffinity, strings.ToUpper(string(v1.ServiceAffinityNone)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnsureInternalBackendServiceGroups(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
nodeNames := []string{"test-node-1"}
|
||||||
|
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
|
||||||
|
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||||
|
igName := makeInstanceGroupName(vals.ClusterID)
|
||||||
|
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sharedBackend := shareBackendService(fakeApiService)
|
||||||
|
bsName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", fakeApiService.Spec.SessionAffinity)
|
||||||
|
err = gce.ensureInternalBackendService(bsName, "description", fakeApiService.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, "")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Update the BackendService with new Instances
|
||||||
|
newNodeNames := []string{"new-test-node-1", "new-test-node-2"}
|
||||||
|
err = gce.ensureInternalBackendServiceGroups(bsName, newNodeNames)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
bs, err := gce.GetRegionBackendService(bsName, gce.region)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Check that the instances are updated
|
||||||
|
newNodes, err := createAndInsertNodes(gce, newNodeNames, vals.ZoneName)
|
||||||
|
newIgLinks, err := gce.ensureInternalInstanceGroups(igName, newNodes)
|
||||||
|
backends := backendsFromGroupLinks(newIgLinks)
|
||||||
|
assert.Equal(t, bs.Backends, backends)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnsureInternalLoadBalancer(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
nodeName := "test-node-1"
|
||||||
|
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
status, err := createInternalLoadBalancer(gce, nil, []string{nodeName}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.NotEmpty(t, status.Ingress)
|
||||||
|
|
||||||
|
lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
|
||||||
|
|
||||||
|
// Check that Instance Group is created
|
||||||
|
igName := makeInstanceGroupName(vals.ClusterID)
|
||||||
|
ig, err := gce.GetInstanceGroup(igName, vals.ZoneName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, igName, ig.Name)
|
||||||
|
|
||||||
|
// Check that Firewalls are created for the LoadBalancer and the HealthCheck
|
||||||
|
fwNames := []string{
|
||||||
|
lbName,
|
||||||
|
makeHealthCheckFirewallName(lbName, vals.ClusterID, true),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fwName := range fwNames {
|
||||||
|
firewall, err := gce.GetFirewall(fwName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, []string{nodeName}, firewall.TargetTags)
|
||||||
|
assert.NotEmpty(t, firewall.SourceRanges)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that HealthCheck is created
|
||||||
|
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(fakeApiService)
|
||||||
|
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||||
|
healthcheck, err := gce.GetHealthCheck(hcName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, hcName, healthcheck.Name)
|
||||||
|
|
||||||
|
// Check that BackendService exists
|
||||||
|
sharedBackend := shareBackendService(fakeApiService)
|
||||||
|
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", fakeApiService.Spec.SessionAffinity)
|
||||||
|
backendServiceLink := gce.getBackendServiceLink(backendServiceName)
|
||||||
|
|
||||||
|
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "TCP", bs.Protocol)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
[]string{healthcheck.SelfLink},
|
||||||
|
bs.HealthChecks,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Check that ForwardingRule is created
|
||||||
|
fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, lbName, fwdRule.Name)
|
||||||
|
assert.Equal(t, "TCP", fwdRule.IPProtocol)
|
||||||
|
assert.Equal(t, backendServiceLink, fwdRule.BackendService)
|
||||||
|
// if no Subnetwork specified, defaults to the GCE NetworkURL
|
||||||
|
assert.Equal(t, gce.NetworkURL(), fwdRule.Subnetwork)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
nodeNames := []string{"test-node-1"}
|
||||||
|
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create the expected resources necessary for an Internal Load Balancer
|
||||||
|
nm := types.NamespacedName{Name: fakeApiService.Name, Namespace: fakeApiService.Namespace}
|
||||||
|
lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
|
||||||
|
|
||||||
|
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(fakeApiService)
|
||||||
|
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||||
|
hcPath, hcPort := GetNodesHealthCheckPath(), GetNodesHealthCheckPort()
|
||||||
|
existingHC := newInternalLBHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort)
|
||||||
|
err = gce.CreateHealthCheck(existingHC)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||||
|
igName := makeInstanceGroupName(vals.ClusterID)
|
||||||
|
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sharedBackend := shareBackendService(fakeApiService)
|
||||||
|
bsDescription := makeBackendServiceDescription(nm, sharedBackend)
|
||||||
|
bsName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", fakeApiService.Spec.SessionAffinity)
|
||||||
|
err = gce.ensureInternalBackendService(bsName, bsDescription, fakeApiService.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, existingHC.SelfLink)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = createInternalLoadBalancer(gce, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
|
||||||
|
|
||||||
|
// Create a ForwardingRule that's missing an IP address
|
||||||
|
existingFwdRule := &compute.ForwardingRule{
|
||||||
|
Name: lbName,
|
||||||
|
IPAddress: "",
|
||||||
|
Ports: []string{"123"},
|
||||||
|
IPProtocol: "TCP",
|
||||||
|
LoadBalancingScheme: string(cloud.SchemeInternal),
|
||||||
|
}
|
||||||
|
gce.CreateRegionForwardingRule(existingFwdRule, gce.region)
|
||||||
|
|
||||||
|
// Create a Firewall that's missing a Description
|
||||||
|
existingFirewall := &compute.Firewall{
|
||||||
|
Name: lbName,
|
||||||
|
Network: gce.networkURL,
|
||||||
|
Allowed: []*compute.FirewallAllowed{
|
||||||
|
{
|
||||||
|
IPProtocol: "tcp",
|
||||||
|
Ports: []string{"123"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
gce.CreateFirewall(existingFirewall)
|
||||||
|
|
||||||
|
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(fakeApiService)
|
||||||
|
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||||
|
hcPath, hcPort := GetNodesHealthCheckPath(), GetNodesHealthCheckPort()
|
||||||
|
nm := types.NamespacedName{Name: fakeApiService.Name, Namespace: fakeApiService.Namespace}
|
||||||
|
|
||||||
|
// Create a healthcheck with an incorrect threshold
|
||||||
|
existingHC := newInternalLBHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort)
|
||||||
|
existingHC.HealthyThreshold = gceHcHealthyThreshold * 10
|
||||||
|
gce.CreateHealthCheck(existingHC)
|
||||||
|
|
||||||
|
// Create a backend Service that's missing Description and Backends
|
||||||
|
sharedBackend := shareBackendService(fakeApiService)
|
||||||
|
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", fakeApiService.Spec.SessionAffinity)
|
||||||
|
existingBS := &compute.BackendService{
|
||||||
|
Name: lbName,
|
||||||
|
Protocol: "TCP",
|
||||||
|
HealthChecks: []string{existingHC.SelfLink},
|
||||||
|
SessionAffinity: translateAffinityType(fakeApiService.Spec.SessionAffinity),
|
||||||
|
LoadBalancingScheme: string(cloud.SchemeInternal),
|
||||||
|
}
|
||||||
|
|
||||||
|
gce.CreateRegionBackendService(existingBS, gce.region)
|
||||||
|
existingFwdRule.BackendService = existingBS.Name
|
||||||
|
|
||||||
|
_, err = createInternalLoadBalancer(gce, existingFwdRule, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Expect new resources with the correct attributes to be created
|
||||||
|
rule, _ := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||||
|
assert.NotEqual(t, existingFwdRule, rule)
|
||||||
|
|
||||||
|
firewall, err := gce.GetFirewall(lbName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.NotEqual(t, firewall, existingFirewall)
|
||||||
|
|
||||||
|
healthcheck, err := gce.GetHealthCheck(hcName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.NotEqual(t, healthcheck, existingHC)
|
||||||
|
|
||||||
|
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.NotEqual(t, bs, existingBS)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateInternalLoadBalancerBackendServices(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
nodeName := "test-node-1"
|
||||||
|
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = createInternalLoadBalancer(gce, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// BackendService exists prior to updateInternalLoadBalancer call, but has
|
||||||
|
// incorrect (missing) attributes.
|
||||||
|
// ensureInternalBackendServiceGroups is called and creates the correct
|
||||||
|
// BackendService
|
||||||
|
lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
|
||||||
|
sharedBackend := shareBackendService(fakeApiService)
|
||||||
|
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", fakeApiService.Spec.SessionAffinity)
|
||||||
|
existingBS := &compute.BackendService{
|
||||||
|
Name: backendServiceName,
|
||||||
|
Protocol: "TCP",
|
||||||
|
SessionAffinity: translateAffinityType(fakeApiService.Spec.SessionAffinity),
|
||||||
|
LoadBalancingScheme: string(cloud.SchemeInternal),
|
||||||
|
}
|
||||||
|
|
||||||
|
gce.CreateRegionBackendService(existingBS, gce.region)
|
||||||
|
|
||||||
|
nodes, err := createAndInsertNodes(gce, []string{nodeName}, vals.ZoneName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = gce.updateInternalLoadBalancer(vals.ClusterName, vals.ClusterID, fakeApiService, nodes)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Check that the new BackendService has the correct attributes
|
||||||
|
url_base := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s", vals.ProjectID)
|
||||||
|
|
||||||
|
assert.NotEqual(t, existingBS, bs)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
bs.SelfLink,
|
||||||
|
fmt.Sprintf("%s/regions/%s/backendServices/%s", url_base, vals.Region, bs.Name),
|
||||||
|
)
|
||||||
|
assert.Equal(t, bs.Description, `{"kubernetes.io/service-name":"/"}`)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
bs.HealthChecks,
|
||||||
|
[]string{fmt.Sprintf("%s/healthChecks/k8s-%s-node", url_base, vals.ClusterID)},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateInternalLoadBalancerNodes(t *testing.T) {
|
||||||
|
vals := DefaultTestClusterValues()
|
||||||
|
gce, err := fakeGCECloud(vals)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = createInternalLoadBalancer(gce, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Remove the old Node and insert a new Node.
|
||||||
|
newNodeName := "test-node-2"
|
||||||
|
newNodes, err := createAndInsertNodes(gce, []string{newNodeName}, vals.ZoneName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = gce.updateInternalLoadBalancer(vals.ClusterName, vals.ClusterID, fakeApiService, newNodes)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Expect node 1 to be deleted and node 2 to still exist
|
||||||
|
igName := makeInstanceGroupName(vals.ClusterID)
|
||||||
|
instances, err := gce.ListInstancesInInstanceGroup(igName, vals.ZoneName, "ALL")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, 1, len(instances))
|
||||||
|
assert.Contains(
|
||||||
|
t,
|
||||||
|
instances[0].Instance,
|
||||||
|
fmt.Sprintf("projects/%s/zones/%s/instances/%s", vals.ProjectID, vals.ZoneName, newNodeName),
|
||||||
|
)
|
||||||
|
}

func TestEnsureInternalLoadBalancerDeleted(t *testing.T) {
	vals := DefaultTestClusterValues()
	gce, err := fakeGCECloud(vals)
	require.NoError(t, err)

	_, err = createInternalLoadBalancer(gce, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
	assert.NoError(t, err)

	err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, fakeApiService)
	assert.NoError(t, err)

	lbName := cloudprovider.GetLoadBalancerName(fakeApiService)
	sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(fakeApiService)
	hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)

	// Check that Firewalls are deleted for the LoadBalancer and the HealthCheck
	fwNames := []string{
		MakeFirewallName(lbName),
		MakeHealthCheckFirewallName(vals.ClusterID, hcName, true),
	}

	for _, fwName := range fwNames {
		firewall, err := gce.GetFirewall(fwName)
		require.Error(t, err)
		assert.Nil(t, firewall)
	}

	// Check that Instance Group is deleted
	igName := makeInstanceGroupName(vals.ClusterID)
	ig, err := gce.GetInstanceGroup(igName, vals.ZoneName)
	assert.Error(t, err)
	assert.Nil(t, ig)

	// Check that HealthCheck is deleted
	healthcheck, err := gce.GetHealthCheck(hcName)
	require.Error(t, err)
	assert.Nil(t, healthcheck)

	// Check forwarding rule is deleted
	fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
	require.Error(t, err)
	assert.Nil(t, fwdRule)
}
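
// ensureInternalLoadBalancerDeleted should be idempotent: the service
// controller may retry deletion, so a second pass over already-removed
// resources must not error.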
func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) {
	vals := DefaultTestClusterValues()
	gce, err := fakeGCECloud(vals)
	require.NoError(t, err)

	_, err = createInternalLoadBalancer(gce, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
	assert.NoError(t, err)

	err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, fakeApiService)
	assert.NoError(t, err)

	// Deleting the loadbalancer and resources again should not cause an error.
	err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, fakeApiService)
	assert.NoError(t, err)
}
175  pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go  Normal file
@ -0,0 +1,175 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file contains shared functions and variables for setting up the
// ExternalLoadBalancer and InternalLoadBalancer tests. It currently cannot
// live in a separate package from GCE because that would cause a circular
// import.

package gce

import (
	"fmt"
	"net/http"
	"sync"

	compute "google.golang.org/api/compute/v1"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

type TestClusterValues struct {
	ProjectID   string
	Region      string
	ZoneName    string
	ClusterID   string
	ClusterName string
}

func DefaultTestClusterValues() TestClusterValues {
	return TestClusterValues{
		ProjectID:   "test-project",
		Region:      "us-central1",
		ZoneName:    "us-central1-b",
		ClusterID:   "test-cluster-id",
		ClusterName: "Test Cluster Name",
	}
}
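
// fakeApiService is a minimal Service fixture shared across the load balancer
// tests; ClientIP session affinity exercises the affinity translation paths.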
var fakeApiService = &v1.Service{
	Spec: v1.ServiceSpec{
		SessionAffinity: v1.ServiceAffinityClientIP,
		Type:            v1.ServiceTypeClusterIP,
		Ports:           []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: int32(123)}},
	},
}
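
// fakeRoundTripper fails every request, so any code path that bypasses the
// mock layer and tries to reach the real GCE API surfaces as a test error.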
type fakeRoundTripper struct{}

func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
	return nil, fmt.Errorf("err: test used fake http client")
}

func fakeGCECloud(vals TestClusterValues) (*GCECloud, error) {
	client := &http.Client{Transport: &fakeRoundTripper{}}

	service, err := compute.New(client)
	if err != nil {
		return nil, err
	}

	// Used in disk unit tests
	fakeManager := newFakeManager(vals.ProjectID, vals.Region)
	zonesWithNodes := createNodeZones([]string{vals.ZoneName})

	alphaFeatureGate, err := NewAlphaFeatureGate([]string{})
	if err != nil {
		return nil, err
	}

	gce := &GCECloud{
		region:             vals.Region,
		service:            service,
		manager:            fakeManager,
		managedZones:       []string{vals.ZoneName},
		projectID:          vals.ProjectID,
		networkProjectID:   vals.ProjectID,
		AlphaFeatureGate:   alphaFeatureGate,
		nodeZones:          zonesWithNodes,
		nodeInformerSynced: func() bool { return true },
	}

	c := cloud.NewMockGCE(&gceProjectRouter{gce})
	c.MockTargetPools.AddInstanceHook = mock.AddInstanceHook
	c.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook
	c.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook
	c.MockAddresses.InsertHook = mock.InsertAddressHook
	c.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook

	c.MockInstanceGroups.X = mock.InstanceGroupAttributes{
		InstanceMap: make(map[meta.Key]map[string]*compute.InstanceWithNamedPorts),
		Lock:        &sync.Mutex{},
	}
	c.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook
	c.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook
	c.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook

	c.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook
	c.MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook
	c.MockFirewalls.UpdateHook = mock.UpdateFirewallHook

	keyGA := meta.GlobalKey("key-ga")
	c.MockZones.Objects[*keyGA] = &cloud.MockZonesObj{
		Obj: &compute.Zone{Name: vals.ZoneName, Region: gce.getRegionLink(vals.Region)},
	}

	gce.c = c

	return gce, nil
}
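
// A minimal usage sketch (illustrative only, not part of this change): tests
// built on these helpers construct the fake cloud, insert nodes, and then
// exercise the load balancer path under test:
//
//	vals := DefaultTestClusterValues()
//	gce, err := fakeGCECloud(vals)
//	require.NoError(t, err)
//	_, err = createInternalLoadBalancer(gce, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
//	require.NoError(t, err)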

func createAndInsertNodes(gce *GCECloud, nodeNames []string, zoneName string) ([]*v1.Node, error) {
	nodes := []*v1.Node{}

	for _, name := range nodeNames {
		// Inserting the same node name twice causes an error - here we check if
		// the instance exists already before insertion.
		// TestUpdateExternalLoadBalancer inserts a new node, and relies on an older
		// node to already have been inserted.
		instance, _ := gce.getInstanceByName(name)

		if instance == nil {
			err := gce.InsertInstance(
				gce.ProjectID(),
				zoneName,
				&compute.Instance{
					Name: name,
					Tags: &compute.Tags{
						Items: []string{name},
					},
				},
			)
			if err != nil {
				return nodes, err
			}
		}
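
		// The hostname and zone failure-domain labels below let zone-aware
		// code paths map each fake node back to the given zone.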
		nodes = append(
			nodes,
			&v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
					Labels: map[string]string{
						kubeletapis.LabelHostname:          name,
						kubeletapis.LabelZoneFailureDomain: zoneName,
					},
				},
				Status: v1.NodeStatus{
					NodeInfo: v1.NodeSystemInfo{
						KubeProxyVersion: "v1.7.2",
					},
				},
			},
		)
	}

	return nodes, nil
}