Make firewall test get tag from config instead of instance and fix multi-zone issue
parent 0acdc89d96
commit 23dfe56b53
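The multi-zone part of the fix boils down to resolving an instance's zone from the node's failure-domain label (falling back to the configured zone) before mutating instance tags, and threading that zone through SetInstanceTags. A minimal, self-contained sketch of that fallback logic, assuming the literal label key behind kubeletapis.LabelZoneFailureDomain:

package main

import "fmt"

// resolveZone mirrors the multi-zone handling in this commit: prefer the zone
// recorded in the node's failure-domain label and fall back to the configured
// default zone when the label is absent.
func resolveZone(nodeLabels map[string]string, defaultZone string) string {
	// Assumed literal value of kubeletapis.LabelZoneFailureDomain.
	if zone, ok := nodeLabels["failure-domain.beta.kubernetes.io/zone"]; ok {
		return zone
	}
	return defaultZone
}

func main() {
	labels := map[string]string{"failure-domain.beta.kubernetes.io/zone": "us-central1-b"}
	fmt.Println(resolveZone(labels, "us-central1-a")) // prints us-central1-b
	fmt.Println(resolveZone(nil, "us-central1-a"))    // prints us-central1-a
}

The diff below applies exactly this pattern in the firewall e2e test and passes the resolved zone into framework.SetInstanceTags.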
@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
 
@@ -59,6 +60,8 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		framework.Logf("Got cluster ID: %v", clusterID)
 
 		jig := framework.NewServiceTestJig(cs, serviceName)
+		nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
+		Expect(nodeList).NotTo(BeNil())
 		nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
 		if len(nodesNames) <= 0 {
 			framework.Failf("Expect at least 1 node, got: %v", nodesNames)
@@ -84,14 +87,13 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
 
 		By("Checking if service's firewall rule is correct")
-		nodeTags := framework.GetInstanceTags(cloudConfig, nodesNames[0])
-		lbFw := framework.ConstructFirewallForLBService(svc, nodeTags.Items)
+		lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
 		fw, err := gceCloud.GetFirewall(lbFw.Name)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
 
 		By("Checking if service's nodes health check firewall rule is correct")
-		nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, nodeTags.Items, true)
+		nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
 		fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(framework.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
@@ -107,7 +109,7 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Waiting for the correct local traffic health check firewall rule to be created")
-		localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, nodeTags.Items, false)
+		localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
 		fw, err = framework.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(framework.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
@@ -132,11 +134,17 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 		// that's much harder to do in the current e2e framework.
 		By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0]))
 		nodesSet.Delete(nodesNames[0])
-		removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], []string{})
+		// Instance could run in a different zone in multi-zone test. Figure out which zone
+		// it is in before proceeding.
+		zone := cloudConfig.Zone
+		if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok {
+			zone = zoneInLabel
+		}
+		removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
 		defer func() {
 			By("Adding tags back to the node and wait till the traffic is recovered")
 			nodesSet.Insert(nodesNames[0])
-			framework.SetInstanceTags(cloudConfig, nodesNames[0], removedTags)
+			framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
 			// Make sure traffic is recovered before exit
 			Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
 		}()
@@ -146,19 +154,13 @@ var _ = framework.KubeDescribe("Firewall rule", func() {
 	})
 
 	It("should have correct firewall rules for e2e cluster", func() {
-		By("Gathering firewall related information")
-		masterTags := framework.GetInstanceTags(cloudConfig, cloudConfig.MasterName)
-		Expect(len(masterTags.Items)).Should(Equal(1))
-
 		nodes := framework.GetReadySchedulableNodesOrDie(cs)
 		if len(nodes.Items) <= 0 {
 			framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
 		}
-		nodeTags := framework.GetInstanceTags(cloudConfig, nodes.Items[0].Name)
-		Expect(len(nodeTags.Items)).Should(Equal(1))
 
 		By("Checking if e2e firewall rules are correct")
-		for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, masterTags.Items[0], nodeTags.Items[0], cloudConfig.Network) {
+		for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network) {
 			fw, err := gceCloud.GetFirewall(expFw.Name)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
@@ -51,13 +51,13 @@ func MakeFirewallNameForLBService(name string) string {
 }
 
 // ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
-func ConstructFirewallForLBService(svc *v1.Service, nodesTags []string) *compute.Firewall {
+func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
 		Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
-	fw.TargetTags = nodesTags
+	fw.TargetTags = []string{nodeTag}
 	if svc.Spec.LoadBalancerSourceRanges == nil {
 		fw.SourceRanges = []string{"0.0.0.0/0"}
 	} else {
@@ -77,13 +77,13 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
 }
 
 // ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
-func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodesTags []string, isNodesHealthCheck bool) *compute.Firewall {
+func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
 		Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
-	fw.TargetTags = nodesTags
+	fw.TargetTags = []string{nodeTag}
 	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
 	healthCheckPort := gcecloud.GetNodesHealthCheckPort()
 	if !isNodesHealthCheck {
@@ -98,14 +98,6 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
 	return &fw
 }
 
-// GetNodeTags gets tags from one of the Kubernetes nodes
-func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) *compute.Tags {
-	nodes := GetReadySchedulableNodesOrDie(c)
-	Expect(len(nodes.Items) > 0).Should(BeTrue())
-	nodeTags := GetInstanceTags(cloudConfig, nodes.Items[0].Name)
-	return nodeTags
-}
-
 // GetInstanceTags gets tags from GCE instance with given name.
 func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
 	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
@@ -118,12 +110,12 @@ func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags
 }
 
 // SetInstanceTags sets tags on GCE instance with given name.
-func SetInstanceTags(cloudConfig CloudConfig, instanceName string, tags []string) []string {
+func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string {
 	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
 	// Re-get instance everytime because we need the latest fingerprint for updating metadata
 	resTags := GetInstanceTags(cloudConfig, instanceName)
 	_, err := gceCloud.GetComputeService().Instances.SetTags(
-		cloudConfig.ProjectID, cloudConfig.Zone, instanceName,
+		cloudConfig.ProjectID, zone, instanceName,
 		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
 	if err != nil {
 		Failf("failed to set instance tags: %v", err)
@@ -974,14 +974,13 @@ func (j *IngressTestJig) GetIngressNodePorts() []string {
 }
 
 // ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource
-func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressController) *compute.Firewall {
-	nodeTags := GetNodeTags(j.Client, gceController.Cloud)
+func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressController, nodeTag string) *compute.Firewall {
 	nodePorts := j.GetIngressNodePorts()
 
 	fw := compute.Firewall{}
 	fw.Name = gceController.GetFirewallRuleName()
 	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
-	fw.TargetTags = nodeTags.Items
+	fw.TargetTags = []string{nodeTag}
 	fw.Allowed = []*compute.FirewallAllowed{
 		{
 			IPProtocol: "tcp",
@@ -36,6 +36,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 		ns               string
 		jig              *framework.IngressTestJig
 		conformanceTests []framework.IngressConformanceTests
+		cloudConfig      framework.CloudConfig
 	)
 	f := framework.NewDefaultFramework("ingress")
 
@@ -43,6 +44,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 		f.BeforeEach()
 		jig = framework.NewIngressTestJig(f.ClientSet)
 		ns = f.Namespace.Name
+		cloudConfig = framework.TestContext.CloudConfig
 
 		// this test wants powerful permissions. Since the namespace names are unique, we can leave this
 		// lying around so we don't have to race any caches
@@ -122,7 +124,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 
 			By("should have correct firewall rule for ingress")
 			fw := gceController.GetFirewallRule()
-			expFw := jig.ConstructFirewallForIngress(gceController)
+			expFw := jig.ConstructFirewallForIngress(gceController, cloudConfig.NodeTag)
 			// Passed the last argument as `true` to verify the backend ports is a subset
 			// of the allowed ports in firewall rule, given there may be other existing
 			// ingress resources and backends we are not aware of.