Fixing kubemci conformance test

parent cdfbb54db2
commit bd6b5c8092
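The core of the change to CreateIngressComformanceTests is visible in the hunks below: the test list is now built in a slice and the TLS-certificate-update case is appended only when the ingress class is not the kubemci (multicluster) class. A minimal, self-contained sketch of that pattern follows; the type, field, and constant names here are assumptions for illustration, not the framework's actual identifiers.

package main

import "fmt"

// Hypothetical, simplified stand-in for the conformance-test entries used in
// the diff below.
type conformanceTest struct {
	entryLog string
	execute  func()
	exitLog  string
}

const multiclusterIngressClass = "gce-multi-cluster" // assumed kubemci class value

// buildTests mirrors the pattern this commit applies: collect the common tests
// in a slice, append the TLS-certificate-update test only for non-kubemci
// classes, then return the slice.
func buildTests(ingressClass string) []conformanceTest {
	tests := []conformanceTest{
		{entryLog: "should create a basic HTTP ingress", execute: func() {}, exitLog: "waiting for urls to respond"},
	}
	if ingressClass != multiclusterIngressClass {
		tests = append(tests, conformanceTest{
			entryLog: "should update SSL certificate with modified hostname",
			execute:  func() {},
			exitLog:  "waiting for updated certificates to accept requests",
		})
	}
	return tests
}

func main() {
	fmt.Println(len(buildTests(multiclusterIngressClass))) // 1: TLS update test skipped for kubemci
	fmt.Println(len(buildTests("gce")))                     // 2: TLS update test included for other classes
}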
@@ -165,7 +165,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
     updateURLMapHost := "bar.baz.com"
     updateURLMapPath := "/testurl"
     // Platform agnostic list of tests that must be satisfied by all controllers
-    return []IngressConformanceTests{
+    tests := []IngressConformanceTests{
         {
             fmt.Sprintf("should create a basic HTTP ingress"),
             func() { jig.CreateIngress(manifestPath, ns, annotations, annotations) },
@@ -176,27 +176,6 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
             func() { jig.AddHTTPS(tlsSecretName, tlsHost) },
             fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
         },
-        {
-            fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
-            func() {
-                jig.Update(func(ing *extensions.Ingress) {
-                    newRules := []extensions.IngressRule{}
-                    for _, rule := range ing.Spec.Rules {
-                        if rule.Host != tlsHost {
-                            newRules = append(newRules, rule)
-                            continue
-                        }
-                        newRules = append(newRules, extensions.IngressRule{
-                            Host:             updatedTLSHost,
-                            IngressRuleValue: rule.IngressRuleValue,
-                        })
-                    }
-                    ing.Spec.Rules = newRules
-                })
-                jig.AddHTTPS(tlsSecretName, updatedTLSHost)
-            },
-            fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
-        },
         {
             fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
             func() {
@@ -233,6 +212,31 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
             fmt.Sprintf("Waiting for path updates to reflect in L7"),
         },
     }
+    // Skip the Update TLS cert test for kubemci: https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/141.
+    if jig.Class != MulticlusterIngressClassValue {
+        tests = append(tests, IngressConformanceTests{
+            fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
+            func() {
+                jig.Update(func(ing *extensions.Ingress) {
+                    newRules := []extensions.IngressRule{}
+                    for _, rule := range ing.Spec.Rules {
+                        if rule.Host != tlsHost {
+                            newRules = append(newRules, rule)
+                            continue
+                        }
+                        newRules = append(newRules, extensions.IngressRule{
+                            Host:             updatedTLSHost,
+                            IngressRuleValue: rule.IngressRuleValue,
+                        })
+                    }
+                    ing.Spec.Rules = newRules
+                })
+                jig.AddHTTPS(tlsSecretName, updatedTLSHost)
+            },
+            fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
+        })
+    }
+    return tests
 }
 
 // GenerateRSACerts generates a basic self signed certificate using a key length
@@ -1131,7 +1135,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m
     for k, v := range ingAnnotations {
         j.Ingress.Annotations[k] = v
     }
-    j.Logger.Infof(fmt.Sprintf("creating" + j.Ingress.Name + " ingress"))
+    j.Logger.Infof(fmt.Sprintf("creating " + j.Ingress.Name + " ingress"))
     j.Ingress, err = j.runCreate(j.Ingress)
     ExpectNoError(err)
 }
@@ -1334,7 +1338,9 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st
         }
         for _, p := range rules.IngressRuleValue.HTTP.Paths {
             if waitForNodePort {
-                if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil {
+                nodePort := int(p.Backend.ServicePort.IntVal)
+                if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, nodePort); err != nil {
+                    j.Logger.Infof("Error in waiting for nodeport %d on service %v/%v: %s", nodePort, ing.Namespace, p.Backend.ServiceName, err)
                     return err
                 }
             }
@@ -2147,7 +2147,6 @@ func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
 
 func (b kubectlBuilder) ExecOrDie() string {
     str, err := b.Exec()
-    Logf("stdout: %q", str)
     // In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
     // Note that we're still dying after retrying so that we can get visibility to triage it further.
     if isTimeout(err) {
@@ -2206,6 +2205,7 @@ func (b kubectlBuilder) Exec() (string, error) {
         return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
     }
     Logf("stderr: %q", stderr.String())
+    Logf("stdout: %q", stdout.String())
     return stdout.String(), nil
 }
 
@@ -602,10 +602,20 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
     })
 
     Describe("GCE [Slow] [Feature:kubemci]", func() {
+        var gceController *framework.GCEIngressController
+
         // Platform specific setup
         BeforeEach(func() {
             framework.SkipUnlessProviderIs("gce", "gke")
             jig.Class = framework.MulticlusterIngressClassValue
+            By("Initializing gce controller")
+            gceController = &framework.GCEIngressController{
+                Ns:     ns,
+                Client: jig.Client,
+                Cloud:  framework.TestContext.CloudConfig,
+            }
+            err := gceController.Init()
+            Expect(err).NotTo(HaveOccurred())
         })
 
         // Platform specific cleanup
@@ -619,16 +629,26 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
             }
             By("Deleting ingress")
             jig.TryDeleteIngress()
+
+            By("Cleaning up cloud resources")
+            Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
         })
 
         It("should conform to Ingress spec", func() {
             jig.PollInterval = 5 * time.Second
-            conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
+            // Use the randomly generated namespace name as the ip address name.
+            ipName := ns
+            // ip released when the rest of lb resources are deleted in CleanupGCEIngressController
+            ipAddress := gceController.CreateStaticIP(ipName)
+            By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress))
+            conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
+                framework.IngressStaticIPKey: ipName,
+            })
             for _, t := range conformanceTests {
                 By(t.EntryLog)
                 t.Execute()
                 By(t.ExitLog)
-                jig.WaitForIngress(true /*waitForNodePort*/)
+                jig.WaitForIngress(false /*waitForNodePort*/)
             }
         })
     })
@@ -3,6 +3,10 @@ kind: Ingress
 metadata:
   name: echomap
 spec:
+  # kubemci requires a default backend.
+  backend:
+    serviceName: echoheadersx
+    servicePort: 80
   rules:
   - host: foo.bar.com
     http:
@@ -22,4 +26,3 @@ spec:
         backend:
           serviceName: echoheadersx
           servicePort: 80
-