Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #42020 from nicksardo/ingress_upgrade
Automatic merge from submit-queue (batch tested with PRs 41644, 42020, 41753, 42206, 42212)

Ingress-glbc upgrade tests

Basically #41676 but with some fixes and added comments. @bprashanth has been away this week and it's desirable to have this in before code freeze.
commit effbad9ad0
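The diff below moves the ingress test utilities out of `test/e2e` into the shared `test/e2e/framework` package and exports them (`testJig` becomes `framework.IngressTestJig`, and so on) so the new `upgrades.IngressUpgradeTest` can reuse them. As a rough sketch of the resulting call pattern — assuming an initialized `*framework.Framework` `f` and Ginkgo's `By` in scope; the upgrade test's exact call sites are not part of this diff — a caller outside the framework package would now drive the jig like this:

```go
// Hypothetical caller, mirroring how test/e2e/ingress.go uses the exported API.
jig := framework.NewIngressTestJig(f.ClientSet)
jig.PollInterval = 5 * time.Second // poll faster, as the Nginx test below does

// Run the platform-agnostic conformance cases against the test namespace.
for _, t := range framework.CreateIngressComformanceTests(jig, f.Namespace.Name) {
	By(t.EntryLog)
	t.Execute()
	By(t.ExitLog)
	jig.WaitForIngress(true) // verify NodePorts, then poll every rule's URL
}

// The jig tracks the ingress it created, so cleanup is a single call.
jig.DeleteIngress()
```

Exporting the jig rather than duplicating it keeps the conformance cases identical between the load-balancing suite and the upgrade suite.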
@@ -48,7 +48,6 @@ go_library(
         "ha_master.go",
         "horizontal_pod_autoscaling.go",
         "ingress.go",
-        "ingress_utils.go",
         "initial_resources.go",
        "job.go",
         "kibana_logging.go",
@@ -179,7 +178,6 @@ go_library(
         "//vendor:golang.org/x/net/context",
         "//vendor:golang.org/x/net/websocket",
         "//vendor:golang.org/x/oauth2/google",
-        "//vendor:google.golang.org/api/compute/v1",
         "//vendor:google.golang.org/api/googleapi",
         "//vendor:google.golang.org/api/logging/v2beta1",
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
@@ -39,6 +39,7 @@ var upgradeTests = []upgrades.Test{
 	&upgrades.HPAUpgradeTest{},
 	&upgrades.PersistentVolumeUpgradeTest{},
 	&upgrades.DaemonSetUpgradeTest{},
+	&upgrades.IngressUpgradeTest{},
 }
 
 var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
@@ -18,6 +18,7 @@ go_library(
         "framework.go",
         "get-kubemark-resource-usage.go",
         "google_compute.go",
+        "ingress_utils.go",
         "jobs_util.go",
         "kubelet_stats.go",
         "log_size_monitoring.go",
@@ -71,6 +72,7 @@ go_library(
         "//pkg/master/ports:go_default_library",
         "//pkg/metrics:go_default_library",
         "//pkg/ssh:go_default_library",
+        "//pkg/util:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//pkg/util/system:go_default_library",
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package e2e
+package framework
 
 import (
 	"bytes"
@@ -31,16 +31,12 @@ import (
 	"math/big"
 	"net"
 	"net/http"
-	"os"
 	"os/exec"
 	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/v1"
-
 	compute "google.golang.org/api/compute/v1"
 	"google.golang.org/api/googleapi"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -51,11 +47,13 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+	"k8s.io/kubernetes/pkg/util"
 	utilexec "k8s.io/kubernetes/pkg/util/exec"
-	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 
 	. "github.com/onsi/ginkgo"
@@ -80,59 +78,84 @@ const (
 	// Name of the default http backend service
 	defaultBackendName = "default-http-backend"
 
-	// IP src range from which the GCE L7 performs health checks.
+	// GCEL7SrcRange is the IP src range from which the GCE L7 performs health checks.
 	GCEL7SrcRange = "130.211.0.0/22"
 
 	// Cloud resources created by the ingress controller older than this
 	// are automatically purged to prevent running out of quota.
 	// TODO(37335): write soak tests and bump this up to a week.
 	maxAge = -48 * time.Hour
 
+	// IngressManifestPath is the parent path to yaml test manifests.
+	IngressManifestPath = "test/e2e/testing-manifests/ingress"
+
+	// IngressReqTimeout is the timeout on a single http request.
+	IngressReqTimeout = 10 * time.Second
+
+	// healthz port used to verify glbc restarted correctly on the master.
+	glbcHealthzPort = 8086
+
+	// General cloud resource poll timeout (eg: create static ip, firewall etc)
+	cloudResourcePollTimeout = 5 * time.Minute
+
+	// Name of the config-map and key the ingress controller stores its uid in.
+	uidConfigMap = "ingress-uid"
+	uidKey       = "uid"
+
+	// GCE only allows names < 64 characters, and the loadbalancer controller inserts
+	// a single character of padding.
+	nameLenLimit = 62
 )
 
-type testJig struct {
-	client  clientset.Interface
-	rootCAs map[string][]byte
-	address string
-	ing     *extensions.Ingress
+// IngressTestJig holds the relevant state and parameters of the ingress test.
+type IngressTestJig struct {
+	Client  clientset.Interface
+	RootCAs map[string][]byte
+	Address string
+	Ingress *extensions.Ingress
 	// class is the value of the annotation keyed under
 	// `kubernetes.io/ingress.class`. It's added to all ingresses created by
 	// this jig.
-	class string
+	Class string
 
 	// The interval used to poll urls
-	pollInterval time.Duration
+	PollInterval time.Duration
 }
 
-type conformanceTests struct {
-	entryLog string
-	execute  func()
-	exitLog  string
+// IngressConformanceTests contains a closure with an entry and exit log line.
+type IngressConformanceTests struct {
	EntryLog string
	Execute  func()
	ExitLog  string
 }
 
-func createComformanceTests(jig *testJig, ns string) []conformanceTests {
-	manifestPath := filepath.Join(ingressManifestPath, "http")
-	// These constants match the manifests used in ingressManifestPath
+// CreateIngressComformanceTests generates an slice of sequential test cases:
+// a simple http ingress, ingress with HTTPS, ingress HTTPS with a modified hostname,
+// ingress https with a modified URLMap
+func CreateIngressComformanceTests(jig *IngressTestJig, ns string) []IngressConformanceTests {
+	manifestPath := filepath.Join(IngressManifestPath, "http")
+	// These constants match the manifests used in IngressManifestPath
 	tlsHost := "foo.bar.com"
 	tlsSecretName := "foo"
 	updatedTLSHost := "foobar.com"
 	updateURLMapHost := "bar.baz.com"
 	updateURLMapPath := "/testurl"
 	// Platform agnostic list of tests that must be satisfied by all controllers
-	return []conformanceTests{
+	return []IngressConformanceTests{
 		{
 			fmt.Sprintf("should create a basic HTTP ingress"),
-			func() { jig.createIngress(manifestPath, ns, map[string]string{}) },
+			func() { jig.CreateIngress(manifestPath, ns, map[string]string{}) },
 			fmt.Sprintf("waiting for urls on basic HTTP ingress"),
 		},
 		{
 			fmt.Sprintf("should terminate TLS for host %v", tlsHost),
-			func() { jig.addHTTPS(tlsSecretName, tlsHost) },
+			func() { jig.AddHTTPS(tlsSecretName, tlsHost) },
 			fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
 		},
 		{
 			fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
 			func() {
-				jig.update(func(ing *extensions.Ingress) {
+				jig.Update(func(ing *extensions.Ingress) {
 					newRules := []extensions.IngressRule{}
 					for _, rule := range ing.Spec.Rules {
 						if rule.Host != tlsHost {
@@ -146,7 +169,7 @@ func createComformanceTests(jig *testJig, ns string) []conformanceTests {
 					}
 					ing.Spec.Rules = newRules
 				})
-				jig.addHTTPS(tlsSecretName, updatedTLSHost)
+				jig.AddHTTPS(tlsSecretName, updatedTLSHost)
 			},
 			fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
 		},
@@ -154,7 +177,7 @@ func createComformanceTests(jig *testJig, ns string) []conformanceTests {
 			fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
 			func() {
 				var pathToFail string
-				jig.update(func(ing *extensions.Ingress) {
+				jig.Update(func(ing *extensions.Ingress) {
 					newRules := []extensions.IngressRule{}
 					for _, rule := range ing.Spec.Rules {
 						if rule.Host != updateURLMapHost {
@@ -180,8 +203,8 @@ func createComformanceTests(jig *testJig, ns string) []conformanceTests {
 					ing.Spec.Rules = newRules
 				})
 				By("Checking that " + pathToFail + " is not exposed by polling for failure")
-				route := fmt.Sprintf("http://%v%v", jig.address, pathToFail)
-				framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true))
+				route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail)
+				ExpectNoError(PollURL(route, updateURLMapHost, LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true))
 			},
 			fmt.Sprintf("Waiting for path updates to reflect in L7"),
 		},
@@ -248,13 +271,13 @@ func generateRSACerts(host string, isCA bool, keyOut, certOut io.Writer) error {
 	return nil
 }
 
-// buildTransport creates a transport for use in executing HTTPS requests with
+// buildTransportWithCA creates a transport for use in executing HTTPS requests with
 // the given certs. Note that the given rootCA must be configured with isCA=true.
-func buildTransport(serverName string, rootCA []byte) (*http.Transport, error) {
+func buildTransportWithCA(serverName string, rootCA []byte) (*http.Transport, error) {
 	pool := x509.NewCertPool()
 	ok := pool.AppendCertsFromPEM(rootCA)
 	if !ok {
-		return nil, fmt.Errorf("Unable to load serverCA.")
+		return nil, fmt.Errorf("Unable to load serverCA")
 	}
 	return utilnet.SetTransportDefaults(&http.Transport{
 		TLSClientConfig: &tls.Config{
@@ -265,20 +288,20 @@ func buildTransport(serverName string, rootCA []byte) (*http.Transport, error) {
 	}), nil
 }
 
-// buildInsecureClient returns an insecure http client. Can be used for "curl -k".
-func buildInsecureClient(timeout time.Duration) *http.Client {
+// BuildInsecureClient returns an insecure http client. Can be used for "curl -k".
+func BuildInsecureClient(timeout time.Duration) *http.Client {
 	t := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
 	return &http.Client{Timeout: timeout, Transport: utilnet.SetTransportDefaults(t)}
 }
 
-// createSecret creates a secret containing TLS certificates for the given Ingress.
-// If a secret with the same name already exists in the namespace of the
+// createIngressTLSSecret creates a secret containing TLS certificates for the given Ingress.
+// If a secret with the same name already pathExists in the namespace of the
 // Ingress, it's updated.
-func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host string, rootCA, privKey []byte, err error) {
+func createIngressTLSSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host string, rootCA, privKey []byte, err error) {
 	var k, c bytes.Buffer
 	tls := ing.Spec.TLS[0]
 	host = strings.Join(tls.Hosts, ",")
-	framework.Logf("Generating RSA cert for host %v", host)
+	Logf("Generating RSA cert for host %v", host)
 
 	if err = generateRSACerts(host, true, &k, &c); err != nil {
 		return
@@ -297,20 +320,22 @@ func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host
 	var s *v1.Secret
 	if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName, metav1.GetOptions{}); err == nil {
 		// TODO: Retry the update. We don't really expect anything to conflict though.
-		framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
+		Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
 		s.Data = secret.Data
 		_, err = kubeClient.Core().Secrets(ing.Namespace).Update(s)
 	} else {
-		framework.Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
+		Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
 		_, err = kubeClient.Core().Secrets(ing.Namespace).Create(secret)
 	}
 	return host, cert, key, err
 }
 
-func cleanupGCE(gceController *GCEIngressController) {
-	pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
+// CleanupGCEIngressController calls the GCEIngressController.Cleanup(false)
+// followed with deleting the static ip, and then a final GCEIngressController.Cleanup(true)
+func CleanupGCEIngressController(gceController *GCEIngressController) {
+	pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
 		if err := gceController.Cleanup(false); err != nil {
-			framework.Logf("Still waiting for glbc to cleanup:\n%v", err)
+			Logf("Still waiting for glbc to cleanup:\n%v", err)
 			return false, nil
 		}
 		return true, nil
@@ -320,9 +345,9 @@ func cleanupGCE(gceController *GCEIngressController) {
 	// controller. Delete this IP only after the controller has had a chance
 	// to cleanup or it might interfere with the controller, causing it to
 	// throw out confusing events.
-	if ipErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
+	if ipErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
 		if err := gceController.deleteStaticIPs(); err != nil {
-			framework.Logf("Failed to delete static-ip: %v\n", err)
+			Logf("Failed to delete static-ip: %v\n", err)
 			return false, nil
 		}
 		return true, nil
@@ -342,7 +367,7 @@ func cleanupGCE(gceController *GCEIngressController) {
 
 	// Fail if the controller didn't cleanup
 	if pollErr != nil {
-		framework.Failf("L7 controller failed to delete all cloud resources on time. %v", pollErr)
+		Failf("L7 controller failed to delete all cloud resources on time. %v", pollErr)
 	}
 }
 
@@ -350,7 +375,7 @@ func (cont *GCEIngressController) deleteForwardingRule(del bool) string {
 	msg := ""
 	fwList := []compute.ForwardingRule{}
 	for _, regex := range []string{fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter), fmt.Sprintf("%vfws-.*%v.*", k8sPrefix, clusterDelimiter)} {
-		gcloudList("forwarding-rules", regex, cont.cloud.ProjectID, &fwList)
+		gcloudComputeResourceList("forwarding-rules", regex, cont.Cloud.ProjectID, &fwList)
 		if len(fwList) == 0 {
 			continue
 		}
@@ -360,7 +385,7 @@ func (cont *GCEIngressController) deleteForwardingRule(del bool) string {
 			}
 			msg += fmt.Sprintf("%v (forwarding rule)\n", f.Name)
 			if del {
-				gcloudDelete("forwarding-rules", f.Name, cont.cloud.ProjectID, "--global")
+				GcloudComputeResourceDelete("forwarding-rules", f.Name, cont.Cloud.ProjectID, "--global")
 			}
 		}
 	}
@@ -371,7 +396,7 @@ func (cont *GCEIngressController) deleteAddresses(del bool) string {
 	msg := ""
 	ipList := []compute.Address{}
 	regex := fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter)
-	gcloudList("addresses", regex, cont.cloud.ProjectID, &ipList)
+	gcloudComputeResourceList("addresses", regex, cont.Cloud.ProjectID, &ipList)
 	if len(ipList) != 0 {
 		for _, ip := range ipList {
 			if !cont.canDelete(ip.Name, ip.CreationTimestamp, del) {
@@ -379,7 +404,7 @@ func (cont *GCEIngressController) deleteAddresses(del bool) string {
 			}
 			msg += fmt.Sprintf("%v (static-ip)\n", ip.Name)
 			if del {
-				gcloudDelete("addresses", ip.Name, cont.cloud.ProjectID, "--global")
+				GcloudComputeResourceDelete("addresses", ip.Name, cont.Cloud.ProjectID, "--global")
 			}
 		}
 	}
@@ -390,7 +415,7 @@ func (cont *GCEIngressController) deleteTargetProxy(del bool) string {
 	msg := ""
 	tpList := []compute.TargetHttpProxy{}
 	regex := fmt.Sprintf("%vtp-.*%v.*", k8sPrefix, clusterDelimiter)
-	gcloudList("target-http-proxies", regex, cont.cloud.ProjectID, &tpList)
+	gcloudComputeResourceList("target-http-proxies", regex, cont.Cloud.ProjectID, &tpList)
 	if len(tpList) != 0 {
 		for _, t := range tpList {
 			if !cont.canDelete(t.Name, t.CreationTimestamp, del) {
@@ -398,13 +423,13 @@ func (cont *GCEIngressController) deleteTargetProxy(del bool) string {
 			}
 			msg += fmt.Sprintf("%v (target-http-proxy)\n", t.Name)
 			if del {
-				gcloudDelete("target-http-proxies", t.Name, cont.cloud.ProjectID)
+				GcloudComputeResourceDelete("target-http-proxies", t.Name, cont.Cloud.ProjectID)
 			}
 		}
 	}
 	tpsList := []compute.TargetHttpsProxy{}
 	regex = fmt.Sprintf("%vtps-.*%v.*", k8sPrefix, clusterDelimiter)
-	gcloudList("target-https-proxies", regex, cont.cloud.ProjectID, &tpsList)
+	gcloudComputeResourceList("target-https-proxies", regex, cont.Cloud.ProjectID, &tpsList)
 	if len(tpsList) != 0 {
 		for _, t := range tpsList {
 			if !cont.canDelete(t.Name, t.CreationTimestamp, del) {
@@ -412,15 +437,15 @@ func (cont *GCEIngressController) deleteTargetProxy(del bool) string {
 			}
 			msg += fmt.Sprintf("%v (target-https-proxy)\n", t.Name)
 			if del {
-				gcloudDelete("target-https-proxies", t.Name, cont.cloud.ProjectID)
+				GcloudComputeResourceDelete("target-https-proxies", t.Name, cont.Cloud.ProjectID)
 			}
 		}
 	}
 	return msg
 }
 
-func (cont *GCEIngressController) deleteUrlMap(del bool) (msg string) {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
+func (cont *GCEIngressController) deleteURLMap(del bool) (msg string) {
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
 	umList, err := gceCloud.ListUrlMaps()
 	if err != nil {
 		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
@@ -447,7 +472,7 @@ func (cont *GCEIngressController) deleteUrlMap(del bool) (msg string) {
 }
 
 func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
 	beList, err := gceCloud.ListBackendServices()
 	if err != nil {
 		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
@@ -456,7 +481,7 @@ func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) {
 		return fmt.Sprintf("Failed to list backend services: %v", err)
 	}
 	if len(beList.Items) == 0 {
-		framework.Logf("No backend services found")
+		Logf("No backend services found")
 		return msg
 	}
 	for _, be := range beList.Items {
@@ -474,8 +499,8 @@ func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) {
 	return msg
 }
 
-func (cont *GCEIngressController) deleteHttpHealthCheck(del bool) (msg string) {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
+func (cont *GCEIngressController) deleteHTTPHealthCheck(del bool) (msg string) {
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
 	hcList, err := gceCloud.ListHttpHealthChecks()
 	if err != nil {
 		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
@@ -502,7 +527,7 @@ func (cont *GCEIngressController) deleteHttpHealthCheck(del bool) (msg string) {
 }
 
 func (cont *GCEIngressController) deleteSSLCertificate(del bool) (msg string) {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
 	sslList, err := gceCloud.ListSslCertificates()
 	if err != nil {
 		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
@@ -528,10 +553,10 @@ func (cont *GCEIngressController) deleteSSLCertificate(del bool) (msg string) {
 }
 
 func (cont *GCEIngressController) deleteInstanceGroup(del bool) (msg string) {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
 	// TODO: E2E cloudprovider has only 1 zone, but the cluster can have many.
 	// We need to poll on all IGs across all zones.
-	igList, err := gceCloud.ListInstanceGroups(cont.cloud.Zone)
+	igList, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone)
 	if err != nil {
 		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
 			return msg
@@ -547,7 +572,7 @@ func (cont *GCEIngressController) deleteInstanceGroup(del bool) (msg string) {
 		}
 		msg += fmt.Sprintf("%v (instance-group)\n", ig.Name)
 		if del {
-			if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.cloud.Zone); err != nil &&
+			if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil &&
 				!cont.isHTTPErrorCode(err, http.StatusNotFound) {
 				msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name)
 			}
@@ -574,23 +599,25 @@ func (cont *GCEIngressController) canDelete(resourceName, creationTimestamp stri
 	}
 	createdTime, err := time.Parse(time.RFC3339, creationTimestamp)
 	if err != nil {
-		framework.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
+		Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
 		return false
 	}
 	if createdTime.Before(time.Now().Add(maxAge)) {
-		framework.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
+		Logf("%v created on %v IS too old", resourceName, creationTimestamp)
 		return true
 	}
 	return false
 }
 
-func (cont *GCEIngressController) getFirewallRuleName() string {
+// GetFirewallRuleName returns the name of the firewall used for the GCEIngressController.
+func (cont *GCEIngressController) GetFirewallRuleName() string {
 	return fmt.Sprintf("%vfw-l7%v%v", k8sPrefix, clusterDelimiter, cont.UID)
 }
 
-func (cont *GCEIngressController) getFirewallRule() *compute.Firewall {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
-	fwName := cont.getFirewallRuleName()
+// GetFirewallRule returns the firewall used by the GCEIngressController.
+func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall {
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
+	fwName := cont.GetFirewallRuleName()
 	fw, err := gceCloud.GetFirewall(fwName)
 	Expect(err).NotTo(HaveOccurred())
 	return fw
@@ -599,7 +626,7 @@ func (cont *GCEIngressController) getFirewallRule() *compute.Firewall {
 func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) {
 	fwList := []compute.Firewall{}
 	regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter)
-	gcloudList("firewall-rules", regex, cont.cloud.ProjectID, &fwList)
+	gcloudComputeResourceList("firewall-rules", regex, cont.Cloud.ProjectID, &fwList)
 	if len(fwList) != 0 {
 		for _, f := range fwList {
 			if !cont.canDelete(f.Name, f.CreationTimestamp, del) {
@@ -607,7 +634,7 @@ func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) {
 			}
 			msg += fmt.Sprintf("%v (firewall rule)\n", f.Name)
 			if del {
-				gcloudDelete("firewall-rules", f.Name, cont.cloud.ProjectID)
+				GcloudComputeResourceDelete("firewall-rules", f.Name, cont.Cloud.ProjectID)
 			}
 		}
 	}
@@ -631,9 +658,9 @@ func (cont *GCEIngressController) Cleanup(del bool) error {
 	errMsg += cont.deleteAddresses(del)
 
 	errMsg += cont.deleteTargetProxy(del)
-	errMsg += cont.deleteUrlMap(del)
+	errMsg += cont.deleteURLMap(del)
 	errMsg += cont.deleteBackendService(del)
-	errMsg += cont.deleteHttpHealthCheck(del)
+	errMsg += cont.deleteHTTPHealthCheck(del)
 
 	errMsg += cont.deleteInstanceGroup(del)
 	errMsg += cont.deleteFirewallRule(del)
@@ -648,63 +675,64 @@ func (cont *GCEIngressController) Cleanup(del bool) error {
 	return fmt.Errorf(errMsg)
 }
 
-func (cont *GCEIngressController) init() {
+// Init initializes the GCEIngressController with an UID
+func (cont *GCEIngressController) Init() {
 	uid, err := cont.getL7AddonUID()
 	Expect(err).NotTo(HaveOccurred())
 	cont.UID = uid
 	// There's a name limit imposed by GCE. The controller will truncate.
-	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
+	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID)
 	if len(testName) > nameLenLimit {
-		framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
+		Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
 	} else {
-		framework.Logf("Detected cluster UID %v", cont.UID)
+		Logf("Detected cluster UID %v", cont.UID)
 	}
 }
 
-// createStaticIP allocates a random static ip with the given name. Returns a string
+// CreateStaticIP allocates a random static ip with the given name. Returns a string
 // representation of the ip. Caller is expected to manage cleanup of the ip by
 // invoking deleteStaticIPs.
-func (cont *GCEIngressController) createStaticIP(name string) string {
-	gceCloud := cont.cloud.Provider.(*gcecloud.GCECloud)
+func (cont *GCEIngressController) CreateStaticIP(name string) string {
+	gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
 	ip, err := gceCloud.ReserveGlobalStaticIP(name, "")
 	if err != nil {
 		if delErr := gceCloud.DeleteGlobalStaticIP(name); delErr != nil {
 			if cont.isHTTPErrorCode(delErr, http.StatusNotFound) {
-				framework.Logf("Static ip with name %v was not allocated, nothing to delete", name)
+				Logf("Static ip with name %v was not allocated, nothing to delete", name)
 			} else {
-				framework.Logf("Failed to delete static ip %v: %v", name, delErr)
+				Logf("Failed to delete static ip %v: %v", name, delErr)
 			}
 		}
-		framework.Failf("Failed to allocated static ip %v: %v", name, err)
+		Failf("Failed to allocated static ip %v: %v", name, err)
 	}
 	cont.staticIPName = ip.Name
-	framework.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
+	Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
 	return ip.Address
 }
 
-// deleteStaticIPs deletes all static-ips allocated through calls to
-// createStaticIP.
+// deleteStaticIPs delets all static-ips allocated through calls to
+// CreateStaticIP.
 func (cont *GCEIngressController) deleteStaticIPs() error {
 	if cont.staticIPName != "" {
-		if err := gcloudDelete("addresses", cont.staticIPName, cont.cloud.ProjectID, "--global"); err == nil {
+		if err := GcloudComputeResourceDelete("addresses", cont.staticIPName, cont.Cloud.ProjectID, "--global"); err == nil {
 			cont.staticIPName = ""
 		} else {
 			return err
 		}
 	} else {
 		e2eIPs := []compute.Address{}
-		gcloudList("addresses", "e2e-.*", cont.cloud.ProjectID, &e2eIPs)
+		gcloudComputeResourceList("addresses", "e2e-.*", cont.Cloud.ProjectID, &e2eIPs)
 		ips := []string{}
 		for _, ip := range e2eIPs {
 			ips = append(ips, ip.Name)
 		}
-		framework.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
+		Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
 	}
 	return nil
 }
 
-// gcloudList unmarshals json output of gcloud into given out interface.
-func gcloudList(resource, regex, project string, out interface{}) {
+// gcloudComputeResourceList unmarshals json output of gcloud into given out interface.
+func gcloudComputeResourceList(resource, regex, project string, out interface{}) {
 	// gcloud prints a message to stderr if it has an available update
 	// so we only look at stdout.
 	command := []string{
@@ -724,204 +752,210 @@ func gcloudList(resource, regex, project string, out interface{}) {
 				errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr))
 			}
 		}
-		framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
+		Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
 	}
 	if err := json.Unmarshal([]byte(output), out); err != nil {
-		framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
+		Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
 	}
 }
 
-func gcloudDelete(resource, name, project string, args ...string) error {
-	framework.Logf("Deleting %v: %v", resource, name)
+// GcloudComputeResourceDelete deletes the specified compute resource by name and project.
+func GcloudComputeResourceDelete(resource, name, project string, args ...string) error {
+	Logf("Deleting %v: %v", resource, name)
 	argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
 	output, err := exec.Command("gcloud", argList...).CombinedOutput()
 	if err != nil {
-		framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
+		Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
 	}
 	return err
 }
 
-func gcloudCreate(resource, name, project string, args ...string) error {
-	framework.Logf("Creating %v in project %v: %v", resource, project, name)
+// GcloudComputeResourceCreate creates a compute resource with a name and arguments.
+func GcloudComputeResourceCreate(resource, name, project string, args ...string) error {
+	Logf("Creating %v in project %v: %v", resource, project, name)
 	argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
-	framework.Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
+	Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
 	output, err := exec.Command("gcloud", argsList...).CombinedOutput()
 	if err != nil {
-		framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
+		Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
 	}
 	return err
 }
 
-// createIngress creates the Ingress and associated service/rc.
+// CreateIngress creates the Ingress and associated service/rc.
 // Required: ing.yaml, rc.yaml, svc.yaml must exist in manifestPath
 // Optional: secret.yaml, ingAnnotations
 // If ingAnnotations is specified it will overwrite any annotations in ing.yaml
-func (j *testJig) createIngress(manifestPath, ns string, ingAnnotations map[string]string) {
+func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string) {
 	mkpath := func(file string) string {
-		return filepath.Join(framework.TestContext.RepoRoot, manifestPath, file)
+		return filepath.Join(TestContext.RepoRoot, manifestPath, file)
 	}
 
-	framework.Logf("creating replication controller")
-	framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", ns))
+	Logf("creating replication controller")
+	RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", ns))
 
-	framework.Logf("creating service")
-	framework.RunKubectlOrDie("create", "-f", mkpath("svc.yaml"), fmt.Sprintf("--namespace=%v", ns))
+	Logf("creating service")
+	RunKubectlOrDie("create", "-f", mkpath("svc.yaml"), fmt.Sprintf("--namespace=%v", ns))
 
-	if exists(mkpath("secret.yaml")) {
-		framework.Logf("creating secret")
-		framework.RunKubectlOrDie("create", "-f", mkpath("secret.yaml"), fmt.Sprintf("--namespace=%v", ns))
+	if exists, _ := util.FileExists(mkpath("secret.yaml")); exists {
+		Logf("creating secret")
+		RunKubectlOrDie("create", "-f", mkpath("secret.yaml"), fmt.Sprintf("--namespace=%v", ns))
 	}
-	j.ing = ingFromManifest(mkpath("ing.yaml"))
-	j.ing.Namespace = ns
-	j.ing.Annotations = map[string]string{ingressClass: j.class}
+	j.Ingress = createIngressFromManifest(mkpath("ing.yaml"))
+	j.Ingress.Namespace = ns
+	j.Ingress.Annotations = map[string]string{ingressClass: j.Class}
 	for k, v := range ingAnnotations {
-		j.ing.Annotations[k] = v
+		j.Ingress.Annotations[k] = v
 	}
-	framework.Logf(fmt.Sprintf("creating" + j.ing.Name + " ingress"))
+	Logf(fmt.Sprintf("creating" + j.Ingress.Name + " ingress"))
 	var err error
-	j.ing, err = j.client.Extensions().Ingresses(ns).Create(j.ing)
-	framework.ExpectNoError(err)
+	j.Ingress, err = j.Client.Extensions().Ingresses(ns).Create(j.Ingress)
+	ExpectNoError(err)
 }
 
-func (j *testJig) update(update func(ing *extensions.Ingress)) {
+// Update retrieves the ingress, performs the passed function, and then updates it.
+func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) {
 	var err error
-	ns, name := j.ing.Namespace, j.ing.Name
+	ns, name := j.Ingress.Namespace, j.Ingress.Name
 	for i := 0; i < 3; i++ {
-		j.ing, err = j.client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
+		j.Ingress, err = j.Client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
-			framework.Failf("failed to get ingress %q: %v", name, err)
+			Failf("failed to get ingress %q: %v", name, err)
 		}
-		update(j.ing)
-		j.ing, err = j.client.Extensions().Ingresses(ns).Update(j.ing)
+		update(j.Ingress)
+		j.Ingress, err = j.Client.Extensions().Ingresses(ns).Update(j.Ingress)
 		if err == nil {
-			framework.DescribeIng(j.ing.Namespace)
+			DescribeIng(j.Ingress.Namespace)
 			return
 		}
 		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
-			framework.Failf("failed to update ingress %q: %v", name, err)
+			Failf("failed to update ingress %q: %v", name, err)
 		}
 	}
-	framework.Failf("too many retries updating ingress %q", name)
+	Failf("too many retries updating ingress %q", name)
 }
 
-func (j *testJig) addHTTPS(secretName string, hosts ...string) {
-	j.ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
-	// TODO: Just create the secret in getRootCAs once we're watching secrets in
+// AddHTTPS updates the ingress to use this secret for these hosts.
+func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) {
+	j.Ingress.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
+	// TODO: Just create the secret in GetRootCAs once we're watching secrets in
 	// the ingress controller.
-	_, cert, _, err := createSecret(j.client, j.ing)
-	framework.ExpectNoError(err)
-	framework.Logf("Updating ingress %v to use secret %v for TLS termination", j.ing.Name, secretName)
-	j.update(func(ing *extensions.Ingress) {
+	_, cert, _, err := createIngressTLSSecret(j.Client, j.Ingress)
+	ExpectNoError(err)
+	Logf("Updating ingress %v to use secret %v for TLS termination", j.Ingress.Name, secretName)
+	j.Update(func(ing *extensions.Ingress) {
 		ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
 	})
-	j.rootCAs[secretName] = cert
+	j.RootCAs[secretName] = cert
 }
 
-func (j *testJig) getRootCA(secretName string) (rootCA []byte) {
+// GetRootCA returns a rootCA from the ingress test jig.
+func (j *IngressTestJig) GetRootCA(secretName string) (rootCA []byte) {
 	var ok bool
-	rootCA, ok = j.rootCAs[secretName]
+	rootCA, ok = j.RootCAs[secretName]
 	if !ok {
-		framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
+		Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
 	}
 	return
 }
 
-func (j *testJig) deleteIngress() {
-	framework.ExpectNoError(j.client.Extensions().Ingresses(j.ing.Namespace).Delete(j.ing.Name, nil))
+// DeleteIngress deletes the ingress resource
+func (j *IngressTestJig) DeleteIngress() {
+	ExpectNoError(j.Client.Extensions().Ingresses(j.Ingress.Namespace).Delete(j.Ingress.Name, nil))
 }
 
-// waitForIngress waits till the ingress acquires an IP, then waits for its
+// WaitForIngress waits till the ingress acquires an IP, then waits for its
 // hosts/urls to respond to a protocol check (either http or https). If
 // waitForNodePort is true, the NodePort of the Service is verified before
 // verifying the Ingress. NodePort is currently a requirement for cloudprovider
 // Ingress.
-func (j *testJig) waitForIngress(waitForNodePort bool) {
+func (j *IngressTestJig) WaitForIngress(waitForNodePort bool) {
 	// Wait for the loadbalancer IP.
-	address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, framework.LoadBalancerPollTimeout)
+	address, err := WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, LoadBalancerPollTimeout)
 	if err != nil {
-		framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
+		Failf("Ingress failed to acquire an IP address within %v", LoadBalancerPollTimeout)
 	}
-	j.address = address
-	framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
-	timeoutClient := &http.Client{Timeout: reqTimeout}
+	j.Address = address
+	Logf("Found address %v for ingress %v", j.Address, j.Ingress.Name)
+	timeoutClient := &http.Client{Timeout: IngressReqTimeout}
 
 	// Check that all rules respond to a simple GET.
-	for _, rules := range j.ing.Spec.Rules {
+	for _, rules := range j.Ingress.Spec.Rules {
 		proto := "http"
-		if len(j.ing.Spec.TLS) > 0 {
-			knownHosts := sets.NewString(j.ing.Spec.TLS[0].Hosts...)
+		if len(j.Ingress.Spec.TLS) > 0 {
+			knownHosts := sets.NewString(j.Ingress.Spec.TLS[0].Hosts...)
 			if knownHosts.Has(rules.Host) {
-				timeoutClient.Transport, err = buildTransport(rules.Host, j.getRootCA(j.ing.Spec.TLS[0].SecretName))
-				framework.ExpectNoError(err)
+				timeoutClient.Transport, err = buildTransportWithCA(rules.Host, j.GetRootCA(j.Ingress.Spec.TLS[0].SecretName))
+				ExpectNoError(err)
 				proto = "https"
 			}
 		}
 		for _, p := range rules.IngressRuleValue.HTTP.Paths {
 			if waitForNodePort {
-				j.curlServiceNodePort(j.ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
+				j.pollServiceNodePort(j.Ingress.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
 			}
 			route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
-			framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
-			framework.ExpectNoError(framework.PollURL(route, rules.Host, framework.LoadBalancerPollTimeout, j.pollInterval, timeoutClient, false))
+			Logf("Testing route %v host %v with simple GET", route, rules.Host)
+			ExpectNoError(PollURL(route, rules.Host, LoadBalancerPollTimeout, j.PollInterval, timeoutClient, false))
 		}
 	}
 }
 
-// verifyURL polls for the given iterations, in intervals, and fails if the
+// VerifyURL polls for the given iterations, in intervals, and fails if the
 // given url returns a non-healthy http code even once.
-func (j *testJig) verifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error {
+func (j *IngressTestJig) VerifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error {
 	for i := 0; i < iterations; i++ {
-		b, err := framework.SimpleGET(httpClient, route, host)
+		b, err := SimpleGET(httpClient, route, host)
 		if err != nil {
-			framework.Logf(b)
+			Logf(b)
 			return err
 		}
-		framework.Logf("Verfied %v with host %v %d times, sleeping for %v", route, host, i, interval)
+		Logf("Verfied %v with host %v %d times, sleeping for %v", route, host, i, interval)
 		time.Sleep(interval)
 	}
 	return nil
 }
 
-func (j *testJig) curlServiceNodePort(ns, name string, port int) {
+func (j *IngressTestJig) pollServiceNodePort(ns, name string, port int) {
 	// TODO: Curl all nodes?
-	u, err := framework.GetNodePortURL(j.client, ns, name, port)
-	framework.ExpectNoError(err)
-	framework.ExpectNoError(framework.PollURL(u, "", 30*time.Second, j.pollInterval, &http.Client{Timeout: reqTimeout}, false))
+	u, err := GetNodePortURL(j.Client, ns, name, port)
+	ExpectNoError(err)
+	ExpectNoError(PollURL(u, "", 30*time.Second, j.PollInterval, &http.Client{Timeout: IngressReqTimeout}, false))
 }
 
-// getIngressNodePorts returns all related backend services' nodePorts.
+// GetIngressNodePorts returns all related backend services' nodePorts.
 // Current GCE ingress controller allows traffic to the default HTTP backend
 // by default, so retrieve its nodePort as well.
-func (j *testJig) getIngressNodePorts() []string {
+func (j *IngressTestJig) GetIngressNodePorts() []string {
 	nodePorts := []string{}
-	defaultSvc, err := j.client.Core().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
+	defaultSvc, err := j.Client.Core().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	nodePorts = append(nodePorts, strconv.Itoa(int(defaultSvc.Spec.Ports[0].NodePort)))
 
 	backendSvcs := []string{}
-	if j.ing.Spec.Backend != nil {
-		backendSvcs = append(backendSvcs, j.ing.Spec.Backend.ServiceName)
+	if j.Ingress.Spec.Backend != nil {
+		backendSvcs = append(backendSvcs, j.Ingress.Spec.Backend.ServiceName)
 	}
-	for _, rule := range j.ing.Spec.Rules {
+	for _, rule := range j.Ingress.Spec.Rules {
 		for _, ingPath := range rule.HTTP.Paths {
 			backendSvcs = append(backendSvcs, ingPath.Backend.ServiceName)
 		}
 	}
 	for _, svcName := range backendSvcs {
-		svc, err := j.client.Core().Services(j.ing.Namespace).Get(svcName, metav1.GetOptions{})
+		svc, err := j.Client.Core().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		nodePorts = append(nodePorts, strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
 	}
 	return nodePorts
 }
 
-// constructFirewallForIngress returns the expected GCE firewall rule for the ingress resource
-func (j *testJig) constructFirewallForIngress(gceController *GCEIngressController) *compute.Firewall {
-	nodeTags := framework.GetNodeTags(j.client, gceController.cloud)
-	nodePorts := j.getIngressNodePorts()
+// ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource
+func (j *IngressTestJig) ConstructFirewallForIngress(gceController *GCEIngressController) *compute.Firewall {
+	nodeTags := GetNodeTags(j.Client, gceController.Cloud)
+	nodePorts := j.GetIngressNodePorts()
 
 	fw := compute.Firewall{}
-	fw.Name = gceController.getFirewallRuleName()
+	fw.Name = gceController.GetFirewallRuleName()
 	fw.SourceRanges = []string{GCEL7SrcRange}
 	fw.TargetTags = nodeTags.Items
 	fw.Allowed = []*compute.FirewallAllowed{
@@ -933,23 +967,23 @@ func (j *testJig) constructFirewallForIngress(gceController *GCEIngressControlle
 	return &fw
 }
 
-// ingFromManifest reads a .json/yaml file and returns the rc in it.
-func ingFromManifest(fileName string) *extensions.Ingress {
+// createIngressFromManifest reads a .json/yaml file and returns the rc in it.
+func createIngressFromManifest(fileName string) *extensions.Ingress {
 	var ing extensions.Ingress
-	framework.Logf("Parsing ingress from %v", fileName)
+	Logf("Parsing ingress from %v", fileName)
 	data, err := ioutil.ReadFile(fileName)
-	framework.ExpectNoError(err)
+	ExpectNoError(err)
 
 	json, err := utilyaml.ToJSON(data)
-	framework.ExpectNoError(err)
+	ExpectNoError(err)
 
-	framework.ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
+	ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
 	return &ing
 }
 
 func (cont *GCEIngressController) getL7AddonUID() (string, error) {
-	framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
-	cm, err := cont.c.Core().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
+	Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
+	cm, err := cont.Client.Core().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -959,64 +993,54 @@ func (cont *GCEIngressController) getL7AddonUID() (string, error) {
 	return "", fmt.Errorf("Could not find cluster UID for L7 addon pod")
 }
 
-func exists(path string) bool {
-	_, err := os.Stat(path)
-	if err == nil {
-		return true
-	}
-	if os.IsNotExist(err) {
-		return false
-	}
-	framework.Failf("Failed to os.Stat path %v", path)
-	return false
-}
-
 // GCEIngressController manages implementation details of Ingress on GCE/GKE.
 type GCEIngressController struct {
-	ns           string
+	Ns           string
 	rcPath       string
 	UID          string
 	staticIPName string
 	rc           *v1.ReplicationController
 	svc          *v1.Service
-	c            clientset.Interface
-	cloud        framework.CloudConfig
+	Client       clientset.Interface
+	Cloud        CloudConfig
 }
 
-func newTestJig(c clientset.Interface) *testJig {
-	return &testJig{client: c, rootCAs: map[string][]byte{}, pollInterval: framework.LoadBalancerPollInterval}
+// NewIngressTestJig instantiates struct with client
+func NewIngressTestJig(c clientset.Interface) *IngressTestJig {
+	return &IngressTestJig{Client: c, RootCAs: map[string][]byte{}, PollInterval: LoadBalancerPollInterval}
 }
 
 // NginxIngressController manages implementation details of Ingress on Nginx.
 type NginxIngressController struct {
-	ns         string
+	Ns         string
 	rc         *v1.ReplicationController
 	pod        *v1.Pod
-	c          clientset.Interface
+	Client     clientset.Interface
 	externalIP string
 }
 
-func (cont *NginxIngressController) init() {
+// Init initializes the NginxIngressController
+func (cont *NginxIngressController) Init() {
 	mkpath := func(file string) string {
-		return filepath.Join(framework.TestContext.RepoRoot, ingressManifestPath, "nginx", file)
+		return filepath.Join(TestContext.RepoRoot, IngressManifestPath, "nginx", file)
 	}
-	framework.Logf("initializing nginx ingress controller")
-	framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.ns))
+	Logf("initializing nginx ingress controller")
+	RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.Ns))
 
-	rc, err := cont.c.Core().ReplicationControllers(cont.ns).Get("nginx-ingress-controller", metav1.GetOptions{})
-	framework.ExpectNoError(err)
+	rc, err := cont.Client.Core().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{})
+	ExpectNoError(err)
 	cont.rc = rc
 
-	framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
+	Logf("waiting for pods with label %v", rc.Spec.Selector)
 	sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
-	framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
-	pods, err := cont.c.Core().Pods(cont.ns).List(metav1.ListOptions{LabelSelector: sel.String()})
-	framework.ExpectNoError(err)
+	ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel))
+	pods, err := cont.Client.Core().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
+	ExpectNoError(err)
 	if len(pods.Items) == 0 {
-		framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
+		Failf("Failed to find nginx ingress controller pods with selector %v", sel)
 	}
 	cont.pod = &pods.Items[0]
-	cont.externalIP, err = framework.GetHostExternalAddress(cont.c, cont.pod)
-	framework.ExpectNoError(err)
-	framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
+	cont.externalIP, err = GetHostExternalAddress(cont.Client, cont.pod)
+	ExpectNoError(err)
+	Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
 }
@@ -30,48 +30,26 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-const (
-	// parent path to yaml test manifests.
-	ingressManifestPath = "test/e2e/testing-manifests/ingress"
-
-	// timeout on a single http request.
-	reqTimeout = 10 * time.Second
-
-	// healthz port used to verify glbc restarted correctly on the master.
-	glbcHealthzPort = 8086
-
-	// General cloud resource poll timeout (eg: create static ip, firewall etc)
-	cloudResourcePollTimeout = 5 * time.Minute
-
-	// Name of the config-map and key the ingress controller stores its uid in.
-	uidConfigMap = "ingress-uid"
-	uidKey       = "uid"
-
-	// GCE only allows names < 64 characters, and the loadbalancer controller inserts
-	// a single character of padding.
-	nameLenLimit = 62
-)
-
 var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 	defer GinkgoRecover()
 	var (
 		ns               string
-		jig              *testJig
-		conformanceTests []conformanceTests
+		jig              *framework.IngressTestJig
+		conformanceTests []framework.IngressConformanceTests
 	)
 	f := framework.NewDefaultFramework("ingress")
 
 	BeforeEach(func() {
 		f.BeforeEach()
-		jig = newTestJig(f.ClientSet)
+		jig = framework.NewIngressTestJig(f.ClientSet)
 		ns = f.Namespace.Name
 
 		// this test wants powerful permissions. Since the namespace names are unique, we can leave this
 		// lying around so we don't have to race any caches
-		framework.BindClusterRole(jig.client.Rbac(), "cluster-admin", f.Namespace.Name,
+		framework.BindClusterRole(jig.Client.Rbac(), "cluster-admin", f.Namespace.Name,
 			rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
 
-		err := framework.WaitForAuthorizationUpdate(jig.client.AuthorizationV1beta1(),
+		err := framework.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1beta1(),
 			serviceaccount.MakeUsername(f.Namespace.Name, "default"),
 			"", "create", schema.GroupResource{Resource: "pods"}, true)
 		framework.ExpectNoError(err)
@@ -85,18 +63,18 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 	// Slow by design ~10m for each "It" block dominated by loadbalancer setup time
 	// TODO: write similar tests for nginx, haproxy and AWS Ingress.
 	framework.KubeDescribe("GCE [Slow] [Feature:Ingress]", func() {
-		var gceController *GCEIngressController
+		var gceController *framework.GCEIngressController
 
 		// Platform specific setup
 		BeforeEach(func() {
 			framework.SkipUnlessProviderIs("gce", "gke")
 			By("Initializing gce controller")
-			gceController = &GCEIngressController{
-				ns:    ns,
-				c:     jig.client,
-				cloud: framework.TestContext.CloudConfig,
+			gceController = &framework.GCEIngressController{
+				Ns:     ns,
+				Client: jig.Client,
+				Cloud:  framework.TestContext.CloudConfig,
 			}
-			gceController.init()
+			gceController.Init()
 		})
 
 		// Platform specific cleanup
@ -104,51 +82,51 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
framework.DescribeIng(ns)
|
||||
}
|
||||
if jig.ing == nil {
|
||||
if jig.Ingress == nil {
|
||||
By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
By("Deleting ingress")
|
||||
jig.deleteIngress()
|
||||
jig.DeleteIngress()
|
||||
|
||||
By("Cleaning up cloud resources")
|
||||
cleanupGCE(gceController)
|
||||
framework.CleanupGCEIngressController(gceController)
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
conformanceTests = createComformanceTests(jig, ns)
|
||||
conformanceTests = framework.CreateIngressComformanceTests(jig, ns)
|
||||
for _, t := range conformanceTests {
|
||||
By(t.entryLog)
|
||||
t.execute()
|
||||
By(t.exitLog)
|
||||
jig.waitForIngress(true)
|
||||
By(t.EntryLog)
|
||||
t.Execute()
|
||||
By(t.ExitLog)
|
||||
jig.WaitForIngress(true)
|
||||
}
|
||||
})
|
||||
|
||||
It("should create ingress with given static-ip", func() {
|
||||
// ip released when the rest of lb resources are deleted in cleanupGCE
|
||||
ip := gceController.createStaticIP(ns)
|
||||
// ip released when the rest of lb resources are deleted in CleanupGCEIngressController
|
||||
ip := gceController.CreateStaticIP(ns)
|
||||
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns, ip))
|
||||
|
||||
jig.createIngress(filepath.Join(ingressManifestPath, "static-ip"), ns, map[string]string{
|
||||
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns, map[string]string{
|
||||
"kubernetes.io/ingress.global-static-ip-name": ns,
|
||||
"kubernetes.io/ingress.allow-http": "false",
|
||||
})
|
||||
|
||||
By("waiting for Ingress to come up with ip: " + ip)
|
||||
httpClient := buildInsecureClient(reqTimeout)
|
||||
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, false))
|
||||
httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout)
|
||||
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
|
||||
|
||||
By("should reject HTTP traffic")
|
||||
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, true))
|
||||
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
|
||||
|
||||
By("should have correct firewall rule for ingress")
|
||||
fw := gceController.getFirewallRule()
|
||||
expFw := jig.constructFirewallForIngress(gceController)
|
||||
fw := gceController.GetFirewallRule()
|
||||
expFw := jig.ConstructFirewallForIngress(gceController)
|
||||
// Passed the last argument as `true` to verify the backend ports is a subset
|
||||
// of the allowed ports in firewall rule, given there may be other existing
|
||||
// ingress resources and backends we are not aware of.
|
||||
Expect(framework.VerifyFirewallRule(fw, expFw, gceController.cloud.Network, true)).NotTo(HaveOccurred())
|
||||
Expect(framework.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred())
|
||||
|
||||
// TODO: uncomment the restart test once we have a way to synchronize
|
||||
// and know that the controller has resumed watching. If we delete
|
||||
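
VerifyFirewallRule is defined in the framework package and is not shown in this diff. As a rough, hypothetical sketch of the subset semantics the comment above describes (simplified to string ports; not the framework's actual implementation):

package main

import "fmt"

// portsSubset reports whether every required port also appears in allowed.
func portsSubset(required, allowed []string) bool {
	allowedSet := make(map[string]bool, len(allowed))
	for _, p := range allowed {
		allowedSet[p] = true
	}
	for _, p := range required {
		if !allowedSet[p] {
			return false
		}
	}
	return true
}

func main() {
	// The expected firewall needs only this test's backend ports; the live rule
	// may legitimately allow more, since other ingresses share the cluster.
	fmt.Println(portsSubset([]string{"30301", "30302"}, []string{"30301", "30302", "31000"})) // true
	fmt.Println(portsSubset([]string{"30999"}, []string{"30301", "30302"}))                   // false
}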
@ -167,51 +145,51 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {

	// Time: borderline 5m, slow by design
	framework.KubeDescribe("Nginx", func() {
-		var nginxController *NginxIngressController
+		var nginxController *framework.NginxIngressController

		BeforeEach(func() {
			framework.SkipUnlessProviderIs("gce", "gke")
			By("Initializing nginx controller")
-			jig.class = "nginx"
-			nginxController = &NginxIngressController{ns: ns, c: jig.client}
+			jig.Class = "nginx"
+			nginxController = &framework.NginxIngressController{Ns: ns, Client: jig.Client}

			// TODO: This test may fail on other platforms. We can simply skip it
			// but we want to allow easy testing where a user might've hand
			// configured firewalls.
			if framework.ProviderIs("gce", "gke") {
-				framework.ExpectNoError(gcloudCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
+				framework.ExpectNoError(framework.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
			} else {
				framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
			}

-			nginxController.init()
+			nginxController.Init()
		})

		AfterEach(func() {
			if framework.ProviderIs("gce", "gke") {
-				framework.ExpectNoError(gcloudDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
+				framework.ExpectNoError(framework.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
			}
			if CurrentGinkgoTestDescription().Failed {
				framework.DescribeIng(ns)
			}
-			if jig.ing == nil {
+			if jig.Ingress == nil {
				By("No ingress created, no cleanup necessary")
				return
			}
			By("Deleting ingress")
-			jig.deleteIngress()
+			jig.DeleteIngress()
		})

		It("should conform to Ingress spec", func() {
			// Poll more frequently to reduce e2e completion time.
			// This test runs in presubmit.
-			jig.pollInterval = 5 * time.Second
-			conformanceTests = createComformanceTests(jig, ns)
+			jig.PollInterval = 5 * time.Second
+			conformanceTests = framework.CreateIngressComformanceTests(jig, ns)
			for _, t := range conformanceTests {
-				By(t.entryLog)
-				t.execute()
-				By(t.exitLog)
-				jig.waitForIngress(false)
+				By(t.EntryLog)
+				t.Execute()
+				By(t.ExitLog)
+				jig.WaitForIngress(false)
			}
		})
	})
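
Both the GCE and Nginx suites poll the load balancer through framework.BuildInsecureClient, which by name skips certificate verification because the tests hit the load balancer IP directly and the serving cert cannot match the host. A minimal sketch of such a client (an assumption about its shape, not the framework's exact code):

package main

import (
	"crypto/tls"
	"net/http"
	"time"
)

// buildInsecureClient returns an http.Client with a request timeout that
// accepts any TLS certificate, suitable for probing an ingress by raw IP.
func buildInsecureClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
}

func main() {
	client := buildInsecureClient(10 * time.Second)
	_ = client // e.g. client.Get("https://<static-ip>/")
}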
|
@ -14,6 +14,7 @@ go_library(
	"daemonsets.go",
	"deployments.go",
	"horizontal_pod_autoscalers.go",
+	"ingress.go",
	"job.go",
	"persistent_volumes.go",
	"secrets.go",
|
122	test/e2e/upgrades/ingress.go	Normal file
@ -0,0 +1,122 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrades

import (
	"fmt"
	"net/http"
	"path/filepath"

	. "github.com/onsi/ginkgo"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// IngressUpgradeTest adapts the Ingress e2e for upgrade testing
type IngressUpgradeTest struct {
	gceController *framework.GCEIngressController
	jig           *framework.IngressTestJig
	httpClient    *http.Client
	ip            string
}

// Setup creates a GLBC, allocates an ip, and an ingress resource,
// then waits for a successful connectivity check to the ip.
func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
	framework.SkipUnlessProviderIs("gce", "gke")

	// jig handles all Kubernetes testing logic
	jig := framework.NewIngressTestJig(f.ClientSet)

	ns, err := f.CreateNamespace("ingress-upgrade", nil)
	framework.ExpectNoError(err)

	// gceController handles all cloud testing logic
	gceController := &framework.GCEIngressController{
		Ns:     ns.Name,
		Client: jig.Client,
		Cloud:  framework.TestContext.CloudConfig,
	}
	gceController.Init()

	t.gceController = gceController
	t.jig = jig
	t.httpClient = framework.BuildInsecureClient(framework.IngressReqTimeout)

	// Allocate a static-ip for the Ingress; this IP is cleaned up via CleanupGCEIngressController
	t.ip = t.gceController.CreateStaticIP(ns.Name)

	// Create a working basic Ingress
	By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns.Name, t.ip))
	jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns.Name, map[string]string{
		"kubernetes.io/ingress.global-static-ip-name": ns.Name,
		"kubernetes.io/ingress.allow-http":            "false",
	})

	By("waiting for Ingress to come up with ip: " + t.ip)
	framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, t.httpClient, false))
}

// Test waits for the upgrade to complete, and then verifies
// with a connectivity check to the loadbalancer ip.
func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	switch upgrade {
	case MasterUpgrade:
		// Restarting the ingress controller shouldn't disrupt a steady state
		// Ingress. Restarting the ingress controller and deleting ingresses
		// while it's down will leak cloud resources, because the ingress
		// controller doesn't checkpoint to disk.
		t.verify(f, done, true)
	default:
		// Currently ingress gets disrupted across node upgrade, because endpoints
		// get killed and we don't have any guarantees that 2 nodes don't overlap
		// their upgrades (even on cloud platforms like GCE, because VM level
		// rolling upgrades are not Kubernetes aware).
		t.verify(f, done, false)
	}
}

// Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
	if CurrentGinkgoTestDescription().Failed {
		framework.DescribeIng(t.gceController.Ns)
	}
	if t.jig.Ingress == nil {
		By("No ingress created, no cleanup necessary")
		return
	}
	By("Deleting ingress")
	t.jig.DeleteIngress()

	By("Cleaning up cloud resources")
	framework.CleanupGCEIngressController(t.gceController)
}

func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
	if testDuringDisruption {
		By("continuously hitting the Ingress IP")
		wait.Until(func() {
			framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
		}, t.jig.PollInterval, done)
	} else {
		By("waiting for upgrade to finish without checking if Ingress remains up")
		<-done
	}
	By("hitting the Ingress IP " + t.ip)
	framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}
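
The done channel that Test receives is closed by the upgrade harness once the upgrade itself finishes, which is what lets verify either poll continuously until that moment or simply block on <-done. A stripped-down, hypothetical driver illustrating that contract (not the real harness):

package main

import (
	"fmt"
	"time"
)

// runWithUpgrade mimics the harness contract: the test body runs concurrently
// with the upgrade and learns that the upgrade finished when done is closed.
func runWithUpgrade(test func(done <-chan struct{}), upgrade func()) {
	done := make(chan struct{})
	go func() {
		upgrade()
		close(done)
	}()
	test(done)
}

func main() {
	runWithUpgrade(func(done <-chan struct{}) {
		ticker := time.NewTicker(200 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-done:
				fmt.Println("upgrade finished; run the final connectivity check here")
				return
			case <-ticker.C:
				fmt.Println("hitting the Ingress IP during the upgrade")
			}
		}
	}, func() { time.Sleep(time.Second) }) // stand-in for the real upgrade
}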