Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 21:47:07 +00:00)
Merge pull request #24391 from bprashanth/ing_templated_controller

Automatic merge from submit-queue

Template the ingress controller

We still need https://github.com/kubernetes/contrib/pull/791 to run the controller as a static pod.

ref https://github.com/kubernetes/kubernetes/issues/23663

commit 2b46c4b7e2
@@ -1,3 +1,8 @@
+{% set kube_uid = "" -%}
+{% if pillar['kube_uid'] is defined -%}
+{% set kube_uid = pillar['kube_uid'] %}
+{% endif -%}
+
 apiVersion: v1
 kind: ReplicationController
 metadata:
@@ -65,4 +70,5 @@ spec:
   memory: 50Mi
 args:
 - --default-backend-service=kube-system/default-http-backend
-- --sync-period=300s
+- --sync-period=60s
+- --cluster-uid={{kube_uid}}
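The block above turns the GLBC controller manifest into a Salt/Jinja template: kube_uid defaults to the empty string, is overwritten when the kube_uid pillar key is set, and feeds the new --cluster-uid flag. A minimal sketch of that render, using plain jinja2 in Python to stand in for Salt's renderer (the uid value is illustrative):

    # Sketch: plain Jinja2 approximating Salt's jinja renderer for this manifest.
    from jinja2 import Template

    snippet = (
        '{% set kube_uid = "" -%}\n'
        "{% if pillar['kube_uid'] is defined -%}\n"
        "{% set kube_uid = pillar['kube_uid'] %}\n"
        "{% endif -%}\n"
        "- --cluster-uid={{kube_uid}}\n"
    )

    # Pillar key absent: the flag renders with an empty value.
    print(Template(snippet).render(pillar={}).strip())   # - --cluster-uid=
    # Pillar key present: the uid is substituted into the flag.
    print(Template(snippet).render(pillar={"kube_uid": "1122867421"}).strip())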
@@ -202,6 +202,16 @@ function gen-kube-bearertoken() {
   KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
 }
 
+# Generate uid
+# This function only works on systems with python. It generates a time based
+# UID instead of a UUID because GCE has a name length limit.
+#
+# Vars set:
+#   KUBE_UID
+function gen-uid {
+  KUBE_UID=$(python -c 'import uuid; print uuid.uuid1().fields[0]')
+}
+
 function load-or-gen-kube-basicauth() {
   if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
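uuid.uuid1().fields[0] is the 32-bit time_low field of a version-1 (time-based) UUID, so KUBE_UID is at most ten decimal digits; a full UUID would blow GCE's resource-name length limit. A quick illustration (Python 3 syntax here, versus the Python 2 print in the script; values illustrative):

    # What gen-uid captures: the time_low field of a time-based UUID.
    import uuid

    u = uuid.uuid1()
    kube_uid = u.fields[0]   # 32-bit int, e.g. 1122867421

    print(u)                 # full 36-char UUID, too long for GCE name suffixes
    print(kube_uid)          # at most 10 decimal digits
    assert kube_uid < 2**32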
@@ -456,6 +466,7 @@ function build-kube-env {
   local file=$2
 
   build-runtime-config
+  gen-uid
 
   rm -f ${file}
   cat >$file <<EOF
@@ -503,6 +514,7 @@ KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
 KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
 MULTIZONE: $(yaml-quote ${MULTIZONE:-})
 NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
+KUBE_UID: $(yaml-quote ${KUBE_UID:-})
 EOF
   if [ -n "${KUBELET_PORT:-}" ]; then
     cat >>$file <<EOF
@@ -449,6 +449,7 @@ manifest_url: '$(echo "$MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url_header: '$(echo "$MANIFEST_URL_HEADER" | sed -e "s/'/''/g")'
 num_nodes: $(echo "${NUM_NODES}" | sed -e "s/'/''/g")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
+kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
 EOF
   if [ -n "${KUBELET_PORT:-}" ]; then
     cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
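The sed expression s/'/''/g in each of these lines doubles embedded single quotes, which is the escaping rule for single-quoted scalars in the generated cluster-params.sls pillar. The same transformation as a tiny Python sketch (the helper name is ours):

    # Doubling single quotes escapes a value for a single-quoted YAML scalar.
    def yaml_single_quote(value: str) -> str:
        return "'" + value.replace("'", "''") + "'"

    print(yaml_single_quote("1122867421"))  # '1122867421'
    print(yaml_single_quote("it's"))        # 'it''s'  (parses back to: it's)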
@@ -35,6 +35,7 @@ addon-dir-create:
   file.recurse:
     - source: salt://kube-addons/cluster-loadbalancing/glbc
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
+   - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
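Adding - template: jinja makes Salt's file.recurse state render every matched .yaml/.json file through Jinja, with pillar data in scope, before writing it into the addon directory; that is what resolves the {{kube_uid}} placeholder in the manifest above. A rough Python approximation of the per-file render step (jinja2 stands in for Salt's renderer; paths and pillar value are illustrative):

    # Rough sketch of file.recurse with template: jinja, per matched file.
    import pathlib
    from jinja2 import Template

    pillar = {"kube_uid": "1122867421"}   # illustrative pillar data
    src = pathlib.Path("glbc")            # hypothetical source dir
    dst = pathlib.Path("rendered-glbc")   # hypothetical target dir

    dst.mkdir(parents=True, exist_ok=True)
    for f in src.glob("*.yaml"):
        rendered = Template(f.read_text()).render(pillar=pillar)
        (dst / f.name).write_text(rendered)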
@@ -253,76 +253,49 @@ func kubectlLogLBController(c *client.Client, ns string) {
 }
 
 type IngressController struct {
 	ns             string
 	rcPath         string
-	defaultSvcPath string
 	UID            string
 	Project        string
 	rc             *api.ReplicationController
 	svc            *api.Service
 	c              *client.Client
 }
 
-func (cont *IngressController) create() {
-	// TODO: This cop out is because it would be *more* brittle to duplicate all
-	// the name construction logic from the controller cross-repo. We will not
-	// need to be so paranoid about leaked resources once we figure out a solution
-	// for issues like #16337. Currently, all names should fall within 63 chars.
-	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
-	if len(testName) > nameLenLimit {
-		framework.Failf("Cannot reliably test the given namespace(%v)/uid(%v), too close to GCE limit of %v",
-			cont.ns, cont.UID, nameLenLimit)
-	}
-
-	if cont.defaultSvcPath != "" {
-		svc := svcFromManifest(cont.defaultSvcPath)
-		svc.Namespace = cont.ns
-		svc.Labels = controllerLabels
-		svc.Spec.Selector = controllerLabels
-		cont.svc = svc
-		_, err := cont.c.Services(cont.ns).Create(cont.svc)
-		Expect(err).NotTo(HaveOccurred())
-	}
-	rc := rcFromManifest(cont.rcPath)
-
+func (cont *IngressController) getL7AddonUID() (string, error) {
 	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(clusterAddonLBLabels))}
 	existingRCs, err := cont.c.ReplicationControllers(api.NamespaceSystem).List(listOpts)
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return "", err
+	}
 	if len(existingRCs.Items) != 1 {
-		framework.Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels)
+		return "", fmt.Errorf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels)
 	}
-
-	// Merge the existing spec and new spec. The modifications should not
-	// manifest as functional changes to the controller. Most importantly, the
-	// podTemplate shouldn't change (but for the additional test cmd line flags)
-	// to ensure we test actual cluster functionality across upgrades.
-	rc.Spec = existingRCs.Items[0].Spec
-	rc.Name = "glbc"
-	rc.Namespace = cont.ns
-	rc.Labels = controllerLabels
-	rc.Spec.Selector = controllerLabels
-	rc.Spec.Template.Labels = controllerLabels
-	rc.Spec.Replicas = 1
-
-	// These command line params are only recognized by v0.51 and above.
-	testArgs := []string{
-		// Pass namespace uid so the controller will tag resources with it.
-		fmt.Sprintf("--cluster-uid=%v", cont.UID),
-		// Tell the controller to delete all resources as it quits.
-		fmt.Sprintf("--delete-all-on-quit=true"),
-		// Don't use the default Service from kube-system.
-		fmt.Sprintf("--default-backend-service=%v/%v", cont.svc.Namespace, cont.svc.Name),
-	}
+	rc := existingRCs.Items[0]
+	commandPrefix := "--cluster-uid="
 	for i, c := range rc.Spec.Template.Spec.Containers {
 		if c.Name == lbContainerName {
-			rc.Spec.Template.Spec.Containers[i].Args = append(c.Args, testArgs...)
+			for _, arg := range rc.Spec.Template.Spec.Containers[i].Args {
+				if strings.HasPrefix(arg, commandPrefix) {
+					return strings.Replace(arg, commandPrefix, "", -1), nil
+				}
+			}
 		}
 	}
-	cont.rc = rc
-	_, err = cont.c.ReplicationControllers(cont.ns).Create(cont.rc)
+	return "", fmt.Errorf("Could not find cluster UID for L7 addon pod")
+}
+
+func (cont *IngressController) init() {
+	uid, err := cont.getL7AddonUID()
 	Expect(err).NotTo(HaveOccurred())
-	Expect(framework.WaitForRCPodsRunning(cont.c, cont.ns, cont.rc.Name)).NotTo(HaveOccurred())
+	cont.UID = uid
+	// There's a name limit imposed by GCE. The controller will truncate.
+	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
+	if len(testName) > nameLenLimit {
+		framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
+	} else {
+		framework.Logf("Detected cluster UID %v", cont.UID)
+	}
 }
 
 func (cont *IngressController) Cleanup(del bool) error {
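The rewritten test no longer clones the addon RC into the test namespace; getL7AddonUID simply reads the cluster UID back out of the running addon container's args, and init stores it. The core flag-recovery step, isolated as a Python sketch (container name and args are illustrative stand-ins for lbContainerName and the pod spec):

    # Same recovery logic as getL7AddonUID: find the lb container's
    # --cluster-uid= argument and strip the prefix.
    PREFIX = "--cluster-uid="

    def get_cluster_uid(containers, lb_name="l7-lb-controller"):
        for name, args in containers:
            if name != lb_name:
                continue
            for arg in args:
                if arg.startswith(PREFIX):
                    return arg[len(PREFIX):]
        raise LookupError("Could not find cluster UID for L7 addon pod")

    args = ["--default-backend-service=kube-system/default-http-backend",
            "--sync-period=60s", "--cluster-uid=1122867421"]
    print(get_cluster_uid([("l7-lb-controller", args)]))  # 1122867421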
@@ -457,24 +430,14 @@ var _ = framework.KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]
 		f.BeforeEach()
 		client = f.Client
 		ns = f.Namespace.Name
-		// Scaled down the existing Ingress controller so it doesn't interfere with the test.
-		Expect(framework.ScaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0)).NotTo(HaveOccurred())
 		addonDir = filepath.Join(
 			framework.TestContext.RepoRoot, "cluster", "addons", "cluster-loadbalancing", "glbc")
 
-		nsParts := strings.Split(ns, "-")
 		ingController = &IngressController{
 			ns: ns,
-			// The UID in the namespace was generated by the master, so it's
-			// global to the cluster.
-			UID:            nsParts[len(nsParts)-1],
-			Project:        framework.TestContext.CloudConfig.ProjectID,
-			rcPath:         filepath.Join(addonDir, "glbc-controller.yaml"),
-			defaultSvcPath: filepath.Join(addonDir, "default-svc.yaml"),
-			c:              client,
+			Project: framework.TestContext.CloudConfig.ProjectID,
+			c:       client,
 		}
-		ingController.create()
-		framework.Logf("Finished creating ingress controller")
+		ingController.init()
 		// If we somehow get the same namespace uid as someone else in this
 		// gce project, just back off.
 		Expect(ingController.Cleanup(false)).NotTo(HaveOccurred())
@@ -509,9 +472,6 @@ var _ = framework.KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]
 			}
 			return true, nil
 		})
-		// TODO: Remove this once issue #17802 is fixed
-		Expect(framework.ScaleRCByLabels(client, ingController.rc.Namespace, ingController.rc.Labels, 0)).NotTo(HaveOccurred())
-
 		// If the controller failed to cleanup the test will fail, but we want to cleanup
 		// resources before that.
 		if pollErr != nil {
@@ -520,8 +480,6 @@ var _ = framework.KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]
 			}
 			framework.Failf("Failed to cleanup GCE L7 resources.")
 		}
-		// Restore the cluster Addon.
-		Expect(framework.ScaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 1)).NotTo(HaveOccurred())
 		f.AfterEach()
 		framework.Logf("Successfully verified GCE L7 loadbalancer via Ingress.")
 	})