refactor: migrate control-plane health checks off the insecure port in tests

Signed-off-by: knight42 <anonymousknight96@gmail.com>
commit 3c4d6859c8 (parent f78d095d52)
Author: knight42
Date:   2020-10-31 02:09:50 +08:00
8 changed files with 44 additions and 23 deletions


@@ -107,12 +107,11 @@ EOF
   kube::log::status "Starting controller-manager"
   "${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \
-    --port="${CTLRMGR_PORT}" \
     --kube-api-content-type="${KUBE_TEST_API_TYPE-}" \
     --kubeconfig="${config}" 1>&2 &
   export CTLRMGR_PID=$!
-  kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager"
+  kube::util::wait_for_url "https://127.0.0.1:${SECURE_CTLRMGR_PORT}/healthz" "controller-manager"
 }

 # Creates a node object with name 127.0.0.1. This is required because we do not
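The secure controller-manager endpoint serves a self-signed certificate by default, so health probes have to skip TLS verification. As a minimal Go sketch of an equivalent probe (illustrative only, not part of this commit; 10257 is the default secure port used above):

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
    )

    func main() {
        // Skip verification: the controller-manager self-signs its serving
        // certificate unless one is configured explicitly.
        client := &http.Client{Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
        }}
        resp, err := client.Get("https://127.0.0.1:10257/healthz")
        if err != nil {
            fmt.Println("controller-manager not up yet:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("healthz returned:", resp.StatusCode)
    }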


@@ -341,6 +341,7 @@ type componentStatusStorage struct {
 func (s componentStatusStorage) serversToValidate() map[string]*componentstatus.Server {
 	// this is fragile, which assumes that the default port is being used
+	// TODO: switch to the secure port once these components remove the ability to serve insecurely.
 	serversToValidate := map[string]*componentstatus.Server{
 		"controller-manager": {Addr: "127.0.0.1", Port: ports.InsecureKubeControllerManagerPort, Path: "/healthz"},
 		"scheduler":          {Addr: "127.0.0.1", Port: kubeschedulerconfig.DefaultInsecureSchedulerPort, Path: "/healthz"},


@@ -62,7 +62,7 @@ ETCD_PORT=${ETCD_PORT:-2379}
 SECURE_API_PORT=${SECURE_API_PORT:-6443}
 API_HOST=${API_HOST:-127.0.0.1}
 KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
-CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
+SECURE_CTLRMGR_PORT=${SECURE_CTLRMGR_PORT:-10257}
 PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
 IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"


@@ -72,10 +72,11 @@ type RestartDaemonConfig struct {
 	healthzPort  int
 	pollInterval time.Duration
 	pollTimeout  time.Duration
+	enableHTTPS  bool
 }

 // NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
-func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig {
+func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration, enableHTTPS bool) *RestartDaemonConfig {
 	if !framework.ProviderIs("gce") {
 		framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
 	}
@@ -85,6 +86,7 @@ func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval
 		healthzPort:  healthzPort,
 		pollInterval: pollInterval,
 		pollTimeout:  pollTimeout,
+		enableHTTPS:  enableHTTPS,
 	}
 }
@@ -99,8 +101,15 @@ func (r *RestartDaemonConfig) waitUp() {
 	if framework.NodeOSDistroIs("windows") {
 		nullDev = "NUL"
 	}
-	healthzCheck := fmt.Sprintf(
-		"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
+	var healthzCheck string
+	if r.enableHTTPS {
+		healthzCheck = fmt.Sprintf(
+			"curl -sk -o %v -I -w \"%%{http_code}\" https://localhost:%v/healthz", nullDev, r.healthzPort)
+	} else {
+		healthzCheck = fmt.Sprintf(
+			"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
+	}
 	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
 		result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider)
 		framework.ExpectNoError(err)
@@ -263,7 +272,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		// Requires master ssh access.
 		e2eskipper.SkipUnlessProviderIs("gce", "aws")
 		restarter := NewRestartConfig(
-			framework.APIAddress(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout)
+			framework.APIAddress(), "kube-controller", ports.KubeControllerManagerPort, restartPollInterval, restartTimeout, true)
 		restarter.restart()

 		// The intent is to ensure the replication controller manager has observed and reported status of
@@ -294,7 +303,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		// Requires master ssh access.
 		e2eskipper.SkipUnlessProviderIs("gce", "aws")
 		restarter := NewRestartConfig(
-			framework.APIAddress(), "kube-scheduler", kubeschedulerconfig.DefaultInsecureSchedulerPort, restartPollInterval, restartTimeout)
+			framework.APIAddress(), "kube-scheduler", kubeschedulerconfig.DefaultKubeSchedulerPort, restartPollInterval, restartTimeout, true)

 		// Create pods while the scheduler is down and make sure the scheduler picks them up by
 		// scaling the rc to the same size.
@@ -319,7 +328,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		}
 		for _, ip := range nodeIPs {
 			restarter := NewRestartConfig(
-				ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout)
+				ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout, false)
 			restarter.restart()
 		}
 		postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
@@ -336,7 +345,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		}
 		for _, ip := range nodeIPs {
 			restarter := NewRestartConfig(
-				ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout)
+				ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout, false)
 			// restart method will kill the kube-proxy process and wait for recovery,
 			// if not able to recover, will throw test failure.
 			restarter.restart()
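For concreteness, the two branches of waitUp render commands like the following on a Linux node, where nullDev is /dev/null (values filled in for the controller-manager and kube-proxy call sites in this file; -k makes curl tolerate the component's self-signed serving certificate):

    curl -sk -o /dev/null -I -w "%{http_code}" https://localhost:10257/healthz
    curl -s -o /dev/null -I -w "%{http_code}" http://localhost:10256/healthz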


@@ -47,7 +47,7 @@ func RestartControllerManager() error {
 // WaitForControllerManagerUp waits for the kube-controller-manager to be up.
 func WaitForControllerManagerUp() error {
-	cmd := "curl http://localhost:" + strconv.Itoa(framework.InsecureKubeControllerManagerPort) + "/healthz"
+	cmd := "curl -k https://localhost:" + strconv.Itoa(framework.KubeControllerManagerPort) + "/healthz"
 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
 		result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
 		if err != nil || result.Code != 0 {


@@ -18,6 +18,7 @@ package network

 import (
 	"context"
+	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -805,6 +806,7 @@ type HTTPPokeParams struct {
 	ExpectCode     int // default = 200
 	BodyContains   string
 	RetriableCodes []int
+	EnableHTTPS    bool
 }

 // HTTPPokeResult is a struct for HTTP poke result.
@@ -851,8 +853,18 @@ const (
 // The result body will be populated if the HTTP transaction was completed, even
 // if the other test params make this a failure).
 func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPokeResult {
+	// Set default params.
+	if params == nil {
+		params = &HTTPPokeParams{}
+	}
+
 	hostPort := net.JoinHostPort(host, strconv.Itoa(port))
-	url := fmt.Sprintf("http://%s%s", hostPort, path)
+	var url string
+	if params.EnableHTTPS {
+		url = fmt.Sprintf("https://%s%s", hostPort, path)
+	} else {
+		url = fmt.Sprintf("http://%s%s", hostPort, path)
+	}

 	ret := HTTPPokeResult{}
@@ -867,10 +879,6 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 		return ret
 	}

-	// Set default params.
-	if params == nil {
-		params = &HTTPPokeParams{}
-	}
 	if params.ExpectCode == 0 {
 		params.ExpectCode = http.StatusOK
 	}
@@ -937,6 +945,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
 	tr := utilnet.SetTransportDefaults(&http.Transport{
 		DisableKeepAlives: true,
+		TLSClientConfig:   &tls.Config{InsecureSkipVerify: true},
 	})
 	client := &http.Client{
 		Transport: tr,
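A short usage sketch of the extended poker (hypothetical host and values; this mirrors the firewall test below):

    // Probe a secure endpoint; certificate verification is skipped by the transport above.
    result := e2enetwork.PokeHTTP("203.0.113.10", 10257, "/healthz",
        &e2enetwork.HTTPPokeParams{Timeout: 5 * time.Second, EnableHTTPS: true})
    if result.Status == e2enetwork.HTTPError {
        framework.Failf("Unexpected error: %v", result.Error)
    }

Note that moving the params-defaulting block to the top of PokeHTTP is required, not just cosmetic: the URL construction now reads params.EnableHTTPS before the old defaulting ran, so a nil params would otherwise panic.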


@@ -26,4 +26,7 @@ const (
 	// May be overridden by a flag at startup.
 	// Deprecated: use the secure KubeControllerManagerPort instead.
 	InsecureKubeControllerManagerPort = 10252
+	// KubeControllerManagerPort is the default port for the controller manager status server.
+	// May be overridden by a flag at startup.
+	KubeControllerManagerPort = 10257
 )
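For orientation, the default control-plane status ports involved in this migration (the scheduler constants live in the kube-scheduler config package; values as of this commit):

    InsecureKubeControllerManagerPort = 10252 // HTTP, deprecated
    KubeControllerManagerPort         = 10257 // HTTPS
    DefaultInsecureSchedulerPort      = 10251 // HTTP, deprecated
    DefaultKubeSchedulerPort          = 10259 // HTTPS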


@@ -218,17 +218,17 @@ var _ = SIGDescribe("Firewall rule", func() {
 		controlPlaneAddresses := framework.GetControlPlaneAddresses(cs)
 		for _, instanceAddress := range controlPlaneAddresses {
-			assertNotReachableHTTPTimeout(instanceAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout)
-			assertNotReachableHTTPTimeout(instanceAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout)
+			assertNotReachableHTTPTimeout(instanceAddress, "/healthz", ports.KubeControllerManagerPort, firewallTestTCPTimeout, true)
+			assertNotReachableHTTPTimeout(instanceAddress, "/healthz", kubeschedulerconfig.DefaultKubeSchedulerPort, firewallTestTCPTimeout, true)
 		}
-		assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletPort, firewallTestTCPTimeout)
-		assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletReadOnlyPort, firewallTestTCPTimeout)
-		assertNotReachableHTTPTimeout(nodeAddr, ports.ProxyStatusPort, firewallTestTCPTimeout)
+		assertNotReachableHTTPTimeout(nodeAddr, "/", ports.KubeletPort, firewallTestTCPTimeout, false)
+		assertNotReachableHTTPTimeout(nodeAddr, "/", ports.KubeletReadOnlyPort, firewallTestTCPTimeout, false)
+		assertNotReachableHTTPTimeout(nodeAddr, "/", ports.ProxyStatusPort, firewallTestTCPTimeout, false)
 	})
 })

-func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) {
-	result := e2enetwork.PokeHTTP(ip, port, "/", &e2enetwork.HTTPPokeParams{Timeout: timeout})
+func assertNotReachableHTTPTimeout(ip, path string, port int, timeout time.Duration, enableHTTPS bool) {
+	result := e2enetwork.PokeHTTP(ip, port, path, &e2enetwork.HTTPPokeParams{Timeout: timeout, EnableHTTPS: enableHTTPS})
 	if result.Status == e2enetwork.HTTPError {
 		framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error)
 	}