Mirror of https://github.com/k3s-io/kubernetes.git (synced 2026-01-06 07:57:35 +00:00)

Merge pull request #96216 from knight42/refactor/disable-insecure-port-in-ctrler-mgr
refactor: disable insecure serving in controller-manager
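Editor's note: with the insecure HTTP status ports removed, the e2e framework can no longer scrape http://<host>:10251/metrics or :10252/metrics directly; metrics now have to come from the components' secure HTTPS ports (10259 for kube-scheduler, 10257 for kube-controller-manager) with a bearer token, which is what the metrics-proxy nginx pod added below does on the tests' behalf. The following is a minimal sketch of that access pattern, not part of this commit; the host, port, and token values are placeholders.

package main

import (
    "crypto/tls"
    "fmt"
    "io/ioutil"
    "net/http"
)

// Sketch: scrape kube-controller-manager metrics from its secure port.
// The nginx proxy in this commit performs the same request on behalf of the tests.
func main() {
    const (
        host  = "127.0.0.1"               // placeholder controller-manager address
        port  = 10257                     // secure kube-controller-manager port
        token = "<service-account-token>" // placeholder bearer token
    )
    tr := &http.Transport{
        // The component serves a self-signed certificate, so verification is
        // skipped here, mirroring "proxy_ssl_verify off" in the nginx config below.
        TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    }
    req, err := http.NewRequest("GET", fmt.Sprintf("https://%s:%d/metrics", host, port), nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Authorization", "Bearer "+token)
    resp, err := (&http.Client{Transport: tr}).Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(resp.Status, len(body), "bytes of metrics")
}
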
@@ -46,6 +46,7 @@ import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
    "k8s.io/kubernetes/test/e2e/framework/metrics"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2ereporters "k8s.io/kubernetes/test/e2e/reporters"
@@ -307,6 +308,11 @@ func setupSuite() {
        nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
        go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh)
    }

    err = metrics.SetupMetricsProxy(c)
    if err != nil {
        framework.Logf("Fail to setup metrics proxy: %v", err)
    }
}

// logClusterImageSources writes out cluster image sources.

@@ -27,20 +27,17 @@ import (
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

    "k8s.io/klog/v2"

    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

const (
    // insecureSchedulerPort is the default port for the scheduler status server.
    // May be overridden by a flag at startup.
    // Deprecated: use the secure KubeSchedulerPort instead.
    insecureSchedulerPort = 10251
    // insecureKubeControllerManagerPort is the default port for the controller manager status server.
    // May be overridden by a flag at startup.
    // Deprecated: use the secure KubeControllerManagerPort instead.
    insecureKubeControllerManagerPort = 10252
    // kubeSchedulerPort is the default port for the scheduler status server.
    kubeSchedulerPort = 10259
    // kubeControllerManagerPort is the default port for the controller manager status server.
    kubeControllerManagerPort = 10257
    metricsProxyPod = "metrics-proxy"
)

// Collection is metrics collection of components
@@ -152,7 +149,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
    if g.kubeScheduler == "" {
        return SchedulerMetrics{}, fmt.Errorf("kube-scheduler pod is not registered. Skipping Scheduler's metrics gathering")
    }
    output, err := g.getMetricsFromPod(g.client, g.kubeScheduler, metav1.NamespaceSystem, insecureSchedulerPort)
    output, err := g.getMetricsFromPod(g.client, metricsProxyPod, metav1.NamespaceSystem, kubeSchedulerPort)
    if err != nil {
        return SchedulerMetrics{}, err
    }
@@ -196,7 +193,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)

    var lastMetricsFetchErr error
    if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
        _, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
        _, lastMetricsFetchErr = g.getMetricsFromPod(g.client, metricsProxyPod, metav1.NamespaceSystem, kubeControllerManagerPort)
        return lastMetricsFetchErr == nil, nil
    }); metricsWaitErr != nil {
        err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@@ -207,7 +204,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
        return ControllerManagerMetrics{}, err
    }

    output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
    output, err := g.getMetricsFromPod(g.client, metricsProxyPod, metav1.NamespaceSystem, kubeControllerManagerPort)
    if err != nil {
        return ControllerManagerMetrics{}, err
    }
@@ -286,7 +283,7 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string,
        Namespace(namespace).
        Resource("pods").
        SubResource("proxy").
        Name(fmt.Sprintf("%v:%v", podName, port)).
        Name(fmt.Sprintf("%s:%d", podName, port)).
        Suffix("metrics").
        Do(context.TODO()).Raw()
    if err != nil {

test/e2e/framework/metrics/metrics_proxy.go (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "context"
    "fmt"
    "strings"
    "time"

    v1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"

    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

type componentInfo struct {
    Port int
    IP string
}

// SetupMetricsProxy creates a nginx Pod to expose metrics from the secure port of kube-scheduler and kube-controller-manager in tests.
func SetupMetricsProxy(c clientset.Interface) error {
    podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return err
    }
    var infos []componentInfo
    for _, pod := range podList.Items {
        switch {
        case strings.HasPrefix(pod.Name, "kube-scheduler-"):
            infos = append(infos, componentInfo{
                Port: kubeSchedulerPort,
                IP: pod.Status.PodIP,
            })
        case strings.HasPrefix(pod.Name, "kube-controller-manager-"):
            infos = append(infos, componentInfo{
                Port: kubeControllerManagerPort,
                IP: pod.Status.PodIP,
            })
        }
        if len(infos) == 2 {
            break
        }
    }
    if len(infos) == 0 {
        klog.Warningf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
        return nil
    }

    const name = metricsProxyPod
    _, err = c.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(context.TODO(), &v1.ServiceAccount{
        ObjectMeta: metav1.ObjectMeta{Name: name},
    }, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("create serviceAccount: %w", err)
    }
    _, err = c.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{Name: name},
        Rules: []rbacv1.PolicyRule{
            {
                NonResourceURLs: []string{"/metrics"},
                Verbs: []string{"get"},
            },
        },
    }, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("create clusterRole: %w", err)
    }
    _, err = c.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{Name: name},
        Subjects: []rbacv1.Subject{
            {
                Kind: rbacv1.ServiceAccountKind,
                Name: name,
                Namespace: metav1.NamespaceSystem,
            },
        },
        RoleRef: rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind: "ClusterRole",
            Name: name,
        },
    }, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("create clusterRoleBinding: %w", err)
    }

    var token string
    err = wait.PollImmediate(time.Second*5, time.Minute*5, func() (done bool, err error) {
        sa, err := c.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Get(context.TODO(), name, metav1.GetOptions{})
        if err != nil {
            klog.Warningf("Fail to get serviceAccount %s: %v", name, err)
            return false, nil
        }
        if len(sa.Secrets) < 1 {
            klog.Warningf("No secret found in serviceAccount %s", name)
            return false, nil
        }
        secretRef := sa.Secrets[0]
        secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
        if err != nil {
            klog.Warningf("Fail to get secret %s", secretRef.Name)
            return false, nil
        }
        token = string(secret.Data["token"])
        if len(token) == 0 {
            klog.Warningf("Token in secret %s is empty", secretRef.Name)
            return false, nil
        }
        return true, nil
    })
    if err != nil {
        return err
    }

    var nginxConfig string
    for _, info := range infos {
        nginxConfig += fmt.Sprintf(`
server {
    listen %d;
    server_name _;
    proxy_set_header Authorization "Bearer %s";
    proxy_ssl_verify off;
    location /metrics {
        proxy_pass https://%s:%d;
    }
}
`, info.Port, token, info.IP, info.Port)
    }
    _, err = c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Namespace: metav1.NamespaceSystem,
        },
        Data: map[string]string{
            "metrics.conf": nginxConfig,
        },
    }, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("create nginx configmap: %w", err)
    }
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Namespace: metav1.NamespaceSystem,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name: "nginx",
                Image: imageutils.GetE2EImage(imageutils.Nginx),
                VolumeMounts: []v1.VolumeMount{{
                    Name: "config",
                    MountPath: "/etc/nginx/conf.d",
                    ReadOnly: true,
                }},
            }},
            Volumes: []v1.Volume{{
                Name: "config",
                VolumeSource: v1.VolumeSource{
                    ConfigMap: &v1.ConfigMapVolumeSource{
                        LocalObjectReference: v1.LocalObjectReference{
                            Name: name,
                        },
                    },
                },
            }},
        },
    }
    _, err = c.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), pod, metav1.CreateOptions{})
    if err != nil {
        return err
    }
    err = e2epod.WaitForPodNameRunningInNamespace(c, name, metav1.NamespaceSystem)
    if err != nil {
        return err
    }
    klog.Info("Successfully setup metrics-proxy")
    return nil
}
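
Editor's note: once SetupMetricsProxy has run, the Grabber reaches component metrics by asking the API server to proxy the request to the metrics-proxy pod on the component's secure port, and nginx forwards it with the bearer token baked into its config. The sketch below mirrors the getMetricsFromPod path changed in this commit; grabFromProxy is a hypothetical helper, not part of the commit, and `c` is assumed to be an already-configured clientset.

package metrics

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
)

// grabFromProxy fetches metrics through the API server's pod proxy subresource,
// i.e. GET /api/v1/namespaces/kube-system/pods/metrics-proxy:<port>/proxy/metrics.
func grabFromProxy(c clientset.Interface, port int) (string, error) {
    data, err := c.CoreV1().RESTClient().Get().
        Namespace(metav1.NamespaceSystem).
        Resource("pods").
        SubResource("proxy").
        Name(fmt.Sprintf("%s:%d", "metrics-proxy", port)). // e.g. kubeControllerManagerPort or kubeSchedulerPort
        Suffix("metrics").
        Do(context.TODO()).Raw()
    if err != nil {
        return "", err
    }
    return string(data), nil
}
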
@@ -25,7 +25,6 @@ import (

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@@ -34,6 +33,7 @@ import (
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"
    "k8s.io/kubectl/pkg/util/podutils"

    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    testutils "k8s.io/kubernetes/test/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"

@@ -22,10 +22,6 @@ const (
    // KubeletPort is the default port for the kubelet server on each host machine.
    // May be overridden by a flag at startup.
    KubeletPort = 10250
    // InsecureKubeControllerManagerPort is the default port for the controller manager status server.
    // May be overridden by a flag at startup.
    // Deprecated: use the secure KubeControllerManagerPort instead.
    InsecureKubeControllerManagerPort = 10252
    // KubeControllerManagerPort is the default port for the controller manager status server.
    // May be overridden by a flag at startup.
    KubeControllerManagerPort = 10257

@@ -25,7 +25,7 @@ import (

    compute "google.golang.org/api/compute/v1"

    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    cloudprovider "k8s.io/cloud-provider"

@@ -22,15 +22,15 @@ import (
    "strings"
    "time"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"

    "k8s.io/kubernetes/test/e2e/framework"
    e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {

@@ -30,7 +30,7 @@ import (

    "k8s.io/apiserver/pkg/server"
    "k8s.io/apiserver/pkg/server/options"
    "k8s.io/cloud-provider"
    cloudprovider "k8s.io/cloud-provider"
    cloudctrlmgrtesting "k8s.io/cloud-provider/app/testing"
    "k8s.io/cloud-provider/fake"
    kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
@@ -159,17 +159,23 @@ users:
    brokenApiserverConfig.Close()

    tests := []struct {
        name string
        tester componentTester
        extraFlags []string
        name string
        tester componentTester
        extraFlags []string
        insecureDisabled bool
    }{
        {"kube-controller-manager", kubeControllerManagerTester{}, nil},
        {"cloud-controller-manager", cloudControllerManagerTester{}, []string{"--cloud-provider=fake"}},
        {"kube-scheduler", kubeSchedulerTester{}, nil},
        {"kube-controller-manager", kubeControllerManagerTester{}, nil, true},
        {"cloud-controller-manager", cloudControllerManagerTester{}, []string{"--cloud-provider=fake"}, false},
        {"kube-scheduler", kubeSchedulerTester{}, nil, false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            testComponent(t, tt.tester, apiserverConfig.Name(), brokenApiserverConfig.Name(), token, tt.extraFlags)
            if tt.insecureDisabled {
                testComponentWithSecureServing(t, tt.tester, apiserverConfig.Name(), brokenApiserverConfig.Name(), token, tt.extraFlags)
            } else {
                testComponent(t, tt.tester, apiserverConfig.Name(), brokenApiserverConfig.Name(), token, tt.extraFlags)
            }
        })
    }
}
@@ -215,7 +221,7 @@ func testComponent(t *testing.T, tester componentTester, kubeconfig, brokenKubec
        }, "/healthz", false, false, intPtr(http.StatusOK), nil},
        {"authorization skipped for /healthz with BROKEN authn/authz", []string{
            "--port=0",
            "--authentication-skip-lookup", // to survive unaccessible extensions-apiserver-authentication configmap
            "--authentication-skip-lookup", // to survive inaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", brokenKubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--kubeconfig", kubeconfig,
@@ -237,9 +243,9 @@ func testComponent(t *testing.T, tester componentTester, kubeconfig, brokenKubec
        }, "/metrics", false, false, intPtr(http.StatusInternalServerError), intPtr(http.StatusOK)},
        {"always-allowed /metrics with BROKEN authn/authz", []string{
            "--port=0",
            "--authentication-skip-lookup", // to survive unaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", kubeconfig,
            "--authentication-skip-lookup", // to survive inaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", brokenKubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--authorization-always-allow-paths", "/healthz,/metrics",
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
@@ -322,6 +328,111 @@ func testComponent(t *testing.T, tester componentTester, kubeconfig, brokenKubec
    }
}

func testComponentWithSecureServing(t *testing.T, tester componentTester, kubeconfig, brokenKubeconfig, token string, extraFlags []string) {
    tests := []struct {
        name string
        flags []string
        path string
        anonymous bool // to use the token or not
        wantErr bool
        wantSecureCode *int
    }{
        {"no-flags", nil, "/healthz", false, true, nil},
        {"/healthz without authn/authz", []string{
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", true, false, intPtr(http.StatusOK)},
        {"/metrics without authn/authz", []string{
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", true, false, intPtr(http.StatusForbidden)},
        {"authorization skipped for /healthz with authn/authz", []string{
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", kubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", false, false, intPtr(http.StatusOK)},
        {"authorization skipped for /healthz with BROKEN authn/authz", []string{
            "--authentication-skip-lookup", // to survive inaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", brokenKubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", false, false, intPtr(http.StatusOK)},
        {"not authorized /metrics with BROKEN authn/authz", []string{
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", false, false, intPtr(http.StatusInternalServerError)},
        {"always-allowed /metrics with BROKEN authn/authz", []string{
            "--authentication-skip-lookup", // to survive inaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", brokenKubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--authorization-always-allow-paths", "/healthz,/metrics",
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", false, false, intPtr(http.StatusOK)},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            secureOptions, secureInfo, _, tearDownFn, err := tester.StartTestServer(t, append(append([]string{}, tt.flags...), extraFlags...))
            if tearDownFn != nil {
                defer tearDownFn()
            }
            if (err != nil) != tt.wantErr {
                t.Fatalf("StartTestServer() error = %v, wantErr %v", err, tt.wantErr)
            }
            if err != nil {
                return
            }

            if want, got := tt.wantSecureCode != nil, secureInfo != nil; want != got {
                t.Errorf("SecureServing enabled: expected=%v got=%v", want, got)
            } else if want {
                url := fmt.Sprintf("https://%s%s", secureInfo.Listener.Addr().String(), tt.path)
                url = strings.Replace(url, "[::]", "127.0.0.1", -1) // switch to IPv4 because the self-signed cert does not support [::]

                // read self-signed server cert disk
                pool := x509.NewCertPool()
                serverCertPath := path.Join(secureOptions.ServerCert.CertDirectory, secureOptions.ServerCert.PairName+".crt")
                serverCert, err := ioutil.ReadFile(serverCertPath)
                if err != nil {
                    t.Fatalf("Failed to read component server cert %q: %v", serverCertPath, err)
                }
                pool.AppendCertsFromPEM(serverCert)
                tr := &http.Transport{
                    TLSClientConfig: &tls.Config{
                        RootCAs: pool,
                    },
                }

                client := &http.Client{Transport: tr}
                req, err := http.NewRequest("GET", url, nil)
                if err != nil {
                    t.Fatal(err)
                }
                if !tt.anonymous {
                    req.Header.Add("Authorization", fmt.Sprintf("Token %s", token))
                }
                r, err := client.Do(req)
                if err != nil {
                    t.Fatalf("failed to GET %s from component: %v", tt.path, err)
                }

                body, err := ioutil.ReadAll(r.Body)
                if err != nil {
                    t.Fatalf("failed to read response body: %v", err)
                }
                defer r.Body.Close()
                if got, expected := r.StatusCode, *tt.wantSecureCode; got != expected {
                    t.Fatalf("expected http %d at %s of component, got: %d %q", expected, tt.path, got, string(body))
                }
            }
        })
    }
}

func intPtr(x int) *int {
    return &x
}