Update to use proxy subresource consistently

This commit is contained in:
Jordan Liggitt 2017-01-09 16:17:23 -05:00
parent dc1302c5ef
commit 88a876b1d0
No known key found for this signature in database
GPG Key ID: 24E7ADF9A3B42012
13 changed files with 81 additions and 33 deletions

View File

@ -29,9 +29,9 @@ type ServiceExpansion interface {
// ProxyGet returns a response of the service by calling it through the proxy.
func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
request := c.client.Get().
Prefix("proxy").
Namespace(c.ns).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort(scheme, name, port)).
Suffix(path)
for k, v := range params {

View File

@ -29,9 +29,9 @@ type ServiceExpansion interface {
// ProxyGet returns a response of the service by calling it through the proxy.
func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
request := c.client.Get().
Prefix("proxy").
Namespace(c.ns).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort(scheme, name, port)).
Suffix(path)
for k, v := range params {

View File

@ -35,7 +35,7 @@ import (
)
const (
baseHeapsterServiceAddress = "/api/v1/proxy/namespaces/kube-system/services/http:heapster:"
baseHeapsterServiceAddress = "/api/v1/namespaces/kube-system/services/http:heapster:/proxy"
baseMetricsAddress = baseHeapsterServiceAddress + "/apis/metrics"
metricsApiVersion = "v1alpha1"
)

View File

@ -100,9 +100,9 @@ func parseMetrics(data string, output *Metrics) error {
func (g *MetricsGrabber) getMetricsFromPod(podName string, namespace string, port int) (string, error) {
rawOutput, err := g.client.Core().RESTClient().Get().
Prefix("proxy").
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", podName, port)).
Suffix("metrics").
Do().Raw()

View File

@ -66,8 +66,8 @@ func (g *MetricsGrabber) getMetricsFromNode(nodeName string, kubeletPort int) (s
var rawOutput []byte
go func() {
rawOutput, err = g.client.Core().RESTClient().Get().
Prefix("proxy").
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Do().Raw()

View File

@ -22,7 +22,7 @@ import (
"k8s.io/apiserver/pkg/server/mux"
)
const dashboardPath = "/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard"
const dashboardPath = "/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy"
// UIRedirect redirects /ui to the kube-ui proxy path.
type UIRedirect struct{}

View File

@ -25,7 +25,7 @@ import (
)
var (
influxdbHost = flag.String("ir-influxdb-host", "localhost:8080/api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api", "Address of InfluxDB which contains metrics required by InitialResources")
influxdbHost = flag.String("ir-influxdb-host", "localhost:8080/api/v1/namespaces/kube-system/services/monitoring-influxdb:api/proxy", "Address of InfluxDB which contains metrics required by InitialResources")
user = flag.String("ir-user", "root", "User used for connecting to InfluxDB")
// TODO: figure out how to better pass password here
password = flag.String("ir-password", "root", "Password used for connecting to InfluxDB")

View File

@ -140,9 +140,10 @@ func init() {
rbac.NewRule("get", "update").Groups(extensionsGroup).Resources("replicationcontrollers/scale").RuleOrDie(),
rbac.NewRule("get", "update").Groups(extensionsGroup).Resources("deployments/scale", "replicasets/scale").RuleOrDie(),
rbac.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(),
// TODO: fix MetricsClient to no longer require root proxy access
// TODO: restrict this to the appropriate namespace
// TODO: Remove the root /proxy permission in 1.7; MetricsClient no longer requires root proxy access as of 1.6 (fixed in https://github.com/kubernetes/kubernetes/pull/39636)
rbac.NewRule("proxy").Groups(legacyGroup).Resources("services").Names("https:heapster:", "http:heapster:").RuleOrDie(),
// TODO: restrict this to the appropriate namespace
rbac.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(),
eventsRule(),
},
})

View File

@ -419,6 +419,15 @@ items:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- 'http:heapster:'
- 'https:heapster:'
resources:
- services/proxy
verbs:
- get
- apiGroups:
- ""
resources:

View File

@ -284,7 +284,7 @@ func getContainerInfo(c clientset.Interface, nodeName string, req *kubeletstats.
if err != nil {
return nil, err
}
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
@ -407,7 +407,7 @@ func getOneTimeResourceUsageOnNode(
}
func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}

View File

@ -327,6 +327,11 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
ExpectNoError(err)
subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, c.Discovery())
if err != nil {
return result, err
}
var data string
var masterRegistered = false
for _, node := range nodes.Items {
@ -338,14 +343,26 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
rawData, err := c.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(metav1.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Suffix("metrics").
Do().Raw()
var rawData []byte
if subResourceProxyAvailable {
rawData, err = c.Core().RESTClient().Get().
Context(ctx).
Namespace(metav1.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
SubResource("proxy").
Suffix("metrics").
Do().Raw()
} else {
rawData, err = c.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(metav1.NamespaceSystem).
SubResource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Suffix("metrics").
Do().Raw()
}
ExpectNoError(err)
data = string(rawData)

View File

@ -234,10 +234,10 @@ func GetPauseImageNameForHostArch() string {
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = utilversion.MustParseSemantic("v1.1.0")
var subResourceServiceAndNodeProxyVersion = utilversion.MustParseSemantic("v1.2.0")
var SubResourceServiceAndNodeProxyVersion = utilversion.MustParseSemantic("v1.2.0")
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
@ -4809,7 +4809,7 @@ const proxyTimeout = 2 * time.Minute
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return restclient.Result{}, err
}

View File

@ -63,21 +63,42 @@ var (
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err := c.Core().RESTClient().Get().
Prefix("proxy").
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()
var result []byte
if subResourceProxyAvailable {
result, err = c.Core().RESTClient().Get().
Context(ctx).
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
SubResource("proxy").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()
} else {
result, err = c.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()
}
if err != nil {
if ctx.Err() != nil {