Merge pull request #9211 from lavalamp/no-ro-nonbreaking
Nonbreaking pieces of #8155
Commit f8bf996000
@@ -87,7 +87,7 @@ what etcd offers (at least not in the way we use it). For simplicty, we run
 etcd and SkyDNS together in a pod, and we do not try to link etcd instances
 across replicas. A helper container called [kube2sky](kube2sky/) also runs in
 the pod and acts a bridge between Kubernetes and SkyDNS. It finds the
-Kubernetes master through the `kubernetes-ro` service (via environment
+Kubernetes master through the `kubernetes` service (via environment
 variables), pulls service info from the master, and writes that to etcd for
 SkyDNS to find.
 
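The switch from `kubernetes-ro` to `kubernetes` matters because kube2sky locates the apiserver through the environment variables the kubelet injects for master services. A minimal sketch of that discovery step, assuming only the standard `KUBERNETES_SERVICE_HOST`/`KUBERNETES_SERVICE_PORT` variables (illustrative code, not part of this PR):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// The kubelet injects <SERVICE>_SERVICE_HOST / <SERVICE>_SERVICE_PORT for master
	// services; after this change only the "kubernetes" service is guaranteed.
	host := os.Getenv("KUBERNETES_SERVICE_HOST")
	port := os.Getenv("KUBERNETES_SERVICE_PORT")
	fmt.Printf("apiserver at https://%s:%s\n", host, port)
}
```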
@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.3
+      - image: gcr.io/google_containers/elasticsearch:1.4
        name: elasticsearch-logging
         ports:
         - containerPort: 9200
@@ -30,14 +30,8 @@ spec:
           name: es-transport-port
           protocol: TCP
         volumeMounts:
-        - name: token-system-logging
-          mountPath: /etc/token-system-logging
-          readOnly: true
         - name: es-persistent-storage
           mountPath: /data
       volumes:
-      - name: token-system-logging
-        secret:
-          secretName: token-system-logging
       - name: es-persistent-storage
         emptyDir: {}
@@ -1,12 +1,16 @@
 .PHONY: elasticsearch_logging_discovery build push
 
-TAG = 1.3
+# Keep this one version ahead to help prevent accidental pushes.
+TAG = 1.4
 
 build: elasticsearch_logging_discovery
 	docker build -t gcr.io/google_containers/elasticsearch:$(TAG) .
 
 push:
 	gcloud preview docker push gcr.io/google_containers/elasticsearch:$(TAG)
 
 elasticsearch_logging_discovery:
 	go build elasticsearch_logging_discovery.go
+
+clean:
+	rm elasticsearch_logging_discovery
@@ -24,14 +24,9 @@ import (
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
 	"github.com/golang/glog"
 )
 
-var (
-	kubeconfig = flag.String("kubeconfig", "/etc/token-system-logging/kubeconfig", "kubeconfig file for access")
-)
-
 func flattenSubsets(subsets []api.EndpointSubset) []string {
 	ips := []string{}
 	for _, ss := range subsets {
@@ -46,17 +41,7 @@ func main() {
 	flag.Parse()
 	glog.Info("Kubernetes Elasticsearch logging discovery")
 
-	settings, err := clientcmd.LoadFromFile(*kubeconfig)
-	if err != nil {
-		glog.Fatalf("Error loading configuration from %s: %v", *kubeconfig, err.Error())
-	}
-
-	config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
-	if err != nil {
-		glog.Fatalf("Failed to construct config: %v", err)
-	}
-
-	c, err := client.New(config)
+	c, err := client.NewInCluster()
 	if err != nil {
 		glog.Fatalf("Failed to make client: %v", err)
 	}
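For reference, the in-cluster pattern the discovery binary switches to looks roughly like this; `client.NewInCluster` is the call shown in the hunk above, while the surrounding wiring is an illustrative sketch rather than the full program:

```go
package main

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/golang/glog"
)

func main() {
	// NewInCluster reads KUBERNETES_SERVICE_HOST/PORT and the mounted
	// service-account token instead of a kubeconfig file on disk.
	c, err := client.NewInCluster()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}
	_ = c // the real binary goes on to query endpoints with this client
}
```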
@@ -43,9 +43,9 @@ Now, you can access the service `wget 10.0.1.89:9090`, and build graphs.
 
 ## How it works
 
-This is a v1beta3 based, containerized prometheus ReplicationController, which scrapes endpoints which are readable on the KUBERNETES_RO service (the internal kubernetes service running in the default namespace, which is visible to all pods).
+This is a v1beta3 based, containerized prometheus ReplicationController, which scrapes endpoints which are readable on the KUBERNETES service (the internal kubernetes service running in the default namespace, which is visible to all pods).
 
-1. The KUBERNETES_RO service is already running : providing read access to the API metrics.
+1. Use kubectl to handle auth & proxy the kubernetes API locally, emulating the old KUBERNETES_RO service.
 
 1. The list of services to be monitored is passed as a command line aguments in
 the yaml file.
@@ -74,5 +74,7 @@ at port 9090.
 - We should publish this image into the kube/ namespace.
 - Possibly use postgre or mysql as a promdash database.
 - push gateway (https://github.com/prometheus/pushgateway) setup.
+- stop using kubectl to make a local proxy faking the old RO port and build in
+  real auth capabilities.
 
 []()
@@ -54,12 +54,29 @@
             "protocol": "TCP"
           }
         ],
+        "env": [
+          {
+            "name": "KUBERNETES_RO_SERVICE_HOST",
+            "value": "localhost"
+          },
+          {
+            "name": "KUBERNETES_RO_SERVICE_PORT",
+            "value": "8001"
+          }
+        ],
         "volumeMounts": [
           {
             "mountPath": "/var/prometheus/",
             "name": "data"
           }
         ]
+      },
+      {
+        "name": "kubectl",
+        "image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty",
+        "args": [
+          "proxy", "-p", "8001"
+        ]
       }
     ],
     "volumes": [
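With the env block pointing KUBERNETES_RO_SERVICE_HOST/PORT at localhost:8001 and the kubectl sidecar running `proxy -p 8001`, the Prometheus container keeps using the old read-only-style address while the sidecar handles auth against the real apiserver. A hedged sketch of what a client inside this pod effectively does (illustrative Go, not part of the manifest):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// "localhost" and "8001" come from the env block above; the kubectl sidecar
	// proxies and authenticates the request to the apiserver.
	host := os.Getenv("KUBERNETES_RO_SERVICE_HOST")
	port := os.Getenv("KUBERNETES_RO_SERVICE_PORT")
	resp, err := http.Get(fmt.Sprintf("http://%s:%s/api/v1beta3/pods", host, port))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```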
@@ -20,5 +20,5 @@ spec:
         name: heapster
         command:
         - /heapster
-        - --source=kubernetes:http://kubernetes-ro?auth=
+        - --source=kubernetes:http://kubernetes?auth=
         - --sink=influxdb:http://monitoring-influxdb:8086
@@ -116,7 +116,6 @@ POD IP CONTAINER(S) IMAGE(S) HOST LABELS
 $ bin/kubectl get services # your service IPs will likely differ
 NAME LABELS SELECTOR IP PORT
 kubernetes component=apiserver,provider=kubernetes <none> 10.10.10.2 443
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.10.10.1 80
 ```
 Lastly, use the Mesos CLI tool to validate the Kubernetes scheduler framework has been registered and running:
 ```bash
@@ -241,7 +240,6 @@ Next, determine the internal IP address of the front end [service][8]:
 $ bin/kubectl get services
 NAME LABELS SELECTOR IP PORT
 kubernetes component=apiserver,provider=kubernetes <none> 10.10.10.2 443
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.10.10.1 80
 redismaster <none> name=redis-master 10.10.10.49 10000
 redisslave name=redisslave name=redisslave 10.10.10.109 10001
 frontend <none> name=frontend 10.10.10.149 9998
examples/kubectl-container/.gitignore (new file)
@@ -0,0 +1,2 @@
+kubectl
+.tag
examples/kubectl-container/Dockerfile (new file)
@@ -0,0 +1,18 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM scratch
+MAINTAINER Daniel Smith <dbsmith@google.com>
+ADD kubectl kubectl
+ENTRYPOINT ["/kubectl"]
examples/kubectl-container/Makefile (new file)
@@ -0,0 +1,30 @@
+# Use:
+#
+# `make kubectl` will build kubectl.
+# `make tag` will suggest a tag.
+# `make container` will build a container-- you must supply a tag.
+# `make push` will push the container-- you must supply a tag.
+
+kubectl:
+	KUBE_STATIC_OVERRIDES="kubectl" ../../hack/build-go.sh cmd/kubectl; cp ../../_output/local/bin/linux/amd64/kubectl .
+
+.tag: kubectl
+	./kubectl version -c | grep -o 'GitVersion:"[^"]*"' | cut -f 2 -d '"' > .tag
+
+tag: .tag
+	@echo "Suggest using TAG=$(shell cat .tag)"
+	@echo "$$ make container TAG=$(shell cat .tag)"
+	@echo "or"
+	@echo "$$ make push TAG=$(shell cat .tag)"
+
+container:
+	$(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion))
+	docker build -t gcr.io/google_containers/kubectl:$(TAG) .
+
+push: container
+	$(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion))
+	gcloud preview docker push gcr.io/google_containers/kubectl:$(TAG)
+
+clean:
+	rm -f kubectl
+	rm -f .tag
examples/kubectl-container/README.md (new file)
@@ -0,0 +1,21 @@
+This directory contains a Dockerfile and Makefile for packaging up kubectl into
+a container.
+
+It's not currently automated as part of a release process, so for the moment
+this is an example of what to do if you want to package kubectl into a
+container/your pod.
+
+In the future, we may release consistently versioned groups of containers when
+we cut a release, in which case the source of gcr.io/google_containers/kubectl
+would become that automated process.
+
+```pod.json``` is provided as an example of packaging kubectl as a sidecar
+container, and to help you verify that kubectl works correctly in
+this configuration.
+
+A possible reason why you would want to do this is to use ```kubectl proxy``` as
+a drop-in replacement for the old no-auth KUBERNETES_RO service. The other
+containers in your pod will find the proxy apparently serving on localhost.
+
+
+[]()
examples/kubectl-container/pod.json (new file)
@@ -0,0 +1,54 @@
+{
+  "kind": "Pod",
+  "apiVersion": "v1beta3",
+  "metadata": {
+    "name": "kubectl-tester"
+  },
+  "spec": {
+    "containers": [
+      {
+        "name": "bb",
+        "image": "gcr.io/google_containers/busybox",
+        "command": [
+          "sh", "-c", "sleep 5; wget -O - ${KUBERNETES_RO_SERVICE_HOST}:${KUBERNETES_RO_SERVICE_PORT}/api/v1beta3/pods/; sleep 10000"
+        ],
+        "ports": [
+          {
+            "containerPort": 8080,
+            "protocol": "TCP"
+          }
+        ],
+        "env": [
+          {
+            "name": "KUBERNETES_RO_SERVICE_HOST",
+            "value": "127.0.0.1"
+          },
+          {
+            "name": "KUBERNETES_RO_SERVICE_PORT",
+            "value": "8001"
+          }
+        ],
+        "volumeMounts": [
+          {
+            "name": "test-volume",
+            "mountPath": "/mount/test-volume"
+          }
+        ]
+      },
+      {
+        "name": "kubectl",
+        "image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty",
+        "imagePullPolicy": "Always",
+        "args": [
+          "proxy", "-p", "8001"
+        ]
+      }
+    ],
+    "volumes": [
+      {
+        "name": "test-volume",
+        "emptyDir": {}
+      }
+    ]
+  }
+}
@@ -104,7 +104,6 @@ elasticsearch-logging kubernetes.io/cluster-service=true,name=elasticsearch-l
 kibana-logging kubernetes.io/cluster-service=true,name=kibana-logging name=kibana-logging 10.0.188.118 5601/TCP
 kube-dns k8s-app=kube-dns,kubernetes.io/cluster-service=true,name=kube-dns k8s-app=kube-dns 10.0.0.10 53/UDP
 kubernetes component=apiserver,provider=kubernetes <none> 10.0.0.2 443/TCP
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.0.1 80/TCP
 monitoring-grafana kubernetes.io/cluster-service=true,name=grafana name=influxGrafana 10.0.254.202 80/TCP
 monitoring-heapster kubernetes.io/cluster-service=true,name=heapster name=heapster 10.0.19.214 80/TCP
 monitoring-influxdb name=influxGrafana name=influxGrafana 10.0.198.71 80/TCP
@@ -89,7 +89,6 @@ cluster/kubectl.sh get services
 NAME LABELS SELECTOR IP PORT(S)
 frontendservice <none> name=frontendhttp 10.0.0.241 3000/TCP
 kubernetes component=apiserver,provider=kubernetes <none> 10.0.0.2 443/TCP
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.0.1 80/TCP
 
 
 ```
@@ -54,7 +54,6 @@ POD IP CONTAINER(S) IMAGE(S)
 spark-master 192.168.90.14 spark-master mattf/spark-master 172.18.145.8/172.18.145.8 name=spark-master Running
 NAME LABELS SELECTOR IP PORT
 kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
 spark-master name=spark-master name=spark-master 10.254.125.166 7077
 ```
 
@@ -135,7 +134,6 @@ spark-worker-controller-5v48c 192.168.90.17 spark-worker mattf/sp
 spark-worker-controller-ehq23 192.168.35.17 spark-worker mattf/spark-worker 172.18.145.12/172.18.145.12 name=spark-worker,uses=spark-master Running
 NAME LABELS SELECTOR IP PORT
 kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
 spark-master name=spark-master name=spark-master 10.254.125.166 7077
 
 $ sudo docker run -it mattf/spark-base sh
@@ -62,7 +62,6 @@ zookeeper 192.168.86.4 zookeeper mattf/zookeeper
 $ kubectl get services
 NAME LABELS SELECTOR IP PORT
 kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
 zookeeper name=zookeeper name=zookeeper 10.254.139.141 2181
 
 $ echo ruok | nc 10.254.139.141 2181; echo
@@ -97,7 +96,6 @@ Ensure that the Nimbus service is running and functional.
 $ kubectl get services
 NAME LABELS SELECTOR IP PORT
 kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
-kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
 zookeeper name=zookeeper name=zookeeper 10.254.139.141 2181
 nimbus name=nimbus name=nimbus 10.254.115.208 6627
 
@@ -99,6 +99,11 @@ readonly KUBE_STATIC_LIBRARIES=(
 kube::golang::is_statically_linked_library() {
   local e
   for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
+  # Allow individual overrides--e.g., so that you can get a static build of
+  # kubectl for inclusion in a container.
+  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
+    for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
+  fi
   return 1;
 }
 
@@ -17,6 +17,7 @@ limitations under the License.
 package clientcmd
 
 import (
+	"fmt"
 	"io"
 	"os"
 
@@ -284,3 +285,32 @@ func (config DirectClientConfig) getCluster() clientcmdapi.Cluster {
 
 	return mergedClusterInfo
 }
+
+// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
+type inClusterClientConfig struct{}
+
+func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
+}
+
+func (inClusterClientConfig) ClientConfig() (*client.Config, error) {
+	return client.InClusterConfig()
+}
+
+func (inClusterClientConfig) Namespace() (string, error) {
+	// TODO: generic way to figure out what namespace you are running in?
+	// This way assumes you've set the POD_NAMESPACE environment variable
+	// using the downward API.
+	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
+		return ns, nil
+	}
+	return "default", nil
+}
+
+// Possible returns true if loading an inside-kubernetes-cluster is possible.
+func (inClusterClientConfig) Possible() bool {
+	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
+	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
+		err == nil && !fi.IsDir()
+}
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (config DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
|
func (config DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
|
||||||
|
// Are we running in a cluster? If so, use that.
|
||||||
|
icc := inClusterClientConfig{}
|
||||||
|
if icc.Possible() {
|
||||||
|
return icc, nil
|
||||||
|
}
|
||||||
mergedConfig, err := config.loadingRules.Load()
|
mergedConfig, err := config.loadingRules.Load()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
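The detection that gates this in-cluster short circuit can be read in isolation as the predicate below, a standalone sketch that mirrors `Possible()` from the previous hunk rather than adding new behavior:

```go
package main

import (
	"fmt"
	"os"
)

// runningInCluster mirrors the Possible() check added above: the service env vars
// must be set and the service-account token must exist as a regular file.
func runningInCluster() bool {
	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
		err == nil && !fi.IsDir()
}

func main() {
	fmt.Println("in cluster:", runningInCluster())
}
```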
@@ -290,7 +290,7 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
 // 1. CommandLineLocation - this parsed from the command line, so it must be late bound. If you specify this,
 // then no other kubeconfig files are merged. This file must exist.
 // 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged.
 // 3. HomeDirectoryLocation
 // Empty filenames are ignored. Files with non-deserializable content produced errors.
 // The first file to set a particular value or map key wins and the value or map key is never changed.
 // This means that the first file to set CurrentContext will have its context preserved. It also means
|
|||||||
// 2. If the command line does not specify one, and the auth info has conflicting techniques, fail.
|
// 2. If the command line does not specify one, and the auth info has conflicting techniques, fail.
|
||||||
// 3. If the command line specifies one and the auth info specifies another, honor the command line technique.
|
// 3. If the command line specifies one and the auth info specifies another, honor the command line technique.
|
||||||
// 2. Use default values and potentially prompt for auth information
|
// 2. Use default values and potentially prompt for auth information
|
||||||
|
//
|
||||||
|
// However, if it appears that we're running in a kubernetes cluster
|
||||||
|
// container environment, then run with the auth info kubernetes mounted for
|
||||||
|
// us. Specifically:
|
||||||
|
// The env vars KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are
|
||||||
|
// set, and the file /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||||
|
// exists and is not a directory.
|
||||||
func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
|
func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
|
||||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||||
flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
|
flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
|
||||||
|
@ -872,7 +872,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Cont
|
|||||||
return opts, nil
|
return opts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var masterServices = util.NewStringSet("kubernetes", "kubernetes-ro")
|
var masterServices = util.NewStringSet("kubernetes")
|
||||||
|
|
||||||
// getServiceEnvVarMap makes a map[string]string of env vars for services a pod in namespace ns should see
|
// getServiceEnvVarMap makes a map[string]string of env vars for services a pod in namespace ns should see
|
||||||
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
|
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
|
||||||
@ -909,8 +909,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
|
|||||||
serviceMap[serviceName] = service
|
serviceMap[serviceName] = service
|
||||||
case kl.masterServiceNamespace:
|
case kl.masterServiceNamespace:
|
||||||
if masterServices.Has(serviceName) {
|
if masterServices.Has(serviceName) {
|
||||||
_, exists := serviceMap[serviceName]
|
if _, exists := serviceMap[serviceName]; !exists {
|
||||||
if !exists {
|
|
||||||
serviceMap[serviceName] = service
|
serviceMap[serviceName] = service
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
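Dropping "kubernetes-ro" from `masterServices` means pods outside the master namespace no longer get the KUBERNETES_RO_* variables injected; only the `kubernetes` service is special-cased. The naming convention itself is unchanged, and the hypothetical helper below (`serviceEnvName` is not from the PR) just illustrates how a service name maps to those variables:

```go
package main

import (
	"fmt"
	"strings"
)

// serviceEnvName shows the docker-links-style naming the kubelet uses for
// service env vars: uppercase the service name and replace dashes with underscores.
func serviceEnvName(serviceName, suffix string) string {
	return strings.ToUpper(strings.Replace(serviceName, "-", "_", -1)) + suffix
}

func main() {
	fmt.Println(serviceEnvName("kubernetes", "_SERVICE_HOST"))    // KUBERNETES_SERVICE_HOST
	fmt.Println(serviceEnvName("kubernetes-ro", "_SERVICE_HOST")) // KUBERNETES_RO_SERVICE_HOST (no longer injected)
}
```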
@@ -35,7 +35,7 @@ import (
 )
 
 // Controller is the controller manager for the core bootstrap Kubernetes controller
-// loops, which manage creating the "kubernetes" and "kubernetes-ro" services, the "default"
+// loops, which manage creating the "kubernetes" service, the "default"
 // namespace, and provide the IP repair check on service IPs
 type Controller struct {
 	NamespaceRegistry namespace.Registry