Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 17:30:00 +00:00)

Merge pull request #10487 from satnam6502/sys-namespace

Move cluster level services to the kube-system namespace

Commit: 4b2d73dcb3
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,6 +2,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: monitoring-heapster
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: monitoring-grafana
-  namespace: default
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Grafana"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,6 +2,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: monitoring-heapster
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-influx-grafana-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: influxGrafana
     version: v1
@@ -42,7 +42,7 @@ spec:
             memory: 100Mi
         env:
           - name: INFLUXDB_EXTERNAL_URL
-            value: /api/v1/proxy/namespaces/default/services/monitoring-influxdb:api/db/
+            value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/
          - name: INFLUXDB_HOST
            value: monitoring-influxdb
          - name: INFLUXDB_PORT
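The apiserver proxy path encodes the namespace, which is why INFLUXDB_EXTERNAL_URL has to change along with the move. A minimal sketch of reaching the relocated service through a local kubectl proxy (the port number is illustrative):

    # Start a local proxy to the apiserver, then hit the namespaced proxy path.
    kubectl proxy --port=8001 &
    curl http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/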
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: monitoring-influxdb
-  namespace: default
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "InfluxDB"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,6 +2,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: monitoring-heapster
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kube-dns-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     version: v5
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
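Once these manifests are applied, the cluster-level services no longer show up in the default namespace. A quick way to confirm the move (a sketch; output varies by cluster):

    # The addons should now appear under kube-system, not default.
    kubectl get services --namespace=kube-system
    kubectl get rc --namespace=kube-system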
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: elasticsearch-logging-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: v1
@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.4
+      - image: gcr.io/google_containers/elasticsearch:1.5
        name: elasticsearch-logging
        resources:
          limits:
@@ -1,7 +1,8 @@
 .PHONY: elasticsearch_logging_discovery build push

-# Keep this one version ahead to help prevent accidental pushes.
-TAG = 1.4
+# The current value of the tag to be used for building and
+# pushing an image to gcr.io
+TAG = 1.5

 build: elasticsearch_logging_discovery
 	docker build -t gcr.io/google_containers/elasticsearch:$(TAG) .
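With the tag bumped to 1.5, rebuilding and publishing the image goes through the targets declared in .PHONY above; a sketch of the workflow, assuming the push target mirrors the build target:

    # Build the discovery binary and the image, then push it to gcr.io.
    make build
    make push   # publishes gcr.io/google_containers/elasticsearch:1.5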
@@ -50,7 +50,7 @@ func main() {
     // Look for endpoints associated with the Elasticsearch loggging service.
     // First wait for the service to become available.
     for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-        elasticsearch, err = c.Services(api.NamespaceDefault).Get("elasticsearch-logging")
+        elasticsearch, err = c.Services(api.NamespaceSystem).Get("elasticsearch-logging")
         if err == nil {
             break
         }
@@ -67,7 +67,7 @@ func main() {
     // Wait for some endpoints.
     count := 0
     for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-        endpoints, err = c.Endpoints(api.NamespaceDefault).Get("elasticsearch-logging")
+        endpoints, err = c.Endpoints(api.NamespaceSystem).Get("elasticsearch-logging")
         if err != nil {
             continue
         }
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
@@ -19,7 +19,7 @@
 # The time_format specification below makes sure we properly
 # parse the time format produced by Docker. This will be
 # submitted to Elasticsearch and should appear like:
-# $ curl 'http://elasticsearch-logging.default:9200/_search?pretty'
+# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
 # ...
 # {
 #   "_index" : "logstash-2014.09.25",
@@ -94,6 +94,21 @@
   tag docker
 </source>

+<match kubernetes.**>
+   type elasticsearch
+   log_level info
+   include_tag_key true
+   host elasticsearch-logging
+   port 9200
+   logstash_format true
+   flush_interval 5s
+   # Never wait longer than 5 minutes between retries.
+   max_retry_wait 300
+   # Disable the limit on the number of retries (retry forever).
+   disable_retry_limit
+</match>
+>>>>>>> Move things into a 'kube-system' namespace.
+
 <source>
   type tail
   format none
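The curl example in the comment loses its `.default` suffix because fluentd and Elasticsearch now share the kube-system namespace, where the bare service name resolves. From a pod in any other namespace the name would need to be qualified; a sketch:

    # Inside kube-system, the short service name is enough:
    curl 'http://elasticsearch-logging:9200/_search?pretty'
    # From another namespace, qualify the service with its namespace:
    curl 'http://elasticsearch-logging.kube-system:9200/_search?pretty'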
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kibana-logging-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kibana-logging
     version: v1
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kube-ui-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-ui
     version: v1
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-ui
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-ui
     kubernetes.io/cluster-service: "true"
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: fluentd-elasticsearch
+  namespace: kube-system
 spec:
   containers:
   - name: fluentd-elasticsearch
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: fluentd-cloud-logging
+  namespace: kube-system
 spec:
   containers:
   - name: fluentd-cloud-logging
@@ -11,6 +11,13 @@ addon-dir-create:
   - require:
     - file: addon-dir-delete

+/etc/kubernetes/addons/namespace.yaml:
+  file.managed:
+    - source: salt://kube-addons/namespace.yaml
+    - user: root
+    - group: root
+    - file_mode: 644
+
 {% if pillar.get('enable_cluster_monitoring', '').lower() == 'influxdb' %}
 /etc/kubernetes/addons/cluster-monitoring/influxdb:
   file.recurse:
@@ -21,6 +21,8 @@ KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}

 ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}

+SYSTEM_NAMESPACE=kube-system
+
 function create-kubeconfig-secret() {
   local -r token=$1
   local -r username=$2
@@ -47,6 +49,7 @@ contexts:
 - context:
     cluster: local
     user: ${username}
+    namespace: ${SYSTEM_NAMESPACE}
   name: service-account-context
 current-context: service-account-context
 EOF
@@ -67,6 +70,7 @@ contexts:
 - context:
     cluster: local
     user: ${username}
+    namespace: ${SYSTEM_NAMESPACE}
   name: service-account-context
 current-context: service-account-context
 EOF
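Setting `namespace` on the generated context means any component using this kubeconfig operates in kube-system unless it overrides the namespace explicitly. A sketch of the equivalent imperative change, assuming the context name used in the heredocs above:

    # Pin the default namespace of the generated context to kube-system.
    kubectl config set-context service-account-context --namespace=kube-system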
@@ -82,36 +86,39 @@ metadata:
   name: token-${safe_username}
 type: Opaque
 EOF
-  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" &
-  # TODO: label the secrets with special label so kubectl does not show these?
+  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" "${SYSTEM_NAMESPACE}" &
 }

 # $1 filename of addon to start.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
+# $4 namespace
 function start_addon() {
   local -r addon_filename=$1;
   local -r tries=$2;
   local -r delay=$3;
+  local -r namespace=$4

-  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}"
+  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
 }

 # $1 string with json or yaml.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
-# $3 name of this object to use when logging about it.
+# $4 name of this object to use when logging about it.
+# $5 namespace for this object
 function create-resource-from-string() {
   local -r config_string=$1;
   local tries=$2;
   local -r delay=$3;
   local -r config_name=$4;
+  local -r namespace=$5;
   while [ ${tries} -gt 0 ]; do
-    echo "${config_string}" | ${KUBECTL} create -f - && \
-      echo "== Successfully started ${config_name} at $(date -Is)" && \
+    echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" create -f - && \
+      echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
       return 0;
     let tries=tries-1;
-    echo "== Failed to start ${config_name} at $(date -Is). ${tries} tries remaining. =="
+    echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
     sleep ${delay};
   done
   return 1;
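With the namespace parameter threaded through, each retry of the loop above amounts to a namespaced `kubectl create`; a sketch of what one attempt of `start_addon <file> 100 10 kube-system` expands to (the file path is illustrative):

    # One attempt of the retry loop, spelled out:
    cat /etc/kubernetes/addons/some-addon.yaml | kubectl --namespace="kube-system" create -f -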
@@ -143,6 +150,8 @@ done

 echo "== default service account has token ${token_found} =="

+start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &
+
 # Generate secrets for "internal service accounts".
 # TODO(etune): move to a completely yaml/object based
 # workflow so that service accounts can be created
@@ -162,7 +171,7 @@ while read line; do
   else
     # Set the server to https://kubernetes. Pods/components that
     # do not have DNS available will have to override the server.
-    create-kubeconfig-secret "${token}" "${username}" "https://kubernetes"
+    create-kubeconfig-secret "${token}" "${username}" "https://kubernetes.default"
   fi
 done < /srv/kubernetes/known_tokens.csv

@@ -170,7 +179,7 @@ done < /srv/kubernetes/known_tokens.csv
 # are defined in a namespace other than default, we should still create the limits for the
 # default namespace.
 for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
-  start_addon ${obj} 100 10 &
+  start_addon ${obj} 100 10 default &
   echo "++ obj ${obj} is created ++"
 done

cluster/saltbase/salt/kube-addons/namespace.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-system
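The addon script creates this namespace before the other addons start (via the `start_addon /etc/kubernetes/addons/namespace.yaml` call above; the namespace argument is empty because Namespace objects are not themselves namespaced). Done by hand, the same step would be (a sketch):

    # Create the kube-system namespace and verify it exists.
    kubectl create -f cluster/saltbase/salt/kube-addons/namespace.yaml
    kubectl get namespaces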
@@ -2,6 +2,7 @@ apiVersion: "v1"
 kind: "LimitRange"
 metadata:
   name: "limits"
+  namespace: default
 spec:
   limits:
     - type: "Container"
@@ -169,6 +169,8 @@ const (
     NamespaceAll string = ""
     // NamespaceNone is the argument for a context when there is no namespace.
     NamespaceNone string = ""
+    // NamespaceSystem is the system namespace where we place system components.
+    NamespaceSystem string = "kube-system"
     // TerminationMessagePathDefault means the default path to capture the application termination message running in a container
     TerminationMessagePathDefault string = "/dev/termination-log"
 )
@@ -57,9 +57,9 @@ func RunClusterInfo(factory *cmdutil.Factory, out io.Writer, cmd *cobra.Command)
     printService(out, "Kubernetes master", client.Host)

     mapper, typer := factory.Object()
-    cmdNamespace, _, err := factory.DefaultNamespace()
-    if err != nil {
-        return err
+    cmdNamespace := cmdutil.GetFlagString(cmd, "namespace")
+    if cmdNamespace == "" {
+        cmdNamespace = api.NamespaceSystem
     }

     // TODO use generalized labels once they are implemented (#341)
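The effect is that `kubectl cluster-info` now looks for cluster services in kube-system unless told otherwise; a sketch of the resulting usage:

    # Defaults to the kube-system namespace when --namespace is not set:
    kubectl cluster-info
    # An explicit namespace still overrides the default:
    kubectl cluster-info --namespace=default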
@@ -188,20 +188,20 @@ var _ = Describe("DNS", func() {
         // TODO: support DNS on vagrant #3580
         SkipIfProviderIs("vagrant")

-        podClient := f.Client.Pods(api.NamespaceDefault)
+        systemClient := f.Client.Pods(api.NamespaceSystem)

         By("Waiting for DNS Service to be Running")
-        dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything())
+        dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything())
         if err != nil {
             Failf("Failed to list all dns service pods")
         }
         if len(dnsPods.Items) != 1 {
             Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String())
         }
-        expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name))
+        expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))

         // All the names we need to be able to resolve.
         // TODO: Spin up a separate test service and test that dns works for that service.
+        // TODO: Should these be changed to kubernetes.kube-system etc. ?
         namesToResolve := []string{
             "kubernetes.default",
             "kubernetes.default.svc",
@@ -227,17 +227,17 @@ var _ = Describe("DNS", func() {
         // TODO: support DNS on vagrant #3580
         SkipIfProviderIs("vagrant")

-        podClient := f.Client.Pods(api.NamespaceDefault)
+        systemClient := f.Client.Pods(api.NamespaceSystem)

         By("Waiting for DNS Service to be Running")
-        dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything())
+        dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything())
         if err != nil {
             Failf("Failed to list all dns service pods")
         }
         if len(dnsPods.Items) != 1 {
             Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String())
         }
-        expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name))
+        expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))

         // Create a test headless service.
         By("Creating a test headless service")
@@ -126,7 +126,7 @@ func TestE2E(t *testing.T) {
     // cluster infrastructure pods that are being pulled or started can block
     // test pods from running, and tests that ensure all pods are running and
    // ready will fail).
-    if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil {
+    if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil {
         t.Errorf("Error waiting for all pods to be running and ready: %v", err)
         return
     }
@@ -70,7 +70,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {

     // Check for the existence of the Elasticsearch service.
     By("Checking the Elasticsearch service exists.")
-    s := f.Client.Services(api.NamespaceDefault)
+    s := f.Client.Services(api.NamespaceSystem)
     // Make a few attempts to connect. This makes the test robust against
     // being run as the first e2e test just after the e2e cluster has been created.
     var err error
@@ -85,10 +85,10 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
     // Wait for the Elasticsearch pods to enter the running state.
     By("Checking to make sure the Elasticsearch pods are running")
     label := labels.SelectorFromSet(labels.Set(map[string]string{esKey: esValue}))
-    pods, err := f.Client.Pods(api.NamespaceDefault).List(label, fields.Everything())
+    pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything())
     Expect(err).NotTo(HaveOccurred())
     for _, pod := range pods.Items {
-        err = waitForPodRunning(f.Client, pod.Name)
+        err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
         Expect(err).NotTo(HaveOccurred())
     }

@@ -100,7 +100,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
     for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
         // Query against the root URL for Elasticsearch.
         body, err := f.Client.Get().
-            Namespace(api.NamespaceDefault).
+            Namespace(api.NamespaceSystem).
             Prefix("proxy").
             Resource("services").
             Name("elasticsearch-logging").
|
|||||||
var body []byte
|
var body []byte
|
||||||
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
|
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
|
||||||
body, err = f.Client.Get().
|
body, err = f.Client.Get().
|
||||||
Namespace(api.NamespaceDefault).
|
Namespace(api.NamespaceSystem).
|
||||||
Prefix("proxy").
|
Prefix("proxy").
|
||||||
Resource("services").
|
Resource("services").
|
||||||
Name("elasticsearch-logging").
|
Name("elasticsearch-logging").
|
||||||
@ -188,7 +188,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
|
|||||||
return isNodeReadySetAsExpected(&node, true)
|
return isNodeReadySetAsExpected(&node, true)
|
||||||
})
|
})
|
||||||
if len(nodes.Items) < 2 {
|
if len(nodes.Items) < 2 {
|
||||||
Failf("Less than two nodes were found Ready.")
|
Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
|
||||||
}
|
}
|
||||||
Logf("Found %d healthy nodes.", len(nodes.Items))
|
Logf("Found %d healthy nodes.", len(nodes.Items))
|
||||||
|
|
||||||
@@ -257,7 +257,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
     for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(10 * time.Second) {

         // Debugging code to report the status of the elasticsearch logging endpoints.
-        esPods, err := f.Client.Pods(api.NamespaceDefault).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
+        esPods, err := f.Client.Pods(api.NamespaceSystem).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
         if err != nil {
             Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
             continue
@@ -272,7 +272,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
     // verison of the name. Ask for twice as many log lines as we expect to check for
     // duplication bugs.
     body, err = f.Client.Get().
-        Namespace(api.NamespaceDefault).
+        Namespace(api.NamespaceSystem).
         Prefix("proxy").
         Resource("services").
         Name("elasticsearch-logging").
@@ -78,7 +78,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
     // situaiton when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
     // is running (which would be an error except during a rolling update).
     for _, rcLabel := range rcLabels {
-        rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
+        rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
         if err != nil {
             return nil, err
         }
@@ -87,7 +87,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
                 rcLabel, len(rcList.Items))
         }
         for _, rc := range rcList.Items {
-            podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
+            podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
             if err != nil {
                 return nil, err
             }
@@ -100,7 +100,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 }

 func expectedServicesExist(c *client.Client) error {
-    serviceList, err := c.Services(api.NamespaceDefault).List(labels.Everything())
+    serviceList, err := c.Services(api.NamespaceSystem).List(labels.Everything())
     if err != nil {
         return err
     }
@@ -205,7 +205,7 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) {
     if !ok {
         Failf("failed to get master http client")
     }
-    proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/default/services/%s:api/", getMasterHost(), influxdbService)
+    proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/%s/services/%s:api/", getMasterHost(), api.NamespaceSystem, influxdbService)
     config := &influxdb.ClientConfig{
         Host: proxyUrl,
         // TODO(vishh): Infer username and pw from the Pod spec.