	Move things into a 'kube-system' namespace.
committed by: Satnam Singh
parent: c8f8e5f333
commit: 988aa6fdf6
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,6 +2,7 @@ kind: Service
 apiVersion: v1
 metadata: 
   name: monitoring-heapster
+  namespace: kube-system
   labels: 
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: monitoring-grafana
-  namespace: default
+  namespace: kube-system
   labels: 
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Grafana"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -1,8 +1,9 @@
 kind: Service
 apiVersion: v1
-metadata: 
+metadata:
   name: monitoring-heapster
-  labels: 
+  namespace: kube-system
+  labels:
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
 spec: 
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-influx-grafana-v1
-  namespace: default
+  namespace: kube-system
   labels: 
     k8s-app: influxGrafana
     version: v1
@@ -42,7 +42,7 @@ spec:
               memory: 100Mi
           env: 
             - name: INFLUXDB_EXTERNAL_URL
-              value: /api/v1/proxy/namespaces/default/services/monitoring-influxdb:api/db/
+              value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/
             - name: INFLUXDB_HOST
              value: monitoring-influxdb
             - name: INFLUXDB_PORT
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: monitoring-influxdb
-  namespace: default
+  namespace: kube-system
   labels: 
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "InfluxDB"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: monitoring-heapster-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: heapster
     version: v5
@@ -2,6 +2,7 @@ kind: Service
 apiVersion: v1
 metadata: 
   name: monitoring-heapster
+  namespace: kube-system
   labels: 
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kube-dns-v5
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     version: v5
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: elasticsearch-logging-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: v1
@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.4
+      - image: gcr.io/google_containers/elasticsearch:1.5
         name: elasticsearch-logging
         resources:
           limits:
@@ -1,7 +1,8 @@
 .PHONY:	elasticsearch_logging_discovery build push
 
-# Keep this one version ahead to help prevent accidental pushes.
-TAG = 1.4
+# The current value of the tag to be used for building and
+# pushing an image to gcr.io
+TAG = 1.5
 
 build:	elasticsearch_logging_discovery
 	docker build -t gcr.io/google_containers/elasticsearch:$(TAG) .
@@ -50,7 +50,7 @@ func main() {
 	// Look for endpoints associated with the Elasticsearch loggging service.
 	// First wait for the service to become available.
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		elasticsearch, err = c.Services(api.NamespaceDefault).Get("elasticsearch-logging")
+		elasticsearch, err = c.Services(api.NamespaceSystem).Get("elasticsearch-logging")
 		if err == nil {
 			break
 		}
@@ -67,7 +67,7 @@ func main() {
 	// Wait for some endpoints.
 	count := 0
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		endpoints, err = c.Endpoints(api.NamespaceDefault).Get("elasticsearch-logging")
+		endpoints, err = c.Endpoints(api.NamespaceSystem).Get("elasticsearch-logging")
 		if err != nil {
 			continue
 		}
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
@@ -19,7 +19,7 @@
 # The time_format specification below makes sure we properly
 # parse the time format produced by Docker. This will be
 # submitted to Elasticsearch and should appear like:
-# $ curl 'http://elasticsearch-logging.default:9200/_search?pretty'
+# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
 # ...
 # {
 #      "_index" : "logstash-2014.09.25",
@@ -94,6 +94,21 @@
   tag docker
 </source>
 
+<match kubernetes.**>
+   type elasticsearch
+   log_level info
+   include_tag_key true
+   host elasticsearch-logging
+   port 9200
+   logstash_format true
+   flush_interval 5s
+   # Never wait longer than 5 minutes between retries.
+   max_retry_wait 300
+   # Disable the limit on the number of retries (retry forever).
+   disable_retry_limit
+</match>
+>>>>>>> Move things into a 'kube-system' namespace.
+
 <source>
   type tail
   format none
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kibana-logging-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kibana-logging
     version: v1
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kube-ui-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-ui
     version: v1
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-ui
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kube-ui
     kubernetes.io/cluster-service: "true"
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: fluentd-elasticsearch
+  namespace: kube-system
 spec:
   containers:
   - name: fluentd-elasticsearch
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: fluentd-cloud-logging
+  namespace: kube-system
 spec:
   containers:
   - name: fluentd-cloud-logging
@@ -11,6 +11,13 @@ addon-dir-create:
     - require:
         - file: addon-dir-delete
 
+/etc/kubernetes/addons/namespace.yaml:
+  file.managed:
+    - source: salt://kube-addons/namespace.yaml
+    - user: root
+    - group: root
+    - file_mode: 644
+
 {% if pillar.get('enable_cluster_monitoring', '').lower() == 'influxdb' %}
 /etc/kubernetes/addons/cluster-monitoring/influxdb:
   file.recurse:
@@ -21,6 +21,8 @@ KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
 
 ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}
 
+SYSTEM_NAMESPACE=kube-system
+
 function create-kubeconfig-secret() {
   local -r token=$1
   local -r username=$2
@@ -47,6 +49,7 @@ contexts:
 - context:
     cluster: local
     user: ${username}
+    namespace: ${SYSTEM_NAMESPACE}
   name: service-account-context
 current-context: service-account-context
 EOF
@@ -67,6 +70,7 @@ contexts:
 - context:
     cluster: local
     user: ${username}
+    namespace: ${SYSTEM_NAMESPACE}
   name: service-account-context
 current-context: service-account-context
 EOF
@@ -82,36 +86,39 @@ metadata:
   name: token-${safe_username}
 type: Opaque
 EOF
-  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" &
+# TODO: label the secrets with special label so kubectl does not show these?
+  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" "${SYSTEM_NAMESPACE}" &
 }
 
 # $1 filename of addon to start.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
+# $4 namespace
 function start_addon() {
   local -r addon_filename=$1;
   local -r tries=$2;
   local -r delay=$3;
+  local -r namespace=$4
 
-  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}"
+  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
 }
 
 # $1 string with json or yaml.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
-# $3 name of this object to use when logging about it.
+# $4 name of this object to use when logging about it.
+# $5 namespace for this object
 function create-resource-from-string() {
   local -r config_string=$1;
   local tries=$2;
   local -r delay=$3;
   local -r config_name=$4;
+  local -r namespace=$5;
   while [ ${tries} -gt 0 ]; do
-    echo "${config_string}" | ${KUBECTL} create -f - && \
-        echo "== Successfully started ${config_name} at $(date -Is)" && \
+    echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" create -f - && \
+        echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
         return 0;
     let tries=tries-1;
-    echo "== Failed to start ${config_name} at $(date -Is). ${tries} tries remaining. =="
+    echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
     sleep ${delay};
   done
   return 1;
@@ -143,6 +150,8 @@ done
 
 echo "== default service account has token ${token_found} =="
 
+start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &
+
 # Generate secrets for "internal service accounts".
 # TODO(etune): move to a completely yaml/object based
 # workflow so that service accounts can be created
@@ -162,7 +171,7 @@ while read line; do
   else
     # Set the server to https://kubernetes. Pods/components that
     # do not have DNS available will have to override the server.
-    create-kubeconfig-secret "${token}" "${username}" "https://kubernetes"
+    create-kubeconfig-secret "${token}" "${username}" "https://kubernetes.default"
   fi
 done < /srv/kubernetes/known_tokens.csv
 
@@ -170,7 +179,7 @@ done < /srv/kubernetes/known_tokens.csv
 # are defined in a namespace other than default, we should still create the limits for the
 # default namespace.
 for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
-  start_addon ${obj} 100 10 &
+  start_addon ${obj} 100 10 default &
   echo "++ obj ${obj} is created ++"
 done
 
cluster/saltbase/salt/kube-addons/namespace.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-system
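With the namespace manifest in place, a component can confirm the namespace exists before installing add-ons into it. A minimal Go sketch against the client package this repository uses elsewhere in the diff; the apiserver address and the standalone program structure are illustrative assumptions, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
)

func main() {
	// Illustrative client setup; the host is an assumption.
	c, err := client.New(&client.Config{Host: "http://127.0.0.1:8080"})
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Confirm the kube-system namespace created by the new add-on manifest exists.
	ns, err := c.Namespaces().Get("kube-system")
	if err != nil {
		log.Fatalf("kube-system namespace not found: %v", err)
	}
	fmt.Printf("namespace %q is present\n", ns.Name)
}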
@@ -2,6 +2,7 @@ apiVersion: "v1"
 kind: "LimitRange"
 metadata:
   name: "limits"
+  namespace: default
 spec:
   limits:
     - type: "Container"
@@ -169,6 +169,8 @@ const (
 	NamespaceAll string = ""
 	// NamespaceNone is the argument for a context when there is no namespace.
 	NamespaceNone string = ""
+	// NamespaceSystem is the system namespace where we place system components.
+	NamespaceSystem string = "kube-system"
 	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container
 	TerminationMessagePathDefault string = "/dev/termination-log"
 )
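The constant gives call sites a single symbol for the system namespace; the discovery and e2e changes elsewhere in this diff consume it. A minimal sketch of such a lookup, modeled on the Services(...).Get call sites in this commit; the client construction is an illustrative assumption:

package main

import (
	"fmt"
	"log"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
)

func main() {
	// Illustrative client setup; not part of the diff.
	c, err := client.New(&client.Config{Host: "http://127.0.0.1:8080"})
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// System services are now resolved in api.NamespaceSystem ("kube-system")
	// rather than api.NamespaceDefault.
	svc, err := c.Services(api.NamespaceSystem).Get("elasticsearch-logging")
	if err != nil {
		log.Fatalf("service lookup failed: %v", err)
	}
	fmt.Printf("found service %s in namespace %s\n", svc.Name, svc.Namespace)
}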
@@ -57,9 +57,9 @@ func RunClusterInfo(factory *cmdutil.Factory, out io.Writer, cmd *cobra.Command)
 	printService(out, "Kubernetes master", client.Host)
 
 	mapper, typer := factory.Object()
-	cmdNamespace, _, err := factory.DefaultNamespace()
-	if err != nil {
-		return err
+	cmdNamespace := cmdutil.GetFlagString(cmd, "namespace")
+	if cmdNamespace == "" {
+		cmdNamespace = api.NamespaceSystem
 	}
 
 	// TODO use generalized labels once they are implemented (#341)
@@ -188,20 +188,20 @@ var _ = Describe("DNS", func() {
 		// TODO: support DNS on vagrant #3580
 		SkipIfProviderIs("vagrant")
 
-		podClient := f.Client.Pods(api.NamespaceDefault)
-
+		systemClient := f.Client.Pods(api.NamespaceSystem)
 		By("Waiting for DNS Service to be Running")
-		dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything())
+		dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything())
 		if err != nil {
 			Failf("Failed to list all dns service pods")
 		}
 		if len(dnsPods.Items) != 1 {
 			Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String())
 		}
-		expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name))
+		expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
 
 		// All the names we need to be able to resolve.
 		// TODO: Spin up a separate test service and test that dns works for that service.
+		// TODO: Should these be changed to kubernetes.kube-system etc. ?
 		namesToResolve := []string{
 			"kubernetes.default",
 			"kubernetes.default.svc",
@@ -227,17 +227,17 @@ var _ = Describe("DNS", func() {
 		// TODO: support DNS on vagrant #3580
 		SkipIfProviderIs("vagrant")
 
-		podClient := f.Client.Pods(api.NamespaceDefault)
+		systemClient := f.Client.Pods(api.NamespaceSystem)
 
 		By("Waiting for DNS Service to be Running")
-		dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything())
+		dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything())
 		if err != nil {
 			Failf("Failed to list all dns service pods")
 		}
 		if len(dnsPods.Items) != 1 {
 			Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String())
 		}
-		expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name))
+		expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
 
 		// Create a test headless service.
 		By("Creating a test headless service")
@@ -126,7 +126,7 @@ func TestE2E(t *testing.T) {
 	// cluster infrastructure pods that are being pulled or started can block
 	// test pods from running, and tests that ensure all pods are running and
 	// ready will fail).
-	if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil {
+	if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil {
 		t.Errorf("Error waiting for all pods to be running and ready: %v", err)
 		return
 	}
@@ -70,7 +70,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 
 	// Check for the existence of the Elasticsearch service.
 	By("Checking the Elasticsearch service exists.")
-	s := f.Client.Services(api.NamespaceDefault)
+	s := f.Client.Services(api.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	var err error
@@ -85,10 +85,10 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	// Wait for the Elasticsearch pods to enter the running state.
 	By("Checking to make sure the Elasticsearch pods are running")
 	label := labels.SelectorFromSet(labels.Set(map[string]string{esKey: esValue}))
-	pods, err := f.Client.Pods(api.NamespaceDefault).List(label, fields.Everything())
+	pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything())
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
-		err = waitForPodRunning(f.Client, pod.Name)
+		err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
 		Expect(err).NotTo(HaveOccurred())
 	}
 
@@ -100,7 +100,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
 		// Query against the root URL for Elasticsearch.
 		body, err := f.Client.Get().
-			Namespace(api.NamespaceDefault).
+			Namespace(api.NamespaceSystem).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -146,7 +146,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	var body []byte
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
 		body, err = f.Client.Get().
-			Namespace(api.NamespaceDefault).
+			Namespace(api.NamespaceSystem).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -188,7 +188,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 		return isNodeReadySetAsExpected(&node, true)
 	})
 	if len(nodes.Items) < 2 {
-		Failf("Less than two nodes were found Ready.")
+		Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
 	}
 	Logf("Found %d healthy nodes.", len(nodes.Items))
 
@@ -257,7 +257,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(10 * time.Second) {
 
 		// Debugging code to report the status of the elasticsearch logging endpoints.
-		esPods, err := f.Client.Pods(api.NamespaceDefault).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
+		esPods, err := f.Client.Pods(api.NamespaceSystem).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
 		if err != nil {
 			Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
 			continue
@@ -272,7 +272,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 		// verison of the name. Ask for twice as many log lines as we expect to check for
 		// duplication bugs.
 		body, err = f.Client.Get().
-			Namespace(api.NamespaceDefault).
+			Namespace(api.NamespaceSystem).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -78,7 +78,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 	// situaiton when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
 	// is running (which would be an error except during a rolling update).
 	for _, rcLabel := range rcLabels {
-		rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
+		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
 		if err != nil {
 			return nil, err
 		}
@@ -87,7 +87,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 				rcLabel, len(rcList.Items))
 		}
 		for _, rc := range rcList.Items {
-			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
+			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
 			if err != nil {
 				return nil, err
 			}
@@ -100,7 +100,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 }
 
 func expectedServicesExist(c *client.Client) error {
-	serviceList, err := c.Services(api.NamespaceDefault).List(labels.Everything())
+	serviceList, err := c.Services(api.NamespaceSystem).List(labels.Everything())
 	if err != nil {
 		return err
 	}
@@ -205,7 +205,7 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) {
 	if !ok {
 		Failf("failed to get master http client")
 	}
-	proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/default/services/%s:api/", getMasterHost(), influxdbService)
+	proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/%s/services/%s:api/", getMasterHost(), api.NamespaceSystem, influxdbService)
 	config := &influxdb.ClientConfig{
 		Host: proxyUrl,
 		// TODO(vishh): Infer username and pw from the Pod spec.