Merge pull request #58564 from mikedanese/rm-opencontrail

Automatic merge from submit-queue (batch tested with PRs 55439, 58564, 59028, 59169, 59259). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

cluster: delete lots of stuff

let me know if any of this is too aggressive. 

see #49213

```release-note
Remove unmaintained kube-registry-proxy support from gce kube-up.
```
This commit is contained in:
Kubernetes Submit Queue
2018-02-02 17:17:36 -08:00
committed by GitHub
40 changed files with 16 additions and 1757 deletions

View File

@@ -369,7 +369,6 @@ function kube::release::package_kube_manifests_tarball() {
local dst_dir="${release_stage}/gci-trusty"
mkdir -p "${dst_dir}"
cp "${src_dir}/kube-registry-proxy.yaml" "${dst_dir}/"
cp "${src_dir}/kube-proxy.manifest" "${dst_dir}/"
cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
cp "${src_dir}/etcd.manifest" "${dst_dir}"

View File

@@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5|float -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,14 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,14 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,274 +0,0 @@
# Private Docker Registry in Kubernetes
Kubernetes offers an optional private Docker registry addon, which you can turn
on when you bring up a cluster or install later. This gives you a place to
store truly private Docker images for your cluster.
## How it works
The private registry runs as a `Pod` in your cluster. It does not currently
support SSL or authentication, which triggers Docker's "insecure registry"
logic. To work around this, we run a proxy on each node in the cluster,
exposing a port onto the node (via a hostPort), which Docker accepts as
"secure", since it is accessed by `localhost`.
## Turning it on
Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
`ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs
whether the registry is run or not. To set this flag, you can specify
`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
does not include this flag, the following steps should work. Note that some of
this is cloud-provider specific, so you may have to customize it a bit.
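For example, on GCE the flag can be set inline when bringing up the cluster:

```console
$ KUBE_ENABLE_CLUSTER_REGISTRY=true cluster/kube-up.sh
```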
### Make some storage
The primary job of the registry is to store data. To do that we have to decide
where to store it. For cloud environments that have networked storage, we can
use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
```yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: kube-system-kube-registry-pv
  labels:
    kubernetes.io/cluster-service: "true"
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
  capacity:
    storage: {{ pillar['cluster_registry_disk_size'] }}
  accessModes:
    - ReadWriteOnce
  gcePersistentDisk:
    pdName: "{{ pillar['cluster_registry_disk_name'] }}"
    fsType: "ext4"
{% endif %}
```
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
If, for example, you wanted to use NFS you would just need to change the
`gcePersistentDisk` block to `nfs`. See
[here](https://kubernetes.io/docs/user-guide/volumes.md) for more details on volumes.
Note that in any case, the storage (in this case the GCE PersistentDisk) must be
created independently - this is not something Kubernetes manages for you (yet).
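For illustration, the `gcePersistentDisk` block above might become an `nfs` block
like the following (the server name and export path here are hypothetical):

```yaml
  nfs:
    server: nfs.example.com  # hypothetical NFS server
    path: /exports/registry  # hypothetical export path
```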
### I don't want or don't have persistent storage
If you are running in a place that doesn't have networked storage, or if you
just want to kick the tires on this without committing to it, you can easily
adapt the `ReplicationController` specification below to use a simple
`emptyDir` volume instead of a `persistentVolumeClaim`.
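The change amounts to swapping the `volumes` entry at the bottom of the pod spec; a
minimal sketch:

```yaml
      volumes:
      - name: image-store
        emptyDir: {}
```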
## Claim the storage
Now that the Kubernetes cluster knows that some storage exists, you can put a
claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: kube-registry-pvc
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: {{ pillar['cluster_registry_disk_size'] }}
```
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other
`PersistentVolumes` in which case those might get bound instead). This claim
gives you the right to use this storage until you release the claim.
## Run the registry
Now we can run a Docker registry:
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry-upstream
    version: v0
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry-upstream
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry-upstream
        version: v0
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        persistentVolumeClaim:
          claimName: kube-registry-pvc
```
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
## Expose the registry in the cluster
Now that we have a registry `Pod` running, we can expose it as a Service:
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry-upstream
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry-upstream
  ports:
  - name: registry
    port: 5000
    protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
## Expose the registry on each node
Now that we have a running `Service`, we need to expose it onto each Kubernetes
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating the following DaemonSet.
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-registry-proxy
  namespace: kube-system
  labels:
    k8s-app: kube-registry-proxy
    kubernetes.io/cluster-service: "true"
    version: v0.4
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-registry-proxy
        kubernetes.io/name: "kube-registry-proxy"
        kubernetes.io/cluster-service: "true"
        version: v0.4
    spec:
      containers:
      - name: kube-registry-proxy
        image: gcr.io/google_containers/kube-registry-proxy:0.4
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        env:
        - name: REGISTRY_HOST
          value: kube-registry.kube-system.svc.cluster.local
        - name: REGISTRY_PORT
          value: "5000"
        ports:
        - name: registry
          containerPort: 80
          hostPort: 5000
```
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
When modifying the replication controller, service, and DaemonSet definitions, take
care to keep the identifiers of the rc/service pair and of the DaemonSet _distinct_.
Otherwise the localhost proxy DaemonSet will be registered as a backend of the
upstream service, and the proxies will then try to proxy to themselves, which
will, for obvious reasons, not work.
This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:
```console
$ curl localhost:5000
404 page not found
```
## Using the registry
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:
```yaml
image: localhost:5000/user/container
```
Before you can use the registry, you have to be able to get images into it,
though. If you are building an image on your Kubernetes `Node`, you can spell
out `localhost:5000` when you build and push. More likely, though, you are
building locally and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
```console
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
    -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
    | grep Running | head -1 | cut -f1 -d' ')
$ kubectl port-forward --namespace kube-system $POD 5000:5000 &
```
Now you can build and push images on your local computer as
`localhost:5000/yourname/container` and those images will be available inside
your Kubernetes cluster under the same name.
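For example, with the port-forward above in place, a build and push might look like:

```console
$ docker build -t localhost:5000/yourname/container .
$ docker push localhost:5000/yourname/container
```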
## More Extensions
- [Use GCS as storage backend](gcs/README.md)
- [Enable TLS/SSL](tls/README.md)
- [Enable Authentication](auth/README.md)
## Future improvements
* Allow port-forwarding to a Service rather than a pod (#15180)

View File

@@ -1,92 +0,0 @@
# Enable Authentication with Htpasswd for Kube-Registry
The Docker registry supports a few authentication providers. A full list of supported providers can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable htpasswd authentication for kube-registry.
### Prepare Htpasswd Secret
Please generate your own htpasswd file; the examples below assume it is named `htpasswd`.
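For example, one way to generate it is with the `htpasswd` tool from apache2-utils;
the user name and password here are placeholders, and the registry's htpasswd
backend expects bcrypt entries (`-B`):

```console
$ htpasswd -Bbn myuser mypassword > htpasswd
```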
Then create a secret to hold the htpasswd file:
```console
$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd
```
### Run Registry
Note that, for simplicity, this sample rc uses `emptyDir` as its storage backend.
<!-- BEGIN MUNGE: EXAMPLE registry-auth-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    # kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        # kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        - name: REGISTRY_AUTH_HTPASSWD_REALM
          value: basic_realm
        - name: REGISTRY_AUTH_HTPASSWD_PATH
          value: /auth/htpasswd
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        - name: auth-dir
          mountPath: /auth
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        emptyDir: {}
      - name: auth-dir
        secret:
          secretName: registry-auth-secret
```
<!-- END MUNGE: EXAMPLE registry-auth-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).
### To Verify
Set up a proxy or port-forward to the kube-registry. Image pushes and pulls should fail without authentication. Then use `docker login` to authenticate with the kube-registry and verify that they succeed.
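For example, assuming a port-forward to the registry on `localhost:5000` (as
described in the main registry README), a quick check might look like:

```console
$ docker tag busybox localhost:5000/test/busybox
$ docker push localhost:5000/test/busybox   # expected to fail without credentials
$ docker login localhost:5000               # authenticate as a user from your htpasswd file
$ docker push localhost:5000/test/busybox   # should now succeed
```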
### Configure Nodes to Authenticate with Kube-Registry
By default, nodes assume that no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, see the documentation on [configuring nodes to authenticate to a private repository](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository).

View File

@@ -1,56 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    # kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        # kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        - name: REGISTRY_AUTH_HTPASSWD_REALM
          value: basic_realm
        - name: REGISTRY_AUTH_HTPASSWD_PATH
          value: /auth/htpasswd
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        - name: auth-dir
          mountPath: /auth
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        emptyDir: {}
      - name: auth-dir
        secret:
          secretName: registry-auth-secret

View File

@@ -1,81 +0,0 @@
# Kube-Registry with GCS storage backend
Besides the local file system, the Docker registry also supports a number of cloud storage backends. A full list of supported backends can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS as the storage backend for kube-registry.
A few preparation steps are needed.
1. Create a bucket named kube-registry in GCS.
1. Create a service account for GCS access and create a key file in JSON format. Detailed instructions can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts).
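For the first step, the bucket can be created with `gsutil` from the Google Cloud
SDK (assuming `gcloud` is already authenticated):

```console
$ gsutil mb gs://kube-registry
```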
### Pack Keyfile into a Secret
Assuming you have downloaded the keyfile as `keyfile.json`, create a secret from it:
```console
$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json
```
### Run Registry
<!-- BEGIN MUNGE: EXAMPLE registry-gcs-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    # kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        # kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE
          value: gcs
        - name: REGISTRY_STORAGE_GCS_BUCKET
          value: kube-registry
        - name: REGISTRY_STORAGE_GCS_KEYFILE
          value: /gcs/keyfile
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
        volumeMounts:
        - name: gcs-key
          mountPath: /gcs
      volumes:
      - name: gcs-key
        secret:
          secretName: gcs-key-secret
```
<!-- END MUNGE: EXAMPLE registry-gcs-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).

View File

@@ -1,52 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    # kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        # kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE
          value: gcs
        - name: REGISTRY_STORAGE_GCS_BUCKET
          value: kube-registry
        - name: REGISTRY_STORAGE_GCS_KEYFILE
          value: /gcs/keyfile
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
        volumeMounts:
        - name: gcs-key
          mountPath: /gcs
      volumes:
      - name: gcs-key
        secret:
          secretName: gcs-key-secret

View File

@@ -1,26 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM nginx:1.11
RUN apt-get update \
    && apt-get install -y \
    curl \
    --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc
COPY rootfs /
CMD ["/bin/boot"]

View File

@@ -1,24 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build push vet test clean
TAG = 0.4
REPO = gcr.io/google_containers/kube-registry-proxy
build:
	docker build --pull -t $(REPO):$(TAG) .
push:
	gcloud docker -- push $(REPO):$(TAG)

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
# fail if no hostname is provided
REGISTRY_HOST=${REGISTRY_HOST:?no host}
REGISTRY_PORT=${REGISTRY_PORT:-5000}
# we are always listening on port 80
# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25
PORT=80
sed -e "s/%HOST%/$REGISTRY_HOST/g" \
    -e "s/%PORT%/$REGISTRY_PORT/g" \
    -e "s/%BIND_PORT%/$PORT/g" \
    </etc/nginx/conf.d/default.conf.in >/etc/nginx/conf.d/default.conf
# wait for registry to come online
while ! curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do
    printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT"
    sleep 1
done
printf "starting proxy...\n"
exec nginx -g "daemon off;" "$@"

View File

@@ -1,28 +0,0 @@
# Docker registry proxy for api version 2
upstream docker-registry {
    server %HOST%:%PORT%;
}
# No client auth or TLS
# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS
server {
    listen %BIND_PORT%;
    server_name localhost;
    # disable any limits to avoid HTTP 413 for large image uploads
    client_max_body_size 0;
    # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
    chunked_transfer_encoding on;
    location / {
        # Do not allow connections from docker 1.5 and earlier
        # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
        if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
            return 404;
        }
        include docker-registry.conf;
    }
}

View File

@@ -1,6 +0,0 @@
proxy_pass http://docker-registry;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;

View File

@@ -1,26 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
    worker_connections 1024;
}
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    keepalive_timeout 65;
    include /etc/nginx/conf.d/*.conf;
}

View File

@@ -1,17 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
  name: kube-system-kube-registry-pv
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
  capacity:
    storage: {{ pillar['cluster_registry_disk_size'] }}
  accessModes:
    - ReadWriteOnce
  gcePersistentDisk:
    pdName: "{{ pillar['cluster_registry_disk_name'] }}"
    fsType: "ext4"
{% endif %}

View File

@@ -1,14 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: kube-registry-pvc
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: {{ pillar['cluster_registry_disk_size'] }}

View File

@@ -1,49 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry-upstream
    version: v0
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry-upstream
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry-upstream
        version: v0
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2.5.1
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        persistentVolumeClaim:
          claimName: kube-registry-pvc

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry-upstream
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry-upstream
  ports:
  - name: registry
    port: 5000
    protocol: TCP

View File

@@ -1,116 +0,0 @@
# Enable TLS for Kube-Registry
This document describes how to enable TLS for kube-registry. Before you start, please check that you have all the prerequisites:
- A domain for kube-registry. The examples below assume it is `myregistrydomain.com`.
- The domain certificate and key, assumed to be `domain.crt` and `domain.key`.
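If you just want to try this out, a self-signed pair can be generated with
`openssl` (values here are illustrative; note that Docker hosts must be configured
to trust a self-signed certificate):

```console
$ openssl req -x509 -nodes -newkey rsa:4096 -days 365 \
    -keyout domain.key -out domain.crt \
    -subj "/CN=myregistrydomain.com"
```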
### Pack domain.crt and domain.key into a Secret
```console
$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key
```
### Run Registry
Note that, for simplicity, this sample rc uses `emptyDir` as its storage backend.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    # kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        # kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        - name: REGISTRY_HTTP_TLS_CERTIFICATE
          value: /certs/domain.crt
        - name: REGISTRY_HTTP_TLS_KEY
          value: /certs/domain.key
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        - name: cert-dir
          mountPath: /certs
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        emptyDir: {}
      - name: cert-dir
        secret:
          secretName: registry-tls-secret
```
<!-- END MUNGE: EXAMPLE registry-tls-rc.yaml -->
### Expose External IP for Kube-Registry
Modify the default kube-registry service to the `LoadBalancer` type and point the DNS record of `myregistrydomain.com` at the service's external IP.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    # kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry
  type: LoadBalancer
  ports:
  - name: registry
    port: 5000
    protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-tls-svc.yaml -->
### To Verify
Now you should be able to access your kube-registry from another Docker host:
```console
docker pull busybox
docker tag busybox myregistrydomain.com:5000/busybox
docker push myregistrydomain.com:5000/busybox
docker pull myregistrydomain.com:5000/busybox
```

View File

@@ -1,57 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    # kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        # kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        - name: REGISTRY_HTTP_TLS_CERTIFICATE
          value: /certs/domain.crt
        - name: REGISTRY_HTTP_TLS_KEY
          value: /certs/domain.key
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        - name: cert-dir
          mountPath: /certs
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        emptyDir: {}
      - name: cert-dir
        secret:
          secretName: registry-tls-secret

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    # kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry
  type: LoadBalancer
  ports:
  - name: registry
    port: 5000
    protocol: TCP

View File

@@ -159,15 +159,6 @@ function clear-kubeconfig() {
echo "Cleared config for ${CONTEXT} from ${KUBECONFIG}"
}
function tear_down_alive_resources() {
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
"${kubectl}" delete deployments --all || true
"${kubectl}" delete rc --all || true
"${kubectl}" delete pods --all || true
"${kubectl}" delete svc --all || true
"${kubectl}" delete pvc --all || true
}
# Gets username, password for the current-context in kubeconfig, if they exist.
# Assumed vars:
# KUBECONFIG # if unset, defaults to global
@@ -253,17 +244,6 @@ function gen-kube-bearertoken() {
KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
# Generate uid
# This function only works on systems with python. It generates a time based
# UID instead of a UUID because GCE has a name length limit.
#
# Vars set:
# KUBE_UID
function gen-uid {
KUBE_UID=$(python -c 'import uuid; print(uuid.uuid1().fields[0])')
}
function load-or-gen-kube-basicauth() {
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
get-kubeconfig-basicauth
@@ -293,28 +273,6 @@ function load-or-gen-kube-bearertoken() {
fi
}
# Get the master IP for the current-context in kubeconfig if one exists.
#
# Assumed vars:
# KUBECONFIG # if unset, defaults to global
# KUBE_CONTEXT # if unset, defaults to current-context
#
# Vars set:
# KUBE_MASTER_URL
#
# KUBE_MASTER_URL will be empty if no current-context is set, or the
# current-context user does not exist or contain a server entry.
function detect-master-from-kubeconfig() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
cc="${KUBE_CONTEXT}"
fi
local cluster=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.cluster}")
KUBE_MASTER_URL=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}")
}
# Sets KUBE_VERSION variable to the proper version number (e.g. "v1.0.6",
# "v1.2.0-alpha.1.881+376438b69c7612") or a version publication of the form
# <path>/<version> (e.g. "release/stable", "ci/latest-1").
@@ -449,50 +407,6 @@ function find-release-version() {
fi
}
function stage-images() {
find-release-version
find-release-tars
KUBE_IMAGE_TAG="$(echo """${KUBE_GIT_VERSION}""" | sed 's/+/-/g')"
local docker_wrapped_binaries=(
"kube-apiserver"
"kube-controller-manager"
"kube-scheduler"
"kube-proxy"
)
local docker_cmd=("docker")
if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/"* ]]; then
local docker_push_cmd=("gcloud" "docker")
else
local docker_push_cmd=("${docker_cmd[@]}")
fi
local temp_dir="$(mktemp -d -t 'kube-server-XXXX')"
tar xzfv "${SERVER_BINARY_TAR}" -C "${temp_dir}" &> /dev/null
for binary in "${docker_wrapped_binaries[@]}"; do
local docker_tag="$(cat ${temp_dir}/kubernetes/server/bin/${binary}.docker_tag)"
(
"${docker_cmd[@]}" load -i "${temp_dir}/kubernetes/server/bin/${binary}.tar"
"${docker_cmd[@]}" rmi "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" 2>/dev/null || true
"${docker_cmd[@]}" tag "gcr.io/google_containers/${binary}:${docker_tag}" "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}"
"${docker_push_cmd[@]}" push "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}"
) &> "${temp_dir}/${binary}-push.log" &
done
kube::util::wait-for-jobs || {
echo "!!! unable to push images. See ${temp_dir}/*.log for more info." 1>&2
return 1
}
rm -rf "${temp_dir}"
return 0
}
# Quote something appropriate for a yaml string.
#
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
@@ -569,7 +483,6 @@ function build-kube-env {
fi
build-runtime-config
gen-uid
rm -f ${file}
cat >$file <<EOF
@@ -605,13 +518,9 @@ LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-false})
ENABLE_CLUSTER_REGISTRY: $(yaml-quote ${ENABLE_CLUSTER_REGISTRY:-false})
CLUSTER_REGISTRY_DISK: $(yaml-quote ${CLUSTER_REGISTRY_DISK:-})
CLUSTER_REGISTRY_DISK_SIZE: $(yaml-quote ${CLUSTER_REGISTRY_DISK_SIZE:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
@@ -626,17 +535,11 @@ NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
SOFTLOCKUP_PANIC: $(yaml-quote ${SOFTLOCKUP_PANIC:-})
OPENCONTRAIL_TAG: $(yaml-quote ${OPENCONTRAIL_TAG:-})
OPENCONTRAIL_KUBERNETES_TAG: $(yaml-quote ${OPENCONTRAIL_KUBERNETES_TAG:-})
OPENCONTRAIL_PUBLIC_SUBNET: $(yaml-quote ${OPENCONTRAIL_PUBLIC_SUBNET:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_IMAGE_TAG: $(yaml-quote ${KUBE_IMAGE_TAG:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
KUBE_UID: $(yaml-quote ${KUBE_UID:-})
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
@@ -838,11 +741,6 @@ EOF
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
APISERVER_TEST_ARGS: $(yaml-quote ${APISERVER_TEST_ARGS})
EOF
fi
if [ -n "${APISERVER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
APISERVER_TEST_LOG_LEVEL: $(yaml-quote ${APISERVER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
@@ -934,12 +832,6 @@ EOF
ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
EOF
fi
if [ -n "${DNS_ZONE_NAME:-}" ]; then
cat >>$file <<EOF
DNS_ZONE_NAME: $(yaml-quote ${DNS_ZONE_NAME})
EOF
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
@@ -1325,7 +1217,6 @@ function get-env-val() {
function parse-master-env() {
# Get required master env vars
local master_env=$(get-master-env)
KUBELET_TOKEN=$(get-env-val "${master_env}" "KUBELET_TOKEN")
KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN")
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT")

View File

@@ -225,12 +225,6 @@ DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_PD:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
@@ -314,10 +308,7 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
@@ -341,10 +332,6 @@ ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.

View File

@@ -249,12 +249,6 @@ DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
@@ -346,11 +340,7 @@ STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
@@ -381,10 +371,6 @@ ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Upgrade test jobs that go from a version < 1.6 to a version >= 1.6 should override this to be true.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-true}" # true, false
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false

View File

@@ -1287,8 +1287,6 @@ function prepare-log-file {
function prepare-kube-proxy-manifest-variables {
local -r src_file=$1;
remove-salt-config-comments "${src_file}"
local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
local kube_docker_registry="gcr.io/google_containers"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
@@ -1374,7 +1372,6 @@ function prepare-etcd-manifest {
local -r temp_file="/tmp/$5"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
remove-salt-config-comments "${temp_file}"
sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
@@ -1491,17 +1488,6 @@ function prepare-mounter-rootfs {
cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}
# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
function remove-salt-config-comments {
# Remove salt configuration.
sed -i "/^[ |\t]*{[#|%]/d" $1
# Remove comments.
sed -i "/^[ |\t]*#/d" $1
}
# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
@@ -1713,7 +1699,6 @@ function start-kube-apiserver {
# Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
remove-salt-config-comments "${abac_policy_json}"
if [[ -n "${KUBE_USER:-}" ]]; then
sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
else
@@ -1758,7 +1743,6 @@ function start-kube-apiserver {
fi
src_file="${src_dir}/kube-apiserver.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@@ -1868,7 +1852,6 @@ function start-kube-controller-manager {
fi
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
@@ -1916,7 +1899,6 @@ function start-kube-scheduler {
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
remove-salt-config-comments "${src_file}"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@@ -1937,7 +1919,6 @@ function start-cluster-autoscaler {
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@@ -2146,7 +2127,6 @@ EOF
else
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
@@ -2193,18 +2173,6 @@ EOF
setup-kube-dns-manifest
fi
fi
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
setup-addon-manifests "addons" "registry"
local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}"
mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}"
# Replace the salt configurations with variable values.
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}"
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
@@ -2262,12 +2230,6 @@ function start-image-puller {
/etc/kubernetes/manifests/
}
# Starts kube-registry proxy
function start-kube-registry-proxy {
echo "Start kube-registry-proxy"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}
# Starts a l7 loadbalancing controller for ingress.
function start-lb-controller {
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
@@ -2451,10 +2413,6 @@ else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
# Kube-registry-proxy.
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
start-kube-registry-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi

View File

@@ -1,8 +1,7 @@
{% set kube_user = grains.kube_user -%}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"{{kube_user}}", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kube_proxy", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubecfg", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"client", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}

View File

@@ -1,14 +1,3 @@
{% if pillar.get('enable_cluster_autoscaler', '').lower() == 'true' %}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config + " " + pillar.get('autoscaler_expander_config', '') -%}
{
"kind": "Pod",
"apiVersion": "v1",
@@ -103,4 +92,3 @@
"restartPolicy": "Always"
}
}
{% endif %}

View File

@@ -1,30 +1,3 @@
{% set etcd_protocol = 'http' -%}
{% set etcd_creds = '' -%}
{% if pillar.get('etcd_over_ssl', '').lower() == 'true' -%}
{% set etcd_protocol = 'https' -%}
{% set etcd_creds = '--peer-trusted-ca-file /srv/kubernetes/etcd-ca.crt --peer-cert-file /srv/kubernetes/etcd-peer.crt --peer-key-file /srv/kubernetes/etcd-peer.key -peer-client-cert-auth' -%}
{% endif -%}
{% set hostname = pillar.get('hostname', '') -%}
{% set cluster_state = (pillar.get('initial_etcd_cluster_state') or 'new') -%}
{% set etcd_cluster_array = (pillar.get('initial_etcd_cluster') or hostname).split(',') -%}
{% set etcd_cluster = '' -%}
{# We use vars dictionary to pass variables set inside the for loop, because jinja defines new variables inside the for loop that hide variables from the outside. #}
{% set vars = {'etcd_cluster': ''} -%}
{% for host in etcd_cluster_array -%}
{% if etcd_cluster != '' -%}
{% set etcd_cluster = etcd_cluster ~ ',' -%}
{% endif -%}
{% set etcd_cluster = etcd_cluster ~ 'etcd-' ~ host ~ '=' ~ etcd_protocol ~'://' ~ host ~ ':' ~ server_port -%}
{% do vars.update({'etcd_cluster': etcd_cluster}) -%}
{% endfor -%}
{% set etcd_cluster = vars.etcd_cluster -%}
{% set quota_bytes = '' -%}
{% if pillar.get('storage_backend', 'etcd3') == 'etcd3' -%}
{% set quota_bytes = '--quota-backend-bytes=4294967296' -%}
{% endif -%}
{% set liveness_probe_initial_delay = pillar.get('etcd_liveness_probe_initial_delay', 15) -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{
"apiVersion": "v1",
"kind": "Pod",

View File

@@ -1,213 +1,3 @@
{% set daemon_args = "$DAEMON_ARGS" -%}
{% if grains['os_family'] == 'RedHat' -%}
{% set daemon_args = "" -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% set srv_sshproxy_path = "/srv/sshproxy" -%}
{% if grains.cloud is defined -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% endif -%}
{% set advertise_address = "" -%}
{% if grains.advertise_address is defined -%}
{% set advertise_address = "--advertise-address=" + grains.advertise_address -%}
{% endif -%}
{% set proxy_ssh_options = "" -%}
{% if grains.proxy_ssh_user is defined -%}
{% set proxy_ssh_options = "--ssh-user=" + grains.proxy_ssh_user + " --ssh-keyfile=/srv/sshproxy/.sshkeyfile" -%}
{# Append 40 characters onto command to work around #9822. #}
{# If mount list changes, this may also need to change. #}
{% set proxy_ssh_options = proxy_ssh_options + " " -%}
{% endif -%}
{% set address = "--address=127.0.0.1" -%}
{% set bind_address = "" -%}
{% if grains.publicAddressOverride is defined -%}
{% set bind_address = "--bind-address=" + grains.publicAddressOverride -%}
{% endif -%}
{% set storage_backend = "" -%}
{% if pillar['storage_backend'] is defined -%}
{% set storage_backend = "--storage-backend=" + pillar['storage_backend'] -%}
{% endif -%}
{% set etcd_servers = "--etcd-servers=http://127.0.0.1:2379" -%}
{% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
{% set storage_media_type = "" -%}
{% if pillar['storage_media_type'] is defined -%}
{% set storage_media_type = "--storage-media-type=" + pillar['storage_media_type'] -%}
{% endif -%}
{% set liveness_probe_initial_delay = pillar.get('kube_apiserver_liveness_probe_initial_delay', 15) -%}
{% set request_timeout = "" -%}
{% if pillar['kube_apiserver_request_timeout_sec'] is defined -%}
{% set request_timeout = "--request-timeout=" + pillar['kube_apiserver_request_timeout_sec'] + "s" -%}
{% endif -%}
{% set max_requests_inflight = "" -%}
{% set target_ram_mb = "" -%}
{% if pillar['num_nodes'] is defined -%}
# If the cluster is large, increase max-requests-inflight limit in apiserver.
{% if pillar['num_nodes']|int >= 1000 -%}
{% set max_requests_inflight = "--max-requests-inflight=1500 --max-mutating-requests-inflight=500" -%}
{% endif -%}
# Set amount of memory available for apiserver based on number of nodes.
# TODO: Once we start setting proper requests and limits for apiserver
# we should reuse the same logic here instead of current heuristic.
{% set tmp_ram_mb = pillar['num_nodes']|int * 60 %}
{% set target_ram_mb = "--target-ram-mb=" + tmp_ram_mb|string -%}
{% endif -%}
{% set service_cluster_ip_range = "" -%}
{% if pillar['service_cluster_ip_range'] is defined -%}
{% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
{% set cert_file = "--tls-cert-file=/srv/kubernetes/server.cert" -%}
{% set key_file = "--tls-private-key-file=/srv/kubernetes/server.key" -%}
{% set kubelet_cert_file = "--kubelet-client-certificate=/srv/kubernetes/kubeapiserver.cert" -%}
{% set kubelet_key_file = "--kubelet-client-key=/srv/kubernetes/kubeapiserver.key" -%}
{% set client_ca_file = "" -%}
{% set secure_port = "6443" -%}
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
{% set secure_port = "443" -%}
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
{% set min_request_timeout = "" -%}
{% if grains.minRequestTimeout is defined -%}
{% set min_request_timeout = "--min-request-timeout=" + grains.minRequestTimeout -%}
{% endif -%}
{% set token_auth_file = " --token-auth-file=/dev/null" -%}
{% set basic_auth_file = "" -%}
{% set authz_mode = "" -%}
{% set abac_policy_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
{% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
{% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
{% set authz_mode = " --authorization-mode=ABAC" -%}
{% set abac_policy_file = " --authorization-policy-file=/srv/kubernetes/abac-authz-policy.jsonl" -%}
{% endif -%}
{% set webhook_authentication_config = "" -%}
{% set webhook_authn_config_mount = "" -%}
{% set webhook_authn_config_volume = "" -%}
{% if grains.webhook_authentication_config is defined -%}
{% set webhook_authentication_config = " --authentication-token-webhook-config-file=" + grains.webhook_authentication_config -%}
{% set webhook_authn_config_mount = "{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"" + grains.webhook_authentication_config + "\", \"readOnly\": false}," -%}
{% set webhook_authn_config_volume = "{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"" + grains.webhook_authentication_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% set webhook_authorization_config = "" -%}
{% set webhook_config_mount = "" -%}
{% set webhook_config_volume = "" -%}
{% if grains.webhook_authorization_config is defined -%}
{% set webhook_authorization_config = " --authorization-webhook-config-file=" + grains.webhook_authorization_config -%}
{% set webhook_config_mount = "{\"name\": \"webhookconfigmount\",\"mountPath\": \"" + grains.webhook_authorization_config + "\", \"readOnly\": false}," -%}
{% set webhook_config_volume = "{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"" + grains.webhook_authorization_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% set authz_mode = authz_mode + ",Webhook" -%}
{% endif -%}
{% set image_review_config = "" -%}
{% set admission_controller_config_mount = "" -%}
{% set admission_controller_config_volume = "" -%}
{% set image_policy_webhook_config_mount = "" -%}
{% set image_policy_webhook_config_volume = "" -%}
{% if grains.image_review_config is defined -%}
{% set image_review_config = " --admission-control-config-file=" + grains.image_review_config -%}
{% set admission_controller_config_mount = "{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"" + grains.image_review_config + "\", \"readOnly\": false}," -%}
{% set admission_controller_config_volume = "{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"" + grains.image_review_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% set image_policy_webhook_config_mount = "{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false}," -%}
{% set image_policy_webhook_config_volume = "{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% set admission_control = "" -%}
{% if pillar['admission_control'] is defined -%}
{% set admission_control = "--admission-control=" + pillar['admission_control'] -%}
{% endif -%}
{% set runtime_config = "" -%}
{% if grains.runtime_config is defined -%}
{% set runtime_config = "--runtime-config=" + grains.runtime_config -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['api_server_test_log_level'] is defined -%}
{% set log_level = pillar['api_server_test_log_level'] -%}
{% endif -%}
{% set enable_garbage_collector = "" -%}
{% if pillar['enable_garbage_collector'] is defined -%}
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}
{% set etcd_compaction_interval = "" %}
{% if pillar['etcd_compaction_interval_sec'] is defined -%}
{% set etcd_compaction_interval = "--etcd-compaction-interval=" + pillar['etcd_compaction_interval_sec'] + "s" -%}
{% endif -%}
{% set etcd_quorum_read = "" %}
{% if pillar['etcd_quorum_read'] is defined -%}
{% set etcd_quorum_read = "--etcd_quorum_read=" + pillar['etcd_quorum_read'] -%}
{% endif -%}
{% set audit_log = "" -%}
{% set audit_policy_config_mount = "" -%}
{% set audit_policy_config_volume = "" -%}
{% set audit_webhook_config_mount = "" -%}
{% set audit_webhook_config_volume = "" -%}
{% if pillar['enable_apiserver_basic_audit'] is defined and pillar['enable_apiserver_basic_audit'] in ['true'] -%}
{% set audit_log = "--audit-log-path=/var/log/kube-apiserver-audit.log --audit-log-maxage=0 --audit-log-maxbackup=0 --audit-log-maxsize=2000000000" -%}
{% elif pillar['enable_apiserver_advanced_audit'] is defined and pillar['enable_apiserver_advanced_audit'] in ['true'] -%}
{% set audit_log = "--audit-policy-file=/etc/audit_policy.config" -%}
{% set audit_policy_config_mount = "{\"name\": \"auditpolicyconfigmount\",\"mountPath\": \"/etc/audit_policy.config\", \"readOnly\": true}," -%}
{% set audit_policy_config_volume = "{\"name\": \"auditpolicyconfigmount\",\"hostPath\": {\"path\": \"/etc/audit_policy.config\", \"type\": \"FileOrCreate\"}}," -%}
{% if pillar['advanced_audit_backend'] is defined and 'log' in pillar['advanced_audit_backend'] -%}
{% set audit_log = audit_log + " --audit-log-path=/var/log/kube-apiserver-audit.log --audit-log-maxage=0 --audit-log-maxbackup=0 --audit-log-maxsize=2000000000" -%}
{% endif %}
{% if pillar['advanced_audit_backend'] is defined and 'webhook' in pillar['advanced_audit_backend'] -%}
{% set audit_log = audit_log + " --audit-webhook-mode=batch" -%}
{% set audit_webhook_config_mount = "{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"/etc/audit_webhook.config\", \"readOnly\": true}," -%}
{% set audit_webhook_config_volume = "{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/audit_webhook.config\", \"type\": \"FileOrCreate\"}}," -%}
{% endif %}
{% endif -%}
{% set params = address + " " + storage_backend + " " + storage_media_type + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector + " " + etcd_quorum_read + " " + etcd_compaction_interval + " " + audit_log + " " + request_timeout -%}
{% set params = params + " " + cert_file + " " + key_file + " " + kubelet_cert_file + " " + kubelet_key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config + image_review_config -%}
# test_args has to be kept at the end, so it overwrites any prior configuration
{% if pillar['apiserver_test_args'] is defined -%}
{% set params = params + " " + pillar['apiserver_test_args'] -%}
{% endif -%}
{% set container_env = "" -%}
{
"apiVersion": "v1",
"kind": "Pod",

View File

@@ -1,86 +1,3 @@
{% set cluster_name = "" -%}
{% set cluster_cidr = "" -%}
{% set allocate_node_cidrs = "" -%}
{% set service_cluster_ip_range = "" %}
{% set terminated_pod_gc = "" -%}
{% if pillar['instance_prefix'] is defined -%}
{% set cluster_name = "--cluster-name=" + pillar['instance_prefix'] -%}
{% endif -%}
{% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
{% endif -%}
{% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=true" -%}
{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}
{% if pillar['terminated_pod_gc_threshold'] is defined -%}
{% set terminated_pod_gc = "--terminated-pod-gc-threshold=" + pillar['terminated_pod_gc_threshold'] -%}
{% endif -%}
{% set enable_garbage_collector = "" -%}
{% if pillar['enable_garbage_collector'] is defined -%}
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
{% set pv_recycler_mount = "" -%}
{% set pv_recycler_volume = "" -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% set flex_vol_plugin_dir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec" -%}
{% if grains.cloud is defined -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% endif -%}
{% set root_ca_file = "" -%}
{% if grains.cloud is defined and grains.cloud == 'gce' %}
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['controller_manager_test_log_level'] is defined -%}
{% set log_level = pillar['controller_manager_test_log_level'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
{% set params = params + " " + feature_gates -%}
{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
{% set params = params + " --enable-hostpath-provisioner" %}
{% endif -%}
# test_args has to be kept at the end, so it overwrites any prior configuration
{% if pillar['controller_manager_test_args'] is defined -%}
{% set params = params + " " + pillar['controller_manager_test_args'] -%}
{% endif -%}
{% set container_env = "" -%}
{
"apiVersion": "v1",
"kind": "Pod",

View File

@@ -1,52 +1,3 @@
# Please keep kube-proxy configuration in sync with:
# cluster/addons/kube-proxy/kube-proxy-ds.yaml
{% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%}
{% if grains.api_servers is defined -%}
{% set api_servers = "--master=https://" + grains.api_servers -%}
{% else -%}
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
{% endif -%}
{% set test_args = "" -%}
{% if pillar['kubeproxy_test_args'] is defined -%}
{% set test_args=pillar['kubeproxy_test_args'] %}
{% endif -%}
{% set cluster_cidr = "" -%}
{% if pillar['cluster_cidr'] is defined -%}
{% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['kubeproxy_test_log_level'] is defined -%}
{% set log_level = pillar['kubeproxy_test_log_level'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set throttles = "--iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s" -%}
{% set pod_priority = "" -%}
{% if pillar.get('enable_pod_priority', '').lower() == 'true' -%}
{% set pod_priority = "priorityClassName: system-node-critical" -%}
{% endif -%}
# test_args should always go last to overwrite prior configuration
{% set params = log_level + " " + throttles + " " + feature_gates + " " + test_args -%}
{% set container_env = "" -%}
{% set kube_cache_mutation_detector_env_name = "" -%}
{% set kube_cache_mutation_detector_env_value = "" -%}
# kube-proxy podspec
apiVersion: v1
kind: Pod
metadata:
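The `--master` selection above has a GCE special case worth spelling out. A sketch with illustrative addresses:

```bash
# grains.api_servers defined, grains.cloud == "gce" (the default port 443 is
# used, so no port is appended):
#   --master=https://35.199.0.10
# grains.api_servers defined, any other provider:
#   --master=https://35.199.0.10:6443
# grains.api_servers undefined: fall back to the first IP the
# kubernetes-master role publishes through the Salt mine.
echo "--master=https://35.199.0.10"   # illustrative GCE result
```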

View File

@@ -1,35 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-registry-proxy
namespace: kube-system
labels:
k8s-app: kube-registry
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
template:
metadata:
labels:
k8s-app: kube-registry
kubernetes.io/name: "kube-registry-proxy"
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
containers:
- name: kube-registry-proxy
image: gcr.io/google_containers/kube-registry-proxy:0.4
resources:
limits:
cpu: 100m
memory: 50Mi
env:
- name: REGISTRY_HOST
value: kube-registry.kube-system.svc.cluster.local
- name: REGISTRY_PORT
value: "5000"
ports:
- name: registry
containerPort: 80
hostPort: 5000
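The DaemonSet deleted above published the in-cluster registry Service on every node: the proxy listened on `hostPort` 5000 and forwarded to `kube-registry.kube-system.svc.cluster.local:5000`. With it running, a node could address the registry as `localhost:5000` (the repo and image names below are hypothetical):

```bash
# Docker Registry HTTP API v2, reached through the node-local proxy:
curl http://localhost:5000/v2/_catalog
docker pull localhost:5000/my-app:latest
```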

View File

@@ -1,28 +1,3 @@
{% set params = "--master=127.0.0.1:8080" -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['scheduler_test_log_level'] is defined -%}
{% set log_level = pillar['scheduler_test_log_level'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set scheduling_algorithm_provider = "" -%}
{% if grains.scheduling_algorithm_provider is defined -%}
{% set scheduling_algorithm_provider = "--algorithm-provider=" + grains.scheduling_algorithm_provider -%}
{% endif -%}
{% set params = params + " " + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
# test_args has to be kept at the end, so it overwrites any prior configuration
{% if pillar['scheduler_test_args'] is defined -%}
{% set params = params + " " + pillar['scheduler_test_args'] -%}
{% endif -%}
{
"apiVersion": "v1",
"kind": "Pod",

View File

@@ -190,7 +190,6 @@ function get-node-os() {
# ZONE
#
# Vars set:
# KUBELET_TOKEN
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
@@ -230,7 +229,6 @@ function setup-base-image() {
# Vars set:
# SANITIZED_VERSION
# INSTANCE_GROUPS
# KUBELET_TOKEN
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
@@ -253,7 +251,6 @@ function prepare-node-upgrade() {
# Get required node env vars from existing template.
local node_env=$(get-node-env)
KUBELET_TOKEN=$(get-env-val "${node_env}" "KUBELET_TOKEN")
KUBE_PROXY_TOKEN=$(get-env-val "${node_env}" "KUBE_PROXY_TOKEN")
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${node_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
CA_CERT_BASE64=$(get-env-val "${node_env}" "CA_CERT")
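`get-env-val` itself is not part of this diff; judging from the call sites above, it pulls a single key's value out of the instance's kube-env blob. A sketch under that assumption (the blob contents are made up):

```bash
node_env='KUBE_PROXY_TOKEN: abc123
CA_CERT: LS0tLS1CRUdJTi4uLg=='
get-env-val "${node_env}" "KUBE_PROXY_TOKEN"   # => abc123 (illustrative)
```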

View File

@@ -53,9 +53,6 @@ if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
# If the master image is not set, we use the latest GCI image.
# Otherwise, we respect whatever is set by the user.
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${GCI_VERSION}}
elif [[ "${MASTER_OS_DISTRIBUTION}" == "debian" ]]; then
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${CVM_VERSION}}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
fi
# Sets node image based on the specified os distro. Currently this function only
@@ -71,9 +68,6 @@ function set-node-image() {
# Otherwise, we respect whatever is set by the user.
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-${DEFAULT_GCI_PROJECT}}
elif [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${CVM_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-google-containers}
fi
}
@@ -677,61 +671,6 @@ function create-node-template() {
done
}
# Robustly try to add metadata on an instance.
# $1: The name of the instance.
# $2...$n: The metadata key=value pairs to add.
function add-instance-metadata() {
local -r instance=$1
shift 1
local -r kvs=( "$@" )
detect-project
local attempt=0
while true; do
if ! gcloud compute instances add-metadata "${instance}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--metadata "${kvs[@]}"; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" >&2
exit 2
fi
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}" >&2
attempt=$(($attempt+1))
sleep $((5 * $attempt))
else
break
fi
done
}
# Robustly try to add metadata on an instance, from a file.
# $1: The name of the instance.
# $2...$n: The metadata key=file pairs to add.
function add-instance-metadata-from-file() {
local -r instance=$1
shift 1
local -r kvs=( "$@" )
detect-project
local attempt=0
while true; do
echo "${kvs[@]}"
if ! gcloud compute instances add-metadata "${instance}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--metadata-from-file "$(join_csv ${kvs[@]})"; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" >&2
exit 2
fi
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}" >&2
attempt=$(($attempt+1))
sleep $(($attempt * 5))
else
break
fi
done
}
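Both deleted helpers wrap `gcloud compute instances add-metadata` in the same retry loop: linearly growing sleeps (5s, 10s, ...) and a hard exit once the attempt counter passes five. Illustrative calls (the instance and file names are hypothetical):

```bash
add-instance-metadata "e2e-test-master" "cluster-name=e2e-test"
add-instance-metadata-from-file "e2e-test-master" "kube-env=/tmp/kube-env.yaml"
```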
# Instantiate a kubernetes cluster
#
# Assumed vars
@@ -758,8 +697,8 @@ function kube-up() {
detect-subnetworks
create-nodes
elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then
if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then
echo "Master replication supported only for gci, debian, and ubuntu"
if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then
echo "Master replication supported only for gci and ubuntu"
return 1
fi
create-loadbalancer
@@ -1075,15 +1014,6 @@ function create-master() {
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
# Create disk for cluster registry if enabled
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
gcloud compute disks create "${CLUSTER_REGISTRY_DISK}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--type "${CLUSTER_REGISTRY_DISK_TYPE_GCE}" \
--size "${CLUSTER_REGISTRY_DISK_SIZE}" &
fi
# Create rule for accessing and securing etcd servers.
if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${MASTER_NAME}-etcd" &>/dev/null; then
gcloud compute firewall-rules create "${MASTER_NAME}-etcd" \
@@ -1098,7 +1028,6 @@ function create-master() {
# from the other cluster variables so that the client (this
# computer) can forget it later. This should disappear with
# http://issue.k8s.io/3168
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
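The token pipeline deserves a gloss: it reads 128 random bytes, base64-encodes them, strips `=`, `+`, and `/` so the result is purely alphanumeric, and keeps the first 32 characters:

```bash
dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null
# => e.g. Zk9qW3xMkd02Lq7PBFhrQY6R1tT8uVwA  (random 32-char [A-Za-z0-9] token)
```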
@@ -1683,17 +1612,6 @@ function kube-down() {
"${replica_pd}"
fi
# Delete disk for cluster registry if enabled
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
if gcloud compute disks describe "${CLUSTER_REGISTRY_DISK}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
gcloud compute disks delete \
--project "${PROJECT}" \
--quiet \
--zone "${ZONE}" \
"${CLUSTER_REGISTRY_DISK}"
fi
fi
# Check if there are any remaining master replicas.
local REMAINING_MASTER_COUNT=$(gcloud compute instances list \
--project "${PROJECT}" \
@@ -1942,11 +1860,6 @@ function check-resources() {
return 1
fi
if gcloud compute disks describe --project "${PROJECT}" "${CLUSTER_REGISTRY_DISK}" --zone "${ZONE}" &>/dev/null; then
KUBE_RESOURCE_FOUND="Persistent disk ${CLUSTER_REGISTRY_DISK}"
return 1
fi
# Find out what minions are running.
local -a minions
minions=( $(gcloud compute instances list \

View File

@@ -64,11 +64,6 @@ verify-prereqs
echo "... calling verify-kube-binaries" >&2
verify-kube-binaries
if [[ "${KUBE_STAGE_IMAGES:-}" == "true" ]]; then
echo "... staging images" >&2
stage-images
fi
echo "... calling kube-up" >&2
kube-up

View File

@@ -45,6 +45,17 @@ export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
function detect-master-from-kubeconfig() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
cc="${KUBE_CONTEXT}"
fi
local cluster=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.cluster}")
KUBE_MASTER_URL=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}")
}
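The function resolves the master URL through three chained jsonpath lookups. An illustrative session (the context, cluster, and server values are hypothetical):

```bash
kubectl config view -o jsonpath='{.current-context}'
# e2e-gce
kubectl config view -o jsonpath='{.contexts[?(@.name == "e2e-gce")].context.cluster}'
# e2e-gce-cluster
kubectl config view -o jsonpath='{.clusters[?(@.name == "e2e-gce-cluster")].cluster.server}'
# https://104.154.0.1
```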
# ---- Do cloud-provider-specific setup
if [[ -n "${KUBERNETES_CONFORMANCE_TEST:-}" ]]; then
echo "Conformance test: not doing test setup."