diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 77bf2273d37..41864f57ca7 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -16760,7 +16760,7 @@ }, "v1.Node": { "id": "v1.Node", - "description": "Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).", + "description": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", "properties": { "kind": { "type": "string", diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index d812f00495c..add7e5530ac 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -481,7 +481,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

v1.Node

-Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).
+Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).
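The hunks above only reword the `v1.Node` description; the resource and its kind are untouched. As a quick sanity check against a live cluster, the served kind can be confirmed with kubectl (a sketch; the node name `kubernetes-node-abcd` is hypothetical and would be replaced by a real name from `kubectl get nodes`):

```console
# List registered nodes, then dump one as YAML; the kind stays Node.
$ kubectl get nodes
$ kubectl get node kubernetes-node-abcd -o yaml | head -n 4
apiVersion: v1
kind: Node
metadata:
  name: kubernetes-node-abcd
```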

diff --git a/docs/design/aws_under_the_hood.md b/docs/design/aws_under_the_hood.md index 9702a4faf5c..77b18d75483 100644 --- a/docs/design/aws_under_the_hood.md +++ b/docs/design/aws_under_the_hood.md @@ -64,7 +64,7 @@ you manually created or configured your cluster. ### Architecture overview Kubernetes is a cluster of several machines that consists of a Kubernetes -master and a set number of nodes (previously known as 'minions') for which the +master and a set number of nodes for which the master which is responsible. See the [Architecture](architecture.md) topic for more details. @@ -161,7 +161,7 @@ Note that we do not automatically open NodePort services in the AWS firewall NodePort services are more of a building block for things like inter-cluster services or for LoadBalancer. To consume a NodePort service externally, you will likely have to open the port in the node security group -(`kubernetes-minion-`). +(`kubernetes-node-`). For SSL support, starting with 1.3 two annotations can be added to a service: @@ -194,7 +194,7 @@ modifying the headers. kube-proxy sets up two IAM roles, one for the master called [kubernetes-master](../../cluster/aws/templates/iam/kubernetes-master-policy.json) and one for the nodes called -[kubernetes-minion](../../cluster/aws/templates/iam/kubernetes-minion-policy.json). +[kubernetes-node](../../cluster/aws/templates/iam/kubernetes-minion-policy.json). The master is responsible for creating ELBs and configuring them, as well as setting up advanced VPC routing. Currently it has blanket permissions on EC2, @@ -242,7 +242,7 @@ HTTP URLs are passed to instances; this is how Kubernetes code gets onto the machines. * Creates two IAM profiles based on templates in [cluster/aws/templates/iam](../../cluster/aws/templates/iam/): * `kubernetes-master` is used by the master. - * `kubernetes-minion` is used by nodes. + * `kubernetes-node` is used by nodes. * Creates an AWS SSH key named `kubernetes-`. Fingerprint here is the OpenSSH key fingerprint, so that multiple users can run the script with different keys and their keys will not collide (with near-certainty). It will @@ -265,7 +265,7 @@ The debate is open here, where cluster-per-AZ is discussed as more robust but cross-AZ-clusters are more convenient.
* Associates the subnet to the route table * Creates security groups for the master (`kubernetes-master-`) -and the nodes (`kubernetes-minion-`). +and the nodes (`kubernetes-node-`). * Configures security groups so that masters and nodes can communicate. This includes intercommunication between masters and nodes, opening SSH publicly for both masters and nodes, and opening port 443 on the master for the HTTPS @@ -281,8 +281,8 @@ information that must be passed in this way. routing rule for the internal network range (`MASTER_IP_RANGE`, defaults to 10.246.0.0/24). * For auto-scaling, on each nodes it creates a launch configuration and group. -The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-minion-group. The default -name is kubernetes-minion-group. The auto-scaling group has a min and max size +The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-node-group. The default +name is kubernetes-node-group. The auto-scaling group has a min and max size that are both set to NUM_NODES. You can change the size of the auto-scaling group to add or remove the total number of nodes from within the AWS API or Console. Each nodes self-configures, meaning that they come up; run Salt with diff --git a/docs/design/event_compression.md b/docs/design/event_compression.md index 738c3a1cc6a..bbac945ac1b 100644 --- a/docs/design/event_compression.md +++ b/docs/design/event_compression.md @@ -170,10 +170,10 @@ Sample kubectl output: ```console FIRSTSEEN LASTSEEN COUNT NAME KIND SUBOBJECT REASON SOURCE MESSAGE -Thu, 12 Feb 2015 01:13:02 +0000 Thu, 12 Feb 2015 01:13:02 +0000 1 kubernetes-node-4.c.saad-dev-vms.internal Minion starting {kubelet kubernetes-node-4.c.saad-dev-vms.internal} Starting kubelet. -Thu, 12 Feb 2015 01:13:09 +0000 Thu, 12 Feb 2015 01:13:09 +0000 1 kubernetes-node-1.c.saad-dev-vms.internal Minion starting {kubelet kubernetes-node-1.c.saad-dev-vms.internal} Starting kubelet. -Thu, 12 Feb 2015 01:13:09 +0000 Thu, 12 Feb 2015 01:13:09 +0000 1 kubernetes-node-3.c.saad-dev-vms.internal Minion starting {kubelet kubernetes-node-3.c.saad-dev-vms.internal} Starting kubelet. -Thu, 12 Feb 2015 01:13:09 +0000 Thu, 12 Feb 2015 01:13:09 +0000 1 kubernetes-node-2.c.saad-dev-vms.internal Minion starting {kubelet kubernetes-node-2.c.saad-dev-vms.internal} Starting kubelet. +Thu, 12 Feb 2015 01:13:02 +0000 Thu, 12 Feb 2015 01:13:02 +0000 1 kubernetes-node-4.c.saad-dev-vms.internal Node starting {kubelet kubernetes-node-4.c.saad-dev-vms.internal} Starting kubelet. +Thu, 12 Feb 2015 01:13:09 +0000 Thu, 12 Feb 2015 01:13:09 +0000 1 kubernetes-node-1.c.saad-dev-vms.internal Node starting {kubelet kubernetes-node-1.c.saad-dev-vms.internal} Starting kubelet. +Thu, 12 Feb 2015 01:13:09 +0000 Thu, 12 Feb 2015 01:13:09 +0000 1 kubernetes-node-3.c.saad-dev-vms.internal Node starting {kubelet kubernetes-node-3.c.saad-dev-vms.internal} Starting kubelet. +Thu, 12 Feb 2015 01:13:09 +0000 Thu, 12 Feb 2015 01:13:09 +0000 1 kubernetes-node-2.c.saad-dev-vms.internal Node starting {kubelet kubernetes-node-2.c.saad-dev-vms.internal} Starting kubelet. 
Thu, 12 Feb 2015 01:13:05 +0000 Thu, 12 Feb 2015 01:13:12 +0000 4 monitoring-influx-grafana-controller-0133o Pod failedScheduling {scheduler } Error scheduling: no nodes available to schedule pods Thu, 12 Feb 2015 01:13:05 +0000 Thu, 12 Feb 2015 01:13:12 +0000 4 elasticsearch-logging-controller-fplln Pod failedScheduling {scheduler } Error scheduling: no nodes available to schedule pods Thu, 12 Feb 2015 01:13:05 +0000 Thu, 12 Feb 2015 01:13:12 +0000 4 kibana-logging-controller-gziey Pod failedScheduling {scheduler } Error scheduling: no nodes available to schedule pods diff --git a/docs/devel/api-conventions.md b/docs/devel/api-conventions.md index 7fc2bdfc15a..2742a9f05b7 100644 --- a/docs/devel/api-conventions.md +++ b/docs/devel/api-conventions.md @@ -1182,7 +1182,7 @@ than capitalization of the initial letter, the two should almost always match. No underscores nor dashes in either. * Field and resource names should be declarative, not imperative (DoSomething, SomethingDoer, DoneBy, DoneAt). -* `Minion` has been deprecated in favor of `Node`. Use `Node` where referring to +* Use `Node` where referring to the node resource in the context of the cluster. Use `Host` where referring to properties of the individual physical/virtual system, such as `hostname`, `hostPath`, `hostNetwork`, etc. diff --git a/examples/guestbook-go/README.md b/examples/guestbook-go/README.md index db76ae27432..5f3d2093de3 100644 --- a/examples/guestbook-go/README.md +++ b/examples/guestbook-go/README.md @@ -92,9 +92,9 @@ Use the `examples/guestbook-go/redis-master-controller.json` file to create a [r 4. To verify what containers are running in the redis-master pod, you can SSH to that machine with `gcloud compute ssh --zone` *`zone_name`* *`host_name`* and then run `docker ps`: ```console - me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-bz1p + me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-bz1p - me@kubernetes-minion-3:~$ sudo docker ps + me@kubernetes-node-3:~$ sudo docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS d5c458dabe50 redis "/entrypoint.sh redis" 5 minutes ago Up 5 minutes ``` diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index ae2a1fbe877..a37b8bf006c 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -322,7 +322,7 @@ You can get information about a pod, including the machine that it is running on ```console $ kubectl describe pods redis-master-2353460263-1ecey Name: redis-master-2353460263-1ecey -Node: kubernetes-minion-m0k7/10.240.0.5 +Node: kubernetes-node-m0k7/10.240.0.5 ... Labels: app=redis,pod-template-hash=2353460263,role=master,tier=backend Status: Running @@ -337,7 +337,7 @@ Containers: ... ``` -The `Node` is the name and IP of the machine, e.g. `kubernetes-minion-m0k7` in the example above. You can find more details about this node with `kubectl describe nodes kubernetes-minion-m0k7`.
+The `Node` is the name and IP of the machine, e.g. `kubernetes-node-m0k7` in the example above. You can find more details about this node with `kubectl describe nodes kubernetes-node-m0k7`. If you want to view the container logs for a given pod, you can run: @@ -356,7 +356,7 @@ me@workstation$ gcloud compute ssh Then, you can look at the Docker containers on the remote machine. You should see something like this (the specifics of the IDs will be different): ```console -me@kubernetes-minion-krxw:~$ sudo docker ps +me@kubernetes-node-krxw:~$ sudo docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES ... 0ffef9649265 redis:latest "/entrypoint.sh redi" About a minute ago Up About a minute k8s_master.869d22f3_redis-master-dz33o_default_1449a58a-5ead-11e5-a104-688f84ef8ef6_d74cb2b5 @@ -718,10 +718,10 @@ NAME REGION IP_ADDRESS IP_PROTOCOL TARGET frontend us-central1 130.211.188.51 TCP us-central1/targetPools/frontend ``` -In Google Compute Engine, you also may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion` (replace with your tags as appropriate): +In Google Compute Engine, you also may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-node` (replace with your tags as appropriate): ```console -$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-minion kubernetes-minion-80 +$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-node kubernetes-node-80 ``` For GCE Kubernetes startup details, see the [Getting started on Google Compute Engine](../../docs/getting-started-guides/gce.md) diff --git a/examples/javaee/README.md b/examples/javaee/README.md index 944cd9c2efc..4fe6ec4f2ba 100644 --- a/examples/javaee/README.md +++ b/examples/javaee/README.md @@ -143,12 +143,12 @@ kubectl get -o template po wildfly-rc-w2kk5 --template={{.status.podIP}} 10.246.1.23 ``` -Log in to minion and access the application: +Log in to the node and access the application: ```sh -vagrant ssh minion-1 Last login: Thu Jul 16 00:24:36 2015 from 10.0.2.2 -[vagrant@kubernetes-minion-1 ~]$ curl http://10.246.1.23:8080/employees/resources/employees/ +vagrant ssh node-1 Last login: Thu Jul 16 00:24:36 2015 from 10.0.2.2 +[vagrant@kubernetes-node-1 ~]$ curl http://10.246.1.23:8080/employees/resources/employees/ 1Penny2Sheldon3Amy4Leonard5Bernadette6Raj7Howard8Priya ``` diff --git a/examples/meteor/README.md b/examples/meteor/README.md index 66a3a896718..94c393afd99 100644 --- a/examples/meteor/README.md +++ b/examples/meteor/README.md @@ -180,7 +180,7 @@ You will have to open up port 80 if it's not open yet in your environment. On Google Compute Engine, you may run the below command. ``` -gcloud compute firewall-rules create meteor-80 --allow=tcp:80 --target-tags kubernetes-minion +gcloud compute firewall-rules create meteor-80 --allow=tcp:80 --target-tags kubernetes-node ``` What is going on? diff --git a/examples/openshift-origin/README.md b/examples/openshift-origin/README.md index 451e4d412a3..b352dd5cdaa 100644 --- a/examples/openshift-origin/README.md +++ b/examples/openshift-origin/README.md @@ -59,7 +59,7 @@ $ vi cluster/saltbase/pillar/privilege.sls allow_privileged: true ``` -Now spin up a cluster using your preferred KUBERNETES_PROVIDER.
Remember that `kube-up.sh` may start other pods on your minion nodes, so ensure that you have enough resources to run the five pods for this example. +Now spin up a cluster using your preferred KUBERNETES_PROVIDER. Remember that `kube-up.sh` may start other pods on your nodes, so ensure that you have enough resources to run the five pods for this example. ```sh diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md index ba04699d3c7..dc3c408f0b8 100644 --- a/examples/phabricator/README.md +++ b/examples/phabricator/README.md @@ -160,7 +160,7 @@ phabricator-controller-9vy68 1/1 Running 0 1m If you ssh to that machine, you can run `docker ps` to see the actual pod: ```sh -me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-2 +me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-2 $ sudo docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES @@ -230,10 +230,10 @@ and then visit port 80 of that IP address. **Note**: Provisioning of the external IP address may take few minutes. -**Note**: You may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`: +**Note**: You may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-node`: ```sh -$ gcloud compute firewall-rules create phabricator-node-80 --allow=tcp:80 --target-tags kubernetes-minion +$ gcloud compute firewall-rules create phabricator-node-80 --allow=tcp:80 --target-tags kubernetes-node ``` ### Step Six: Cleanup diff --git a/examples/phabricator/setup.sh b/examples/phabricator/setup.sh index 678973c812f..588b1f5f93f 100755 --- a/examples/phabricator/setup.sh +++ b/examples/phabricator/setup.sh @@ -16,5 +16,5 @@ echo "Create Phabricator replication controller" && kubectl create -f phabricator-controller.json echo "Create Phabricator service" && kubectl create -f phabricator-service.json -echo "Create firewall rule" && gcloud compute firewall-rules create phabricator-node-80 --allow=tcp:80 --target-tags kubernetes-minion +echo "Create firewall rule" && gcloud compute firewall-rules create phabricator-node-80 --allow=tcp:80 --target-tags kubernetes-node diff --git a/examples/runtime-constraints/README.md b/examples/runtime-constraints/README.md index 7c7c7bd1b5a..4dc4f627253 100644 --- a/examples/runtime-constraints/README.md +++ b/examples/runtime-constraints/README.md @@ -79,13 +79,13 @@ $ cluster/kubectl.sh run cpuhog \ -- md5sum /dev/urandom ``` -This will create a single pod on your minion that requests 1/10 of a CPU, but it has no limit on how much CPU it may actually consume +This will create a single pod on your node that requests 1/10 of a CPU, but it has no limit on how much CPU it may actually consume on the node. To demonstrate this, if you SSH into your machine, you will see it is consuming as much CPU as possible on the node. ``` -$ vagrant ssh minion-1 +$ vagrant ssh node-1 $ sudo docker stats $(sudo docker ps -q) CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O 6b593b1a9658 0.00% 1.425 MB/1.042 GB 0.14% 1.038 kB/738 B @@ -150,7 +150,7 @@ $ cluster/kubectl.sh run cpuhog \ Let's SSH into the node, and look at usage stats. 
``` -$ vagrant ssh minion-1 +$ vagrant ssh node-1 $ sudo su $ docker stats $(docker ps -q) CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O diff --git a/examples/sharing-clusters/README.md b/examples/sharing-clusters/README.md index 90b529c5bc2..a3af523db7c 100644 --- a/examples/sharing-clusters/README.md +++ b/examples/sharing-clusters/README.md @@ -88,18 +88,18 @@ And kubectl get nodes should agree: ``` $ kubectl get nodes NAME LABELS STATUS -eu-minion-0n61 kubernetes.io/hostname=eu-minion-0n61 Ready -eu-minion-79ua kubernetes.io/hostname=eu-minion-79ua Ready -eu-minion-7wz7 kubernetes.io/hostname=eu-minion-7wz7 Ready -eu-minion-loh2 kubernetes.io/hostname=eu-minion-loh2 Ready +eu-node-0n61 kubernetes.io/hostname=eu-node-0n61 Ready +eu-node-79ua kubernetes.io/hostname=eu-node-79ua Ready +eu-node-7wz7 kubernetes.io/hostname=eu-node-7wz7 Ready +eu-node-loh2 kubernetes.io/hostname=eu-node-loh2 Ready $ kubectl config use-context $ kubectl get nodes NAME LABELS STATUS -kubernetes-minion-5jtd kubernetes.io/hostname=kubernetes-minion-5jtd Ready -kubernetes-minion-lqfc kubernetes.io/hostname=kubernetes-minion-lqfc Ready -kubernetes-minion-sjra kubernetes.io/hostname=kubernetes-minion-sjra Ready -kubernetes-minion-wul8 kubernetes.io/hostname=kubernetes-minion-wul8 Ready +kubernetes-node-5jtd kubernetes.io/hostname=kubernetes-node-5jtd Ready +kubernetes-node-lqfc kubernetes.io/hostname=kubernetes-node-lqfc Ready +kubernetes-node-sjra kubernetes.io/hostname=kubernetes-node-sjra Ready +kubernetes-node-wul8 kubernetes.io/hostname=kubernetes-node-wul8 Ready ``` ## Testing reachability @@ -207,10 +207,10 @@ $ kubectl exec -it kubectl-tester bash kubectl-tester $ kubectl get nodes NAME LABELS STATUS -eu-minion-0n61 kubernetes.io/hostname=eu-minion-0n61 Ready -eu-minion-79ua kubernetes.io/hostname=eu-minion-79ua Ready -eu-minion-7wz7 kubernetes.io/hostname=eu-minion-7wz7 Ready -eu-minion-loh2 kubernetes.io/hostname=eu-minion-loh2 Ready +eu-node-0n61 kubernetes.io/hostname=eu-node-0n61 Ready +eu-node-79ua kubernetes.io/hostname=eu-node-79ua Ready +eu-node-7wz7 kubernetes.io/hostname=eu-node-7wz7 Ready +eu-node-loh2 kubernetes.io/hostname=eu-node-loh2 Ready ``` For a more advanced example of sharing clusters, see the [service-loadbalancer](https://github.com/kubernetes/contrib/tree/master/service-loadbalancer/README.md) diff --git a/hack/jenkins/README.md b/hack/jenkins/README.md index fc285153e91..9b2b2e1f911 100644 --- a/hack/jenkins/README.md +++ b/hack/jenkins/README.md @@ -70,7 +70,7 @@ gs://kubernetes-jenkins/logs/kubernetes-e2e-gce/ gcp-resources-{before, after}.txt junit_{00, 01, ...}.xml jenkins-e2e-master/{kube-apiserver.log, ...} - jenkins-e2e-minion-abcd/{kubelet.log, ...} + jenkins-e2e-node-abcd/{kubelet.log, ...} 12344/ ... 
``` diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index 3c14ea8b30d..6125aee6e41 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -44,7 +44,6 @@ cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ storage_backend }} cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd2') -%} cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%} -cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%} diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 134c43218a6..fc1aa5872db 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -14,6 +14,7 @@ api-external-dns-names api-burst api-prefix api-rate +apiserver-count api-server-port api-servers api-token @@ -31,9 +32,12 @@ authentication-token-webhook-config-file authorization-mode authorization-policy-file authorization-rbac-super-user -authorization-webhook-config-file authorization-webhook-cache-authorized-ttl authorization-webhook-cache-unauthorized-ttl +authorization-webhook-config-file +auth-path +auth-provider +auth-provider-arg babysit-daemons basic-auth-file bench-pods @@ -50,8 +54,8 @@ build-tag cadvisor-port cert-dir certificate-authority -cgroups-per-qos cgroup-root +cgroups-per-qos chaos-chance clean-start cleanup @@ -68,21 +72,21 @@ cluster-cidr cluster-dns cluster-domain cluster-ip -cluster-name -cluster-tag cluster-monitor-period +cluster-name cluster-signing-cert-file cluster-signing-key-file cni-bin-dir cni-conf-dir +cluster-tag concurrent-deployment-syncs concurrent-endpoint-syncs +concurrent-gc-syncs concurrent-namespace-syncs concurrent-replicaset-syncs -concurrent-service-syncs concurrent-resource-quota-syncs concurrent-serviceaccount-token-syncs -concurrent-gc-syncs +concurrent-service-syncs config-sync-period configure-cbr0 configure-cloud-routes @@ -93,10 +97,10 @@ conntrack-tcp-timeout-established consumer-port consumer-service-name 
consumer-service-namespace -contain-pod-resources container-port container-runtime container-runtime-endpoint +contain-pod-resources controller-start-interval cors-allowed-origins cpu-cfs-quota @@ -124,13 +128,13 @@ disable-kubenet dns-port dns-provider dns-provider-config +dockercfg-path docker-email docker-endpoint docker-exec-handler docker-password docker-server docker-username -dockercfg-path driver-port drop-embedded-fields dry-run @@ -141,8 +145,9 @@ e2e-verify-service-account enable-controller-attach-detach enable-custom-metrics enable-debugging-handlers -enable-garbage-collector enable-dynamic-provisioning +enable-garbage-collector +enable-garbage-collector enable-hostpath-provisioner enable-server enable-swagger-ui @@ -162,11 +167,11 @@ event-burst event-qps event-ttl eviction-hard -eviction-soft -eviction-soft-grace-period -eviction-pressure-transition-period eviction-max-pod-grace-period eviction-minimum-reclaim +eviction-pressure-transition-period +eviction-soft +eviction-soft-grace-period executor-bindall executor-logv executor-path @@ -195,8 +200,8 @@ federated-api-qps federated-kube-context federation-name file-check-frequency -file-suffix file_content_in_loop +file-suffix flex-volume-plugin-dir forward-services framework-name @@ -219,16 +224,16 @@ google-json-key grace-period ha-domain hairpin-mode -hard-pod-affinity-symmetric-weight hard +hard-pod-affinity-symmetric-weight healthz-bind-address healthz-port horizontal-pod-autoscaler-sync-period host-ipc-sources +hostname-override host-network-sources host-pid-sources host-port-endpoints -hostname-override http-check-frequency http-port ignore-daemonsets @@ -241,6 +246,7 @@ image-pull-policy image-service-endpoint include-extended-apis included-types-overrides +include-extended-apis input-base input-dirs insecure-experimental-approve-all-kubelet-csrs-for-group @@ -273,10 +279,6 @@ kops-zones kube-api-burst kube-api-content-type kube-api-qps -kube-master -kube-master -kube-master-url -kube-reserved kubecfg-file kubectl-path kubelet-address @@ -298,6 +300,10 @@ kubelet-read-only-port kubelet-root-dir kubelet-sync-frequency kubelet-timeout +kube-master +kube-master +kube-master-url +kube-reserved kubernetes-service-node-port label-columns large-cluster-size-threshold @@ -324,6 +330,8 @@ master-os-distro master-service-namespace max-concurrency max-connection-bytes-per-sec +maximum-dead-containers +maximum-dead-containers-per-container max-log-age max-log-backups max-log-size @@ -332,8 +340,6 @@ max-outgoing-burst max-outgoing-qps max-pods max-requests-inflight -maximum-dead-containers -maximum-dead-containers-per-container mesos-authentication-principal mesos-authentication-provider mesos-authentication-secret-file @@ -347,15 +353,15 @@ mesos-launch-grace-period mesos-master mesos-sandbox-overlay mesos-user -min-pr-number -min-request-timeout -min-resync-period minimum-container-ttl-duration minimum-image-ttl-duration minion-max-log-age minion-max-log-backups minion-max-log-size minion-path-override +min-pr-number +min-request-timeout +min-resync-period namespace-sync-period network-plugin network-plugin-dir @@ -367,14 +373,20 @@ node-eviction-rate node-instance-group node-ip node-labels +node-max-log-age +node-max-log-backups +node-max-log-size node-monitor-grace-period node-monitor-period node-name node-os-distro +node-path-override node-startup-grace-period node-status-update-frequency node-sync-period +no-headers non-masquerade-cidr +no-suggestions num-nodes oidc-ca-file oidc-client-id @@ -383,7 +395,6 @@ oidc-issuer-url 
oidc-username-claim only-idl oom-score-adj -out-version outofdisk-transition-frequency output-base output-directory @@ -391,6 +402,7 @@ output-file-base output-package output-print-type output-version +out-version path-override pod-cidr pod-eviction-timeout @@ -413,6 +425,7 @@ proxy-logv proxy-mode proxy-port-range public-address-override +pvclaimbinder-sync-period pv-recycler-increment-timeout-nfs pv-recycler-maximum-retry pv-recycler-minimum-timeout-hostpath @@ -420,7 +433,6 @@ pv-recycler-minimum-timeout-nfs pv-recycler-pod-template-filepath-hostpath pv-recycler-pod-template-filepath-nfs pv-recycler-timeout-increment-hostpath -pvclaimbinder-sync-period read-only-port really-crash-for-testing reconcile-cidr @@ -524,9 +536,9 @@ test-timeout tls-ca-file tls-cert-file tls-private-key-file -to-version token-auth-file ttl-keys-prefix +to-version ttl-secs type-src udp-port diff --git a/pkg/api/errors/errors.go b/pkg/api/errors/errors.go index 858c19113e6..8623b3aab47 100644 --- a/pkg/api/errors/errors.go +++ b/pkg/api/errors/errors.go @@ -31,11 +31,9 @@ import ( const ( StatusUnprocessableEntity = 422 StatusTooManyRequests = 429 - // HTTP recommendations are for servers to define 5xx error codes - // for scenarios not covered by behavior. In this case, ServerTimeout - // is an indication that a transient server error has occurred and the - // client *should* retry, with an optional Retry-After header to specify - // the back off window. + // StatusServerTimeout is an indication that a transient server error has + // occurred and the client *should* retry, with an optional Retry-After + // header to specify the back off window. StatusServerTimeout = 504 ) diff --git a/pkg/api/node_example.json b/pkg/api/node_example.json index 260183484fa..ea249cb7854 100644 --- a/pkg/api/node_example.json +++ b/pkg/api/node_example.json @@ -2,8 +2,8 @@ "kind": "Node", "apiVersion": "v1", "metadata": { - "name": "e2e-test-wojtekt-minion-etd6", - "selfLink": "/api/v1/nodes/e2e-test-wojtekt-minion-etd6", + "name": "e2e-test-wojtekt-node-etd6", + "selfLink": "/api/v1/nodes/e2e-test-wojtekt-node-etd6", "uid": "a7e89222-e8e5-11e4-8fde-42010af09327", "resourceVersion": "379", "creationTimestamp": "2015-04-22T11:49:39Z" diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index dcaf1880f3f..7948fdb22d0 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -331,7 +331,7 @@ func TestBadJSONRejection(t *testing.T) { t.Errorf("Did not reject despite use of unknown type: %s", badJSONUnknownType) } /*badJSONKindMismatch := []byte(`{"kind": "Pod"}`) - if err2 := DecodeInto(badJSONKindMismatch, &Minion{}); err2 == nil { + if err2 := DecodeInto(badJSONKindMismatch, &Node{}); err2 == nil { t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch) }*/ } diff --git a/pkg/api/v1/generated.proto b/pkg/api/v1/generated.proto index 79c5540abae..3ffb00b6752 100644 --- a/pkg/api/v1/generated.proto +++ b/pkg/api/v1/generated.proto @@ -1205,7 +1205,7 @@ message NamespaceStatus { optional string phase = 1; } -// Node is a worker node in Kubernetes, formerly known as minion. +// Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. 
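The proto comment change above is documentation only; the REST path for the resource has been `/api/v1/nodes` since v1. A minimal sketch of inspecting that collection directly, assuming a reachable cluster and the default `kubectl proxy` port of 8001:

```console
# Proxy the API server locally, then fetch the nodes collection.
$ kubectl proxy --port=8001 &
$ curl -s http://127.0.0.1:8001/api/v1/nodes | head -n 3
{
  "kind": "NodeList",
  "apiVersion": "v1",
```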
diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 3dfe60eaaa5..32e81777cd4 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -2678,7 +2678,7 @@ type ResourceList map[ResourceName]resource.Quantity // +genclient=true // +nonNamespaced=true -// Node is a worker node in Kubernetes, formerly known as minion. +// Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). type Node struct { unversioned.TypeMeta `json:",inline"` diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go index 54e87c8f919..a8e9ba6b38d 100644 --- a/pkg/api/v1/types_swagger_doc_generated.go +++ b/pkg/api/v1/types_swagger_doc_generated.go @@ -795,7 +795,7 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { } var map_Node = map[string]string{ - "": "Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).", + "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 80d83a1322f..bc2968526a0 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -2885,7 +2885,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer // Open the firewall from the load balancer to the instance // We don't actually have a trivial way to know in advance which security group the instance is in - // (it is probably the minion security group, but we don't easily have that). + // (it is probably the node security group, but we don't easily have that). // However, we _do_ have the list of security groups on the instance records. // Map containing the changes we want to make; true to add, false to remove diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index e69bce7ebca..30eb35a2794 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -2085,7 +2085,7 @@ func (gce *GCECloud) GetInstanceGroup(name string, zone string) (*compute.Instan // Take a GCE instance 'hostname' and break it down to something that can be fed // to the GCE API client library. Basically this means reducing 'kubernetes- -// minion-2.c.my-proj.internal' to 'kubernetes-minion-2' if necessary. +// node-2.c.my-proj.internal' to 'kubernetes-node-2' if necessary. 
func canonicalizeInstanceName(name string) string { ix := strings.Index(name, ".") if ix != -1 { diff --git a/pkg/cloudprovider/providers/ovirt/ovirt.go b/pkg/cloudprovider/providers/ovirt/ovirt.go index 4f4bcefa6a8..f2c8e961838 100644 --- a/pkg/cloudprovider/providers/ovirt/ovirt.go +++ b/pkg/cloudprovider/providers/ovirt/ovirt.go @@ -283,7 +283,7 @@ func (m *OVirtInstanceMap) ListSortedNames() []string { return names } -// List enumerates the set of minions instances known by the cloud provider +// List enumerates the set of node instances known by the cloud provider func (v *OVirtCloud) List(filter string) ([]types.NodeName, error) { instances, err := v.fetchAllInstances() if err != nil { diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index c38ef5dffb5..0b551d4d02c 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -573,7 +573,7 @@ func Example_printPodWithWideFormat() { NegotiatedSerializer: ns, Client: nil, } - nodeName := "kubernetes-minion-abcd" + nodeName := "kubernetes-node-abcd" cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -600,7 +600,7 @@ } // Output: // NAME READY STATUS RESTARTS AGE IP NODE - // test1 1/2 podPhase 6 10y 10.1.1.3 kubernetes-minion-abcd + // test1 1/2 podPhase 6 10y 10.1.1.3 kubernetes-node-abcd } func Example_printPodWithShowLabels() { @@ -613,7 +613,7 @@ NegotiatedSerializer: ns, Client: nil, } - nodeName := "kubernetes-minion-abcd" + nodeName := "kubernetes-node-abcd" cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -647,7 +647,7 @@ } func newAllPhasePodList() *api.PodList { - nodeName := "kubernetes-minion-abcd" + nodeName := "kubernetes-node-abcd" return &api.PodList{ Items: []api.Pod{ { diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index ea9f9b57ce4..5aac1dfc399 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -48,7 +48,7 @@ var ( describe_example = dedent.Dedent(` # Describe a node - kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal + kubectl describe nodes kubernetes-node-emt8.c.myproject.internal # Describe a pod kubectl describe pods/nginx diff --git a/pkg/registry/registrytest/doc.go b/pkg/registry/registrytest/doc.go index 41e2b65cbe6..7e4c5e28662 100644 --- a/pkg/registry/registrytest/doc.go +++ b/pkg/registry/registrytest/doc.go @@ -15,5 +15,5 @@ limitations under the License. */ // Package registrytest provides tests for Registry implementations -// for storing Minions, Pods, Schedulers and Services. +// for storing Nodes, Pods, Schedulers and Services.
package registrytest // import "k8s.io/kubernetes/pkg/registry/registrytest" diff --git a/pkg/runtime/scheme_test.go b/pkg/runtime/scheme_test.go index beeafe72176..97cd1873a62 100644 --- a/pkg/runtime/scheme_test.go +++ b/pkg/runtime/scheme_test.go @@ -161,7 +161,7 @@ func TestBadJSONRejection(t *testing.T) { t.Errorf("Did not reject despite use of unknown type: %s", badJSONUnknownType) } /*badJSONKindMismatch := []byte(`{"kind": "Pod"}`) - if err2 := DecodeInto(badJSONKindMismatch, &Minion{}); err2 == nil { + if err2 := DecodeInto(badJSONKindMismatch, &Node{}); err2 == nil { t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch) }*/ } diff --git a/pkg/util/proxy/transport_test.go b/pkg/util/proxy/transport_test.go index 544d4b96dfc..f32bcf69edc 100644 --- a/pkg/util/proxy/transport_test.go +++ b/pkg/util/proxy/transport_test.go @@ -38,20 +38,20 @@ func TestProxyTransport(t *testing.T) { testTransport := &Transport{ Scheme: "http", Host: "foo.com", - PathPrepend: "/proxy/minion/minion1:10250", + PathPrepend: "/proxy/node/node1:10250", } testTransport2 := &Transport{ Scheme: "https", Host: "foo.com", - PathPrepend: "/proxy/minion/minion1:8080", + PathPrepend: "/proxy/node/node1:8080", } emptyHostTransport := &Transport{ Scheme: "https", - PathPrepend: "/proxy/minion/minion1:10250", + PathPrepend: "/proxy/node/node1:10250", } emptySchemeTransport := &Transport{ Host: "foo.com", - PathPrepend: "/proxy/minion/minion1:10250", + PathPrepend: "/proxy/node/node1:10250", } type Item struct { input string @@ -67,120 +67,120 @@ func TestProxyTransport(t *testing.T) {
table := map[string]Item{
"normal": {
input: `<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>`,
-sourceURL: "http://myminion.com/logs/log.log",
+sourceURL: "http://mynode.com/logs/log.log",
transport: testTransport,
-output: `<pre><a href="http://foo.com/proxy/minion/minion1:10250/logs/kubelet.log">kubelet.log</a><a href="http://foo.com/proxy/minion/minion1:10250/logs/google.log">google.log</a></pre>`,
+output: `<pre><a href="http://foo.com/proxy/node/node1:10250/logs/kubelet.log">kubelet.log</a><a href="http://foo.com/proxy/node/node1:10250/logs/google.log">google.log</a></pre>`,
contentType: "text/html",
-forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+forwardedURI: "/proxy/node/node1:10250/logs/log.log",
},
"full document": {
input: `<html><header></header><body><pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre></body></html>`,
-sourceURL: "http://myminion.com/logs/log.log",
+sourceURL: "http://mynode.com/logs/log.log",
transport: testTransport,
-output: `<html><header></header><body><pre><a href="http://foo.com/proxy/minion/minion1:10250/logs/kubelet.log">kubelet.log</a><a href="http://foo.com/proxy/minion/minion1:10250/logs/google.log">google.log</a></pre></body></html>`,
+output: `<html><header></header><body><pre><a href="http://foo.com/proxy/node/node1:10250/logs/kubelet.log">kubelet.log</a><a href="http://foo.com/proxy/node/node1:10250/logs/google.log">google.log</a></pre></body></html>`,
contentType: "text/html",
-forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+forwardedURI: "/proxy/node/node1:10250/logs/log.log",
},
"trailing slash": {
input: `<pre><a href="kubelet.log/">kubelet.log</a><a href="google.log">google.log</a></pre>`,
-sourceURL: "http://myminion.com/logs/log.log",
+sourceURL: "http://mynode.com/logs/log.log",
transport: testTransport,
-output: `<pre><a href="http://foo.com/proxy/minion/minion1:10250/logs/kubelet.log/">kubelet.log</a><a href="http://foo.com/proxy/minion/minion1:10250/logs/google.log">google.log</a></pre>`,
+output: `<pre><a href="http://foo.com/proxy/node/node1:10250/logs/kubelet.log/">kubelet.log</a><a href="http://foo.com/proxy/node/node1:10250/logs/google.log">google.log</a></pre>`,
contentType: "text/html",
-forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+forwardedURI: "/proxy/node/node1:10250/logs/log.log",
},
"content-type charset": {
input: `<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>`,
-sourceURL: "http://myminion.com/logs/log.log",
+sourceURL: "http://mynode.com/logs/log.log",
transport: testTransport,
-output: `<pre><a href="http://foo.com/proxy/minion/minion1:10250/logs/kubelet.log">kubelet.log</a><a href="http://foo.com/proxy/minion/minion1:10250/logs/google.log">google.log</a></pre>`,
+output: `<pre><a href="http://foo.com/proxy/node/node1:10250/logs/kubelet.log">kubelet.log</a><a href="http://foo.com/proxy/node/node1:10250/logs/google.log">google.log</a></pre>`,
contentType: "text/html; charset=utf-8",
-forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+forwardedURI: "/proxy/node/node1:10250/logs/log.log",
},
"content-type passthrough": {
input: `<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>`,
-sourceURL: "http://myminion.com/logs/log.log",
+sourceURL: "http://mynode.com/logs/log.log",
transport: testTransport,
output: `<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>`,
contentType: "text/plain",
-forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+forwardedURI: "/proxy/node/node1:10250/logs/log.log",
},
"subdir": {
input: `<a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a>`,
-sourceURL: "http://myminion.com/whatever/apt/somelog.log",
+sourceURL: "http://mynode.com/whatever/apt/somelog.log",
transport: testTransport2,
-output: `<a href="https://foo.com/proxy/minion/minion1:8080/whatever/apt/kubelet.log">kubelet.log</a><a href="https://foo.com/proxy/minion/minion1:8080/whatever/apt/google.log">google.log</a>`,
+output: `<a href="https://foo.com/proxy/node/node1:8080/whatever/apt/kubelet.log">kubelet.log</a><a href="https://foo.com/proxy/node/node1:8080/whatever/apt/google.log">google.log</a>`,
contentType: "text/html",
-forwardedURI: "/proxy/minion/minion1:8080/whatever/apt/somelog.log",
+forwardedURI: "/proxy/node/node1:8080/whatever/apt/somelog.log",
},
"image": {
input: `<pre><img src="kubernetes.jpg"/></pre>`,
-sourceURL: "http://myminion.com/",
+sourceURL: "http://mynode.com/",
transport: testTransport,
-output: `<pre><img src="http://foo.com/proxy/minion/minion1:10250/kubernetes.jpg"/></pre>`,
+output: `<pre><img src="http://foo.com/proxy/node/node1:10250/kubernetes.jpg"/></pre>`,
contentType: "text/html",
-forwardedURI: "/proxy/minion/minion1:10250/",
+forwardedURI: "/proxy/node/node1:10250/",
},
"abs": {
input: `