Merge pull request #11452 from thockin/docs-munge-headerlines

Munge headerlines
David Oppenheimer 2015-07-17 15:52:08 -07:00
commit d28a6656ae
214 changed files with 745 additions and 29 deletions

cmd/mungedocs/headers.go (new file, 71 lines added)
View File

@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "fmt"
    "regexp"
    "strings"
)

var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
var whitespaceRegex = regexp.MustCompile(`^\s*$`)

// fixHeaderLines normalizes markdown headers: exactly one space after the #s
// and a blank line before and after each header line.
func fixHeaderLines(fileBytes []byte) []byte {
    lines := splitLines(fileBytes)
    out := []string{}
    for i := range lines {
        matches := headerRegex.FindStringSubmatch(lines[i])
        if matches == nil {
            out = append(out, lines[i])
            continue
        }
        // Ensure a blank line before the header.
        if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) {
            out = append(out, "")
        }
        // Rewrite "##foo" as "## foo".
        out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2]))
        // Ensure a blank line after the header.
        if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) {
            out = append(out, "")
        }
    }
    final := strings.Join(out, "\n")
    // Preserve the end of the file.
    if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' {
        final += "\n"
    }
    return []byte(final)
}

// Header lines need whitespace around them and after the #s.
func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) {
    fbs := splitByPreformatted(fileBytes)
    fbs = append([]fileBlock{{false, []byte{}}}, fbs...)
    fbs = append(fbs, fileBlock{false, []byte{}})

    for i := range fbs {
        block := &fbs[i]
        if block.preformatted {
            continue
        }
        block.data = fixHeaderLines(block.data)
    }
    output := []byte{}
    for _, block := range fbs {
        output = append(output, block.data...)
    }
    return output, nil
}
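
Note: headers.go relies on a `fileBlock` type and two helpers, `splitLines` and `splitByPreformatted`, that live elsewhere in cmd/mungedocs and are not shown in this diff. The sketch below is only a guess at their shape, inferred from how they are called above and written as if it sat inside the same package; the real implementations may differ.

```go
// Illustrative sketch only -- not part of this PR.
package main

import "strings"

// fileBlock is assumed to pair a chunk of file content with a flag saying
// whether it is preformatted (fenced) text that mungers must not touch.
type fileBlock struct {
    preformatted bool
    data         []byte
}

// splitLines is assumed to break file contents into lines without trailing
// newlines; fixHeaderLines re-adds the final newline if the file had one.
func splitLines(fileBytes []byte) []string {
    return strings.Split(strings.TrimRight(string(fileBytes), "\n"), "\n")
}

// splitByPreformatted is assumed to carve a file into alternating normal and
// code-fence blocks so that mungers can skip preformatted text.
func splitByPreformatted(fileBytes []byte) []fileBlock {
    fence := strings.Repeat("`", 3) // markdown code-fence marker
    var blocks []fileBlock
    var cur []byte
    pre := false
    for _, line := range strings.SplitAfter(string(fileBytes), "\n") {
        isFence := strings.HasPrefix(strings.TrimSpace(line), fence)
        switch {
        case isFence && !pre:
            // Opening fence: flush the normal text gathered so far and start
            // a preformatted block that includes the fence line itself.
            blocks = append(blocks, fileBlock{preformatted: false, data: cur})
            cur = []byte(line)
            pre = true
        case isFence && pre:
            // Closing fence: it belongs to the preformatted block.
            cur = append(cur, line...)
            blocks = append(blocks, fileBlock{preformatted: true, data: cur})
            cur = nil
            pre = false
        default:
            cur = append(cur, line...)
        }
    }
    if len(cur) > 0 {
        blocks = append(blocks, fileBlock{preformatted: pre, data: cur})
    }
    return blocks
}
```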

View File

@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestHeaderLines(t *testing.T) {
    var cases = []struct {
        in  string
        out string
    }{
        {"", ""},
        {
            "# ok",
            "# ok",
        },
        {
            "## ok",
            "## ok",
        },
        {
            "##### ok",
            "##### ok",
        },
        {
            "##fix",
            "## fix",
        },
        {
            "foo\n\n##fix\n\nbar",
            "foo\n\n## fix\n\nbar",
        },
        {
            "foo\n##fix\nbar",
            "foo\n\n## fix\n\nbar",
        },
        {
            "foo\n```\n##fix\n```\nbar",
            "foo\n```\n##fix\n```\nbar",
        },
        {
            "foo\n#fix1\n##fix2\nbar",
            "foo\n\n# fix1\n\n## fix2\n\nbar",
        },
    }
    for i, c := range cases {
        actual, err := checkHeaderLines("filename.md", []byte(c.in))
        assert.NoError(t, err)
        if string(actual) != c.out {
            t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
        }
    }
}
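
For completeness, this is roughly how the new munger could be exercised by hand from inside the same package; the file path is a made-up placeholder and the snippet is not part of the PR.

```go
// Hypothetical driver, assumed to live alongside headers.go in package main.
package main

import (
    "fmt"
    "io/ioutil"
    "log"
)

func exampleHeaderMunge() {
    // "docs/example.md" is a placeholder path, not a file touched by this PR.
    in, err := ioutil.ReadFile("docs/example.md")
    if err != nil {
        log.Fatal(err)
    }
    out, err := checkHeaderLines("docs/example.md", in)
    if err != nil {
        log.Fatal(err)
    }
    // Print the munged markdown: blank lines around headers, a space after the #s.
    fmt.Print(string(out))
}
```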

View File

@ -48,6 +48,7 @@ Examples:
{"table-of-contents", updateTOC},
{"check-links", checkLinks},
{"blank-lines-surround-preformatted", checkPreformatted},
{"header-lines", checkHeaderLines},
{"unversioned-warning", updateUnversionedWarning},
{"analytics", checkAnalytics},
{"kubectl-dash-f", checkKubectlFileTargets},

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Documentation: releases.k8s.io/HEAD
* The [User's guide](user-guide/README.md) is for anyone who wants to run programs and

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Cluster Admin Guide
The cluster admin guide is for anyone creating or administering a Kubernetes cluster.
@ -72,6 +73,7 @@ If you are modifying an existing guide which uses Salt, this document explains [
project.](salt.md).
## Upgrading a cluster
[Upgrading a cluster](cluster-management.md).
## Managing nodes

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Configuring APIserver ports
This document describes what ports the kubernetes apiserver
@ -42,6 +43,7 @@ in [Accessing the cluster](../user-guide/accessing-the-cluster.md).
## Ports and IPs Served On
The Kubernetes API is served by the Kubernetes APIServer process. Typically,
there is one of these running on a single kubernetes-master node.
@ -93,6 +95,7 @@ variety of uses cases:
setup time. Kubelets use cert-based auth, while kube-proxy uses token-based auth.
## Expected changes
- Policy will limit the actions kubelets can do via the authed port.
- Scheduler and Controller-manager will use the Secure Port too. They
will then be able to run on different machines than the apiserver.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Admission Controllers
**Table of Contents**

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Authentication Plugins
Kubernetes uses client certificates, tokens, or http basic auth to authenticate users for API calls.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Authorization Plugins
@ -53,6 +54,7 @@ The following implementations are available, and are selected by flag:
`ABAC` allows for user-configured authorization policy. ABAC stands for Attribute-Based Access Control.
## ABAC Mode
### Request Attributes
A request has 4 attributes that can be considered for authorization:
@ -105,6 +107,7 @@ To permit any user to do something, write a policy with the user property unset.
To permit an action Policy with an unset namespace applies regardless of namespace.
### Examples
1. Alice can do anything: `{"user":"alice"}`
2. Kubelet can read any pods: `{"user":"kubelet", "resource": "pods", "readonly": true}`
3. Kubelet can read and write events: `{"user":"kubelet", "resource": "events"}`

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Cluster Admin Guide: Cluster Components
This document outlines the various binary components that need to run to
@ -92,6 +93,7 @@ These controllers include:
selects a node for them to run on.
### addons
Addons are pods and services that implement cluster features. They don't run on
the master VM, but currently the default setup scripts that make the API calls
to create these pods and services does run on the master VM. See:

View File

@ -30,9 +30,11 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Large Cluster
## Support
At v1.0, Kubernetes supports clusters up to 100 nodes with 30 pods per node and 1-2 containers per pod (as defined in the [1.0 roadmap](../../docs/roadmap.md#reliability-and-performance)).
## Setup
@ -59,6 +61,7 @@ To avoid running into cloud provider quota issues, when creating a cluster with
* Gating the setup script so that it brings up new node VMs in smaller batches with waits in between, because some cloud providers rate limit the creation of VMs.
### Addon Resources
To prevent memory leaks or other resource issues in [cluster addons](../../cluster/addons/) from consuming all the resources available on a node, Kubernetes sets resource limits on addon containers to limit the CPU and Memory resources they can consume (See PR [#10653](https://github.com/GoogleCloudPlatform/kubernetes/pull/10653/files) and [#10778](https://github.com/GoogleCloudPlatform/kubernetes/pull/10778/files)).
For example:

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Cluster Management
This doc is in progress.

View File

@ -30,13 +30,16 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Cluster Troubleshooting
This doc is about cluster troubleshooting; we assume you have already ruled out your application as the root cause of the
problem you are experiencing. See
the [application troubleshooting guide](../user-guide/application-troubleshooting.md) for tips on application debugging.
You may also visit [troubleshooting document](../troubleshooting.md) for more information.
## Listing your cluster
The first thing to debug in your cluster is if your nodes are all registered correctly.
Run
@ -48,15 +51,18 @@ kubectl get nodes
And verify that all of the nodes you expect to see are present and that they are all in the ```Ready``` state.
## Looking at logs
For now, digging deeper into the cluster requires logging into the relevant machines. Here are the locations
of the relevant log files. (note that on systemd-based systems, you may need to use ```journalctl``` instead)
### Master
* /var/log/kube-apiserver.log - API Server, responsible for serving the API
* /var/log/kube-scheduler.log - Scheduler, responsible for making scheduling decisions
* /var/log/kube-controller-manager.log - Controller that manages replication controllers
### Worker Nodes
* /var/log/kubelet.log - Kubelet, responsible for running containers on the node
* /var/log/kube-proxy.log - Kube Proxy, responsible for service load balancing

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# DNS Integration with Kubernetes
As of kubernetes 0.8, DNS is offered as a [cluster add-on](../../cluster/addons/README.md).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# High Availability Kubernetes Clusters
**Table of Contents**
@ -44,6 +45,7 @@ Documentation for other releases can be found at
<!-- END MUNGE: GENERATED_TOC -->
## Introduction
This document describes how to build a high-availability (HA) Kubernetes cluster. This is a fairly advanced topic.
Users who merely want to experiment with Kubernetes are encouraged to use configurations that are simpler to set up such as
the simple [Docker based single node cluster instructions](../../docs/getting-started-guides/docker.md),
@ -53,6 +55,7 @@ Also, at this time high availability support for Kubernetes is not continuously
be working to add this continuous testing, but for now the single-node master installations are more heavily tested.
## Overview
Setting up a truly reliable, highly available distributed system requires a number of steps; it is akin to
wearing underwear, pants, a belt, suspenders, another pair of underwear, and another pair of pants. We go into each
of these steps in detail, but a summary is given here to help guide and orient the user.
@ -69,6 +72,7 @@ Here's what the system should look like when it's finished:
Ready? Let's get started.
## Initial set-up
The remainder of this guide assumes that you are setting up a 3-node clustered master, where each machine is running some flavor of Linux.
Examples in the guide are given for Debian distributions, but they should be easily adaptable to other distributions.
Likewise, this set up should work whether you are running in a public or private cloud provider, or if you are running
@ -79,6 +83,7 @@ instructions at [https://get.k8s.io](https://get.k8s.io)
describe easy installation for single-master clusters on a variety of platforms.
## Reliable nodes
On each master node, we are going to run a number of processes that implement the Kubernetes API. The first step in making these reliable is
to make sure that each automatically restarts when it fails. To achieve this, we need to install a process watcher. We choose to use
the ```kubelet``` that we run on each of the worker nodes. This is convenient, since we can use containers to distribute our binaries, we can
@ -99,6 +104,7 @@ On systemd systems you ```systemctl enable kubelet``` and ```systemctl enable do
## Establishing a redundant, reliable data storage layer
The central foundation of a highly available solution is a redundant, reliable storage layer. The number one rule of high-availability is
to protect the data. Whatever else happens, whatever catches on fire, if you have the data, you can rebuild. If you lose the data, you're
done.
@ -110,6 +116,7 @@ size of the cluster from three to five nodes. If that is still insufficient, yo
[even more redundancy to your storage layer](#even-more-reliable-storage).
### Clustering etcd
The full details of clustering etcd are beyond the scope of this document; lots of details are given on the
[etcd clustering page](https://github.com/coreos/etcd/blob/master/Documentation/clustering.md). This example walks through
a simple cluster set up, using etcd's built in discovery to build our cluster.
@ -131,6 +138,7 @@ for ```${NODE_IP}``` on each machine.
#### Validating your cluster
Once you copy this into all three nodes, you should have a clustered etcd set up. You can validate with
```
@ -147,6 +155,7 @@ You can also validate that this is working with ```etcdctl set foo bar``` on one
on a different node.
### Even more reliable storage
Of course, if you are interested in increased data reliability, there are further options which make the place where etcd
installs its data even more reliable than regular disks (belts *and* suspenders, ftw!).
@ -163,9 +172,11 @@ for each node. Throughout these instructions, we assume that this storage is mo
## Replicated API Servers
Once you have replicated etcd set up correctly, we will also install the apiserver using the kubelet.
### Installing configuration files
First you need to create the initial log file, so that Docker mounts a file instead of a directory:
```
@ -184,12 +195,14 @@ Next, you need to create a ```/srv/kubernetes/``` directory on each node. This
The easiest way to create this directory, may be to copy it from the master node of a working cluster, or you can manually generate these files yourself.
### Starting the API Server
Once these files exist, copy the [kube-apiserver.yaml](high-availability/kube-apiserver.yaml) into ```/etc/kubernetes/manifests/``` on each master node.
The kubelet monitors this directory, and will automatically create an instance of the ```kube-apiserver``` container using the pod definition specified
in the file.
### Load balancing
At this point, you should have 3 apiservers all working correctly. If you set up a network load balancer, you should
be able to access your cluster via that load balancer, and see traffic balancing between the apiserver instances. Setting
up a load balancer will depend on the specifics of your platform, for example instructions for the Google Cloud
@ -204,6 +217,7 @@ For external users of the API (e.g. the ```kubectl``` command line interface, co
them to talk to the external load balancer's IP address.
## Master elected components
So far we have set up state storage, and we have set up the API server, but we haven't run anything that actually modifies
cluster state, such as the controller manager and scheduler. To achieve this reliably, we only want to have one actor modifying state at a time, but we want replicated
instances of these actors, in case a machine dies. To achieve this, we are going to use a lease-lock in etcd to perform
@ -227,6 +241,7 @@ by copying [kube-scheduler.yaml](high-availability/kube-scheduler.yaml) and [kub
directory.
### Running the podmaster
Now that the configuration files are in place, copy the [podmaster.yaml](high-availability/podmaster.yaml) config file into ```/etc/kubernetes/manifests/```
As before, the kubelet on the node monitors this directory, and will start an instance of the podmaster using the pod specification provided in ```podmaster.yaml```.
@ -237,6 +252,7 @@ the kubelet will restart them. If any of these nodes fail, the process will mov
node.
## Conclusion
At this point, you are done (yeah!) with the master components, but you still need to add worker nodes (boo!).
If you have an existing cluster, this is as simple as reconfiguring your kubelets to talk to the load-balanced endpoint, and
@ -245,7 +261,7 @@ restarting the kubelets on each node.
If you are turning up a fresh cluster, you will need to install the kubelet and kube-proxy on each worker node, and
set the ```--apiserver``` flag to your replicated endpoint.
##Vagrant up!
## Vagrant up!
We indeed have an initial proof of concept tester for this, which is available [here](../../examples/high-availability/).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-apiserver

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-controller-manager

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-proxy

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-scheduler

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kubelet

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Considerations for running multiple Kubernetes clusters
You may want to set up multiple kubernetes clusters, both to
@ -65,6 +66,7 @@ Reasons to have multiple clusters include:
- test clusters to canary new Kubernetes releases or other cluster software.
## Selecting the right number of clusters
The selection of the number of kubernetes clusters may be a relatively static choice, only revisited occasionally.
By contrast, the number of nodes in a cluster and the number of pods in a service may change frequently according to
load and growth.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Networking in Kubernetes
**Table of Contents**

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Node
**Table of Contents**

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes OpenVSwitch GRE/VxLAN networking
This document describes how OpenVSwitch is used to set up networking between pods across nodes.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Administering Resource Quotas
Kubernetes can limit both the number of objects created in a namespace, and the
@ -49,7 +50,8 @@ Resource Quota is enforced in a particular namespace when there is a
See [ResourceQuota design doc](../design/admission_control_resource_quota.md) for more information.
## Object Count Quota
The number of objects of a given type can be restricted. The following types
are supported:
@ -65,7 +67,8 @@ are supported:
For example, `pods` quota counts and enforces a maximum on the number of `pods`
created in a single namespace.
## Compute Resource Quota
The total number of objects of a given type can be restricted. The following types
are supported:
@ -83,6 +86,7 @@ Any resource that is not part of core Kubernetes must follow the resource naming
This means the resource must have a fully-qualified name (i.e. mycompany.org/shinynewresource)
## Viewing and Setting Quotas
Kubectl supports creating, updating, and viewing quotas
```
@ -123,6 +127,7 @@ services 3 5
```
## Quota and Cluster Capacity
Resource Quota objects are independent of the Cluster Capacity. They are
expressed in absolute units.
@ -136,6 +141,7 @@ writing a 'controller' which watches the quota usage and adjusts the quota
hard limits of each namespace.
## Example
See a [detailed example for how to use resource quota](../user-guide/resourcequota/).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Using Salt to configure Kubernetes
The Kubernetes cluster can be configured using Salt.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Cluster Admin Guide to Service Accounts
*This is a Cluster Administrator guide to service accounts. It assumes knowledge of
@ -57,7 +58,7 @@ for a number of reasons:
accounts for components of that system. Because service accounts can be created
ad-hoc and have namespaced names, such config is portable.
## Service account automation
Three separate components cooperate to implement the automation around service accounts:
- A Service account admission controller
@ -78,6 +79,7 @@ It acts synchronously to modify pods as they are created or updated. When this p
6. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`.
### Token Controller
TokenController runs as part of controller-manager. It acts asynchronously. It:
- observes serviceAccount creation and creates a corresponding Secret to allow API access.
- observes serviceAccount deletion and deletes all corresponding ServiceAccountToken Secrets

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# The Kubernetes API
Primary system and API concepts are documented in the [User guide](user-guide/README.md).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Design Overview
Kubernetes is a system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# K8s Identity and Access Management Sketch
This document suggests a direction for identity and access management in the Kubernetes system.
@ -43,6 +44,7 @@ High level goals are:
- Ease integration with existing enterprise and hosted scenarios.
### Actors
Each of these can act as normal users or attackers.
- External Users: People who are accessing applications running on K8s (e.g. a web site served by webserver running in a container on K8s), but who do not have K8s API access.
- K8s Users : People who access the K8s API (e.g. create K8s API objects like Pods)
@ -51,6 +53,7 @@ Each of these can act as normal users or attackers.
- K8s Admin means K8s Cluster Admins and K8s Project Admins taken together.
### Threats
Both intentional attacks and accidental use of privilege are concerns.
For both cases it may be useful to think about these categories differently:
@ -81,6 +84,7 @@ K8s Cluster assets:
This document is primarily about protecting K8s User assets and K8s cluster assets from other K8s Users and K8s Project and Cluster Admins.
### Usage environments
Cluster in Small organization:
- K8s Admins may be the same people as K8s Users.
- few K8s Admins.
@ -112,6 +116,7 @@ Pods configs should be largely portable between Org-run and hosted configuration
# Design
Related discussion:
- https://github.com/GoogleCloudPlatform/kubernetes/issues/442
- https://github.com/GoogleCloudPlatform/kubernetes/issues/443
@ -125,7 +130,9 @@ K8s distribution should include templates of config, and documentation, for simp
Features in this doc are divided into "Initial Feature", and "Improvements". Initial features would be candidates for version 1.00.
## Identity
###userAccount
### userAccount
K8s will have a `userAccount` API object.
- `userAccount` has a UID which is immutable. This is used to associate users with objects and to record actions in audit logs.
- `userAccount` has a name which is a string and human readable and unique among userAccounts. It is used to refer to users in Policies, to ensure that the Policies are human readable. It can be changed only when there are no Policy objects or other objects which refer to that name. An email address is a suggested format for this field.
@ -158,7 +165,8 @@ Enterprise Profile:
- each service using the API has own `userAccount` too. (e.g. `scheduler`, `repcontroller`)
- automated jobs to denormalize the ldap group info into the local system list of users into the K8s userAccount file.
###Unix accounts
### Unix accounts
A `userAccount` is not a Unix user account. The fact that a pod is started by a `userAccount` does not mean that the processes in that pod's containers run as a Unix user with a corresponding name or identity.
Initially:
@ -170,7 +178,8 @@ Improvements:
- requires docker to integrate user namespace support, and deciding what getpwnam() does for these uids.
- any features that help users avoid use of privileged containers (https://github.com/GoogleCloudPlatform/kubernetes/issues/391)
###Namespaces
### Namespaces
K8s will have a `namespace` API object. It is similar to a Google Compute Engine `project`. It provides a namespace for objects created by a group of people co-operating together, preventing name collisions with non-cooperating groups. It also serves as a reference point for authorization policies.
Namespaces are described in [namespaces.md](namespaces.md).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Proposal - Admission Control
**Related PR:**

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Admission control plugin: LimitRanger
## Background
@ -164,6 +165,7 @@ It is expected we will want to define limits for particular pods or containers b
To make a **LimitRangeItem** more restrictive, we will intend to add these additional restrictions at a future point in time.
## Example
See the [example of Limit Range](../user-guide/limitrange/) for more information.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Admission control plugin: ResourceQuota
## Background
@ -185,6 +186,7 @@ services 3 5
```
## More information
See [resource quota document](../admin/resource-quota.md) and the [example of Resource Quota](../user-guide/resourcequota/) for more information.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes architecture
A running Kubernetes cluster contains node agents (kubelet) and master components (APIs, scheduler, etc), on top of a distributed storage solution. This diagram shows our desired eventual state, though we're still working on a few things, like making kubelet itself (all our components, really) run within containers, and making the scheduler 100% pluggable.
@ -45,6 +46,7 @@ The Kubernetes node has the services necessary to run application containers and
Each node runs Docker, of course. Docker takes care of the details of downloading images and running containers.
### Kubelet
The **Kubelet** manages [pods](../user-guide/pods.md) and their containers, their images, their volumes, etc.
### Kube-Proxy

View File

@ -30,10 +30,12 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Clustering in Kubernetes
## Overview
The term "clustering" refers to the process of having all members of the kubernetes cluster find and trust each other. There are multiple different ways to achieve clustering with different security and usability profiles. This document attempts to lay out the user experiences for clustering that Kubernetes aims to address.
Once a cluster is established, the following is true:

View File

@ -41,6 +41,7 @@ pip install seqdiag
Just call `make` to regenerate the diagrams.
## Building with Docker
If you are on a Mac or your pip install is messed up, you can easily build with docker.
```

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Container Command Execution & Port Forwarding in Kubernetes
## Abstract
@ -87,12 +88,14 @@ won't be able to work with this mechanism, unless adapters can be written.
## Process Flow
### Remote Command Execution Flow
1. The client connects to the Kubernetes Master to initiate a remote command execution
request
2. The Master proxies the request to the Kubelet where the container lives
3. The Kubelet executes nsenter + the requested command and streams stdin/stdout/stderr back and forth between the client and the container
### Port Forwarding Flow
1. The client connects to the Kubernetes Master to initiate a remote command execution
request
2. The Master proxies the request to the Kubelet where the container lives

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Event Compression
This document captures the design of event compression.
@ -40,11 +41,13 @@ This document captures the design of event compression.
Kubernetes components can get into a state where they generate tons of events which are identical except for the timestamp. For example, when pulling a non-existing image, Kubelet will repeatedly generate ```image_not_existing``` and ```container_is_waiting``` events until upstream components correct the image. When this happens, the spam from the repeated events makes the entire event mechanism useless. It also appears to cause memory pressure in etcd (see [#3853](https://github.com/GoogleCloudPlatform/kubernetes/issues/3853)).
## Proposal
Each binary that generates events (for example, ```kubelet```) should keep track of previously generated events so that it can collapse recurring events into a single event instead of creating a new instance for each new event.
Event compression should be best effort (not guaranteed). Meaning, in the worst case, ```n``` identical (minus timestamp) events may still result in ```n``` event entries.
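
A minimal sketch of the bookkeeping this proposal describes, assuming events are keyed on everything except their timestamps. The real design below keeps this state in an LRU cache inside each component; a plain map and made-up field names are used here purely for illustration.

```go
package main

import (
    "fmt"
    "time"
)

// compressedEvent mirrors the FirstTimestamp/LastTimestamp/Count idea from
// the design section below.
type compressedEvent struct {
    FirstTimestamp time.Time
    LastTimestamp  time.Time
    Count          int
}

// eventKey stands in for "all event fields except the timestamps".
type eventKey struct {
    Source, Object, Reason, Message string
}

var seen = map[eventKey]*compressedEvent{}

// recordEvent collapses identical recurring events into a single entry,
// bumping Count and LastTimestamp instead of creating a new record.
func recordEvent(k eventKey, now time.Time) *compressedEvent {
    if e, ok := seen[k]; ok {
        e.Count++
        e.LastTimestamp = now
        return e
    }
    e := &compressedEvent{FirstTimestamp: now, LastTimestamp: now, Count: 1}
    seen[k] = e
    return e
}

func main() {
    k := eventKey{"kubelet", "pod/foo", "failedScheduling", "no nodes available"}
    for i := 0; i < 3; i++ {
        recordEvent(k, time.Now())
    }
    fmt.Println(seen[k].Count) // prints 3: three identical events, one entry
}
```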
## Design
Instead of a single Timestamp, each event object [contains](../../pkg/api/types.go#L1111) the following fields:
* ```FirstTimestamp util.Time```
* The date/time of the first occurrence of the event.
@ -78,11 +81,13 @@ Each binary that generates events:
* An entry for the event is also added to the previously generated events cache.
## Issues/Risks
* Compression is not guaranteed, because each component keeps track of event history in memory
* An application restart causes event history to be cleared, meaning event history is not preserved across application restarts and compression will not occur across component restarts.
* Because an LRU cache is used to keep track of previously generated events, if too many unique events are generated, old events will be evicted from the cache, so events will only be compressed until they age out of the events cache, at which point any new instance of the event will cause a new entry to be created in etcd.
## Example
Sample kubectl output
```
@ -104,6 +109,7 @@ Thu, 12 Feb 2015 01:13:20 +0000 Thu, 12 Feb 2015 01:13:20 +0000 1
This demonstrates what would have been 20 separate entries (indicating scheduling failure) collapsed/compressed down to 5 entries.
## Related Pull Requests/Issues
* Issue [#4073](https://github.com/GoogleCloudPlatform/kubernetes/issues/4073): Compress duplicate events
* PR [#4157](https://github.com/GoogleCloudPlatform/kubernetes/issues/4157): Add "Update Event" to Kubernetes API
* PR [#4206](https://github.com/GoogleCloudPlatform/kubernetes/issues/4206): Modify Event struct to allow compressing multiple recurring events in to a single event

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Variable expansion in pod command, args, and env
## Abstract

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Identifiers and Names in Kubernetes
A summarization of the goals and recommendations for identifiers in Kubernetes. Described in [GitHub issue #199](https://github.com/GoogleCloudPlatform/kubernetes/issues/199).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Namespaces
## Abstract

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Networking
There are 4 distinct networking problems to solve:

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Persistent Storage
This document proposes a model for managing persistent, cluster-scoped storage for applications requiring long lived data.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Design Principles
Principles to follow when extending Kubernetes.

View File

@ -48,6 +48,7 @@ The resource model aims to be:
* precise, to avoid misunderstandings and promote pod portability.
## The resource model
A Kubernetes _resource_ is something that can be requested by, allocated to, or consumed by a pod or container. Examples include memory (RAM), CPU, disk-time, and network bandwidth.
Once resources on a node have been allocated to one pod, they should not be allocated to another until that pod is removed or exits. This means that Kubernetes schedulers should ensure that the sum of the resources allocated (requested and granted) to its pods never exceeds the usable capacity of the node. Testing whether a pod will fit on a node is called _feasibility checking_.
@ -124,9 +125,11 @@ Where:
## Kubernetes-defined resource types
The following resource types are predefined ("reserved") by Kubernetes in the `kubernetes.io` namespace, and so cannot be used for user-defined resources. Note that the syntax of all resource types in the resource spec is deliberately similar, but some resource types (e.g., CPU) may receive significantly more support than simply tracking quantities in the schedulers and/or the Kubelet.
### Processor cycles
* Name: `cpu` (or `kubernetes.io/cpu`)
* Units: Kubernetes Compute Unit seconds/second (i.e., CPU cores normalized to a canonical "Kubernetes CPU")
* Internal representation: milli-KCUs
@ -141,6 +144,7 @@ Note that requesting 2 KCU won't guarantee that precisely 2 physical cores will
### Memory
* Name: `memory` (or `kubernetes.io/memory`)
* Units: bytes
* Compressible? no (at least initially)
@ -152,6 +156,7 @@ rather than decimal ones: "64MiB" rather than "64MB".
## Resource metadata
A resource type may have an associated read-only ResourceType structure, that contains metadata about the type. For example:
```
@ -222,16 +227,19 @@ and predicted
## Future resource types
### _[future] Network bandwidth_
* Name: "network-bandwidth" (or `kubernetes.io/network-bandwidth`)
* Units: bytes per second
* Compressible? yes
### _[future] Network operations_
* Name: "network-iops" (or `kubernetes.io/network-iops`)
* Units: operations (messages) per second
* Compressible? yes
### _[future] Storage space_
* Name: "storage-space" (or `kubernetes.io/storage-space`)
* Units: bytes
* Compressible? no
@ -239,6 +247,7 @@ and predicted
The amount of secondary storage space available to a container. The main target is local disk drives and SSDs, although this could also be used to qualify remotely-mounted volumes. Specifying whether a resource is a raw disk, an SSD, a disk array, or a file system fronting any of these, is left for future work.
### _[future] Storage time_
* Name: storage-time (or `kubernetes.io/storage-time`)
* Units: seconds per second of disk time
* Internal representation: milli-units
@ -247,6 +256,7 @@ The amount of secondary storage space available to a container. The main target
This is the amount of time a container spends accessing disk, including actuator and transfer time. A standard disk drive provides 1.0 diskTime seconds per second.
### _[future] Storage operations_
* Name: "storage-iops" (or `kubernetes.io/storage-iops`)
* Units: operations per second
* Compressible? yes

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Security in Kubernetes
Kubernetes should define a reasonable set of security best practices that allows processes to be isolated from each other, from the cluster infrastructure, and which preserves important boundaries between those who manage the cluster, and those who use the cluster.

View File

@ -30,8 +30,11 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Security Contexts
## Abstract
A security context is a set of constraints that are applied to a container in order to achieve the following goals (from [security design](security.md)):
1. Ensure a clear isolation between container and the underlying host it runs on
@ -53,11 +56,13 @@ to the container process.
Support for user namespaces has recently been [merged](https://github.com/docker/libcontainer/pull/304) into Docker's libcontainer project and should soon surface in Docker itself. It will make it possible to assign a range of unprivileged uids and gids from the host to each container, improving the isolation between host and container and between containers.
### External integration with shared storage
In order to support external integration with shared storage, processes running in a Kubernetes cluster
should be able to be uniquely identified by their Unix UID, such that a chain of ownership can be established.
Processes in pods will need to have consistent UID/GID/SELinux category labels in order to access shared disks.
## Constraints and Assumptions
* It is out of the scope of this document to prescribe a specific set
of constraints to isolate containers from their host. Different use cases need different
settings.
@ -96,6 +101,7 @@ be addressed with security contexts:
## Proposed Design
### Overview
A *security context* consists of a set of constraints that determine how a container
is secured before getting created and run. A security context resides on the container and represents the runtime parameters that will
be used to create and run the container via container APIs. A *security context provider* is passed to the Kubelet so it can have a chance

View File

@ -30,7 +30,8 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
#Service Accounts
# Service Accounts
## Motivation
@ -50,6 +51,7 @@ They also may interact with services other than the Kubernetes API, such as:
- accessing files in an NFS volume attached to the pod
## Design Overview
A service account binds together several things:
- a *name*, understood by users, and perhaps by peripheral systems, for an identity
- a *principal* that can be authenticated and [authorized](../admin/authorization.md)
@ -137,6 +139,7 @@ are added to the map of tokens used by the authentication process in the apiserv
might have some types that do not do anything on apiserver but just get pushed to the kubelet.)
### Pods
The `PodSpec` is extended to have a `Pods.Spec.ServiceAccountUsername` field. If this is unset, then a
default value is chosen. If it is set, then the corresponding value of `Pods.Spec.SecurityContext` is set by the
Service Account Finalizer (see below).
@ -144,6 +147,7 @@ Service Account Finalizer (see below).
TBD: how policy limits which users can make pods with which service accounts.
### Authorization
Kubernetes API Authorization Policies refer to users. Pods created with a `Pods.Spec.ServiceAccountUsername` typically
get a `Secret` which allows them to authenticate to the Kubernetes APIserver as a particular user. So any
policy that is desired can be applied to them.

View File

@ -30,12 +30,15 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Simple rolling update
This is a lightweight design document for simple [rolling update](../user-guide/kubectl/kubectl_rolling-update.md) in ```kubectl```.
Complete execution flow can be found [here](#execution-details). See the [example of rolling update](../user-guide/update-demo/) for more information.
### Lightweight rollout
Assume that we have a current replication controller named ```foo``` and it is running image ```image:v1```
```kubectl rolling-update foo [foo-v2] --image=myimage:v2```
@ -51,6 +54,7 @@ and the old 'foo' replication controller is deleted. For the purposes of the ro
The value of that label is the hash of the complete JSON representation of the ```foo-next``` or ```foo``` replication controller. The name of this label can be overridden by the user with the ```--deployment-label-key``` flag.
#### Recovery
If a rollout fails or is terminated in the middle, it is important that the user be able to resume the roll out.
To facilitate recovery in the case of a crash of the updating process itself, we add the following annotations to each replication controller in the ```kubernetes.io/``` annotation namespace:
* ```desired-replicas``` The desired number of replicas for this replication controller (either N or zero)
@ -68,6 +72,7 @@ it is assumed that the rollout is nearly completed, and ```foo-next``` is rename
### Aborting a rollout
Abort is assumed to want to reverse a rollout in progress.
```kubectl rolling-update foo [foo-v2] --rollback```
@ -87,6 +92,7 @@ If the user doesn't specify a ```foo-next``` name, then it is either discovered
then ```foo-next``` is synthesized using the pattern ```<controller-name>-<hash-of-next-controller-JSON>```
#### Initialization
* If ```foo``` and ```foo-next``` do not exist:
* Exit, and indicate an error to the user, that the specified controller doesn't exist.
* If ```foo``` exists, but ```foo-next``` does not:
@ -102,6 +108,7 @@ then ```foo-next``` is synthesized using the pattern ```<controller-name>-<hash-
* Goto Rollout
#### Rollout
* While size of ```foo-next``` < ```desired-replicas``` annotation on ```foo-next```
* increase size of ```foo-next```
* if size of ```foo``` > 0
@ -109,11 +116,13 @@ then ```foo-next``` is synthesized using the pattern ```<controller-name>-<hash-
* Goto Rename
#### Rename
* delete ```foo```
* create ```foo``` that is identical to ```foo-next```
* delete ```foo-next```
#### Abort
* If ```foo-next``` doesn't exist
* Exit and indicate to the user that they may want to simply do a new rollout with the old version
* If ```foo``` doesn't exist

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes API and Release Versioning
Legend:

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes Developer Guide
The developer guide is for anyone wanting to either write code which directly accesses the

View File

@ -456,6 +456,7 @@ The following HTTP status codes may be returned by the API.
* Returned in response to HTTP OPTIONS requests.
#### Error codes
* `307 StatusTemporaryRedirect`
* Indicates that the address for the requested resource has changed.
* Suggested client recovery behavior

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# So you want to change the API?
The Kubernetes API has two major components - the internal structures and
@ -365,6 +366,7 @@ $ hack/update-swagger-spec.sh
The API spec changes should be in a commit separate from your other changes.
## Incompatible API changes
If your change is going to be backward incompatible or might be a breaking change for API
consumers, please send an announcement to `kubernetes-dev@googlegroups.com` before
the change gets in. If you are unsure, ask. Also make sure that the change gets documented in

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Overview
This document explains how cherry picks are managed on release branches within the

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Kubernetes CLI/Configuration Roadmap
See also issues with the following labels:

View File

@ -30,12 +30,15 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kubernetes API client libraries
### Supported
* [Go](../../pkg/client/)
### User Contributed
*Note: Libraries provided by outside parties are supported by their authors, not the core Kubernetes team*
* [Java (OSGI)](https://bitbucket.org/amdatulabs/amdatu-kubernetes)

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# On Collaborative Development
Kubernetes is open source, but many of the people working on it do so as their day job. In order to avoid forcing people to be "at work" effectively 24/7, we want to establish some semi-formal protocols around development. Hopefully these rules make things go more smoothly. If you find that this is not the case, please complain loudly.

View File

@ -30,11 +30,13 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Getting started with Vagrant
Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/develop on your local machine (Linux, Mac OS X).
### Prerequisites
1. Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html
2. Install one of:
1. The latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads
@ -371,6 +373,7 @@ export KUBERNETES_MINION_MEMORY=2048
```
#### I ran vagrant suspend and nothing works!
```vagrant suspend``` seems to mess up the network. It's not supported at this time.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Development Guide
# Releases and Official Builds
@ -45,6 +46,7 @@ Kubernetes is written in [Go](http://golang.org) programming language. If you ha
Below, we outline one of the more common git workflows that core developers use. Other git workflows are also valid.
### Visual overview
![Git workflow](git_workflow.png)
### Fork the main repository
@ -93,6 +95,7 @@ $ git push -f origin myfeature
```
### Creating a pull request
1. Visit http://github.com/$YOUR_GITHUB_USERNAME/kubernetes
2. Click the "Compare and pull request" button next to your "myfeature" branch.
@ -102,6 +105,7 @@ $ git push -f origin myfeature
Kubernetes uses [godep](https://github.com/tools/godep) to manage dependencies. It is not strictly required for building Kubernetes but it is required when managing dependencies under the Godeps/ tree, and is required by a number of the build and test scripts. Please make sure that ``godep`` is installed and in your ``$PATH``.
### Installing godep
There are many ways to build and host go binaries. Here is an easy way to get utilities like ```godep``` installed:
1) Ensure that [mercurial](http://mercurial.selenic.com/wiki/Download) is installed on your system. (some of godep's dependencies use the mercurial
@ -124,6 +128,7 @@ export PATH=$PATH:$GOPATH/bin
```
### Using godep
Here's a quick walkthrough of one way to use godeps to add or update a Kubernetes dependency into Godeps/_workspace. For more details, please see the instructions in [godep's documentation](https://github.com/tools/godep).
1) Devote a directory to this endeavor:
@ -259,6 +264,7 @@ go run hack/e2e.go --down
```
### Flag options
See the flag definitions in `hack/e2e.go` for more options, such as reusing an existing cluster; here is an overview:
```sh
@ -309,6 +315,7 @@ go run hack/e2e.go -v -ctl='delete pod foobar'
```
## Conformance testing
End-to-end testing, as described above, is for [development
distributions](writing-a-getting-started-guide.md). A conformance test is used on
a [versioned distro](writing-a-getting-started-guide.md).
@ -320,6 +327,7 @@ intended to run against a cluster at a specific binary release of Kubernetes.
See [conformance-test.sh](../../hack/conformance-test.sh).
## Testing out flaky tests
[Instructions here](flaky-tests.md)
## Regenerating the CLI documentation

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# How to get faster PR reviews
Most of what is written here is not at all specific to Kubernetes, but it bears

View File

@ -30,7 +30,9 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Hunting flaky tests in Kubernetes
Sometimes unit tests are flaky. This means that due to (usually) race conditions, they will occasionally fail, even though most of the time they pass.
We have a goal of 99.9% flake free tests. This means that there is only one flake in one thousand runs of a test.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Getting Kubernetes Builds
You can use [hack/get-build.sh](../../hack/get-build.sh) to fetch the most recent builds with curl, or use it as a reference for doing so yourself. With `get-build.sh` you can grab the most recent stable build, the most recent release candidate, or the most recent build to pass our ci and gce e2e tests (essentially a nightly build).

View File

@ -30,10 +30,13 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Making release notes
This documents the process for making release notes for a release.
### 1) Note the PR number of the previous release
Find the most-recent PR that was merged with the previous .0 release. Remember this as $LASTPR.
_TODO_: Figure out a way to record this somewhere to save the next release engineer time.
@ -46,6 +49,7 @@ ${KUBERNETES_ROOT}/build/make-release-notes.sh $LASTPR $CURRENTPR
```
### 3) Trim the release notes
This generates a list of the entire set of PRs merged since the last minor
release. It is likely long and many PRs aren't worth mentioning. If any of the
PRs were cherrypicked into patches on the last minor release, you should exclude
@ -57,9 +61,11 @@ Remove, regroup, organize to your hearts content.
### 4) Update CHANGELOG.md
With the final markdown all set, cut and paste it to the top of ```CHANGELOG.md```
### 5) Update the Release page
* Switch to the [releases](https://github.com/GoogleCloudPlatform/kubernetes/releases) page.
* Open up the release you are working on.
* Cut and paste the final markdown from above into the release notes

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Profiling Kubernetes
This document explains how to plug in the profiler and how to profile Kubernetes services.
@ -53,6 +54,7 @@ to the init(c *Config) method in 'pkg/master/master.go' and import 'net/http/ppr
In most use cases it is enough to 'import _ net/http/pprof', which automatically registers a handler in the default http.Server. A slight inconvenience is that the APIserver uses the default server for intra-cluster communication, so plugging the profiler into it is not really useful. In 'pkg/master/server/server.go' more servers are created and started as separate goroutines. The one that usually serves external traffic is secureServer. The handler for this traffic is defined in 'pkg/master/master.go' and stored in the Handler variable. It is created from an HTTP multiplexer, so the only thing that needs to be done is adding the profiler handler functions to this multiplexer. This is exactly what the lines after TL;DR do.
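
For illustration, attaching the standard library's pprof handlers to an HTTP multiplexer looks roughly like the following; this is a generic sketch, not the actual pkg/master code referenced above.

```go
package main

import (
    "log"
    "net/http"
    "net/http/pprof"
)

func main() {
    mux := http.NewServeMux()
    // Register the profiler endpoints on the same multiplexer that serves
    // the rest of the traffic, as the paragraph above describes.
    mux.HandleFunc("/debug/pprof/", pprof.Index)
    mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
    mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
    log.Fatal(http.ListenAndServe("localhost:8080", mux))
}
```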
## Connecting to the profiler
Even with the profiler running, I found it not entirely straightforward to use 'go tool pprof' with it. The problem is that, at least for dev purposes, the certificates generated for the APIserver are not signed by anyone trusted, and because secureServer serves only secure traffic it isn't straightforward to connect to the service. The best workaround I found is creating an ssh tunnel from the open unsecured port on the kubernetes_master to some external server, and using that server as a proxy. To save everyone looking for the correct ssh flags, it is done by running:
```

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Releasing Kubernetes
This document explains how to cut a release, and the theory behind it. If you
@ -87,6 +88,7 @@ Where `v0.20.2-322-g974377b` is the git hash you decided on. This will become
our (retroactive) branch point.
#### Branching, Tagging and Merging
Do the following:
1. `export VER=x.y` (e.g. `0.20` for v0.20)

View File

@ -30,11 +30,13 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Scheduler Algorithm in Kubernetes
For each unscheduled Pod, the Kubernetes scheduler tries to find a node across the cluster according to a set of rules. A general introduction to the Kubernetes scheduler can be found at [scheduler.md](scheduler.md). In this document, the algorithm of how to select a node for the Pod is explained. There are two steps before a destination node of a Pod is chosen. The first step is filtering all the nodes and the second is ranking the remaining nodes to find a best fit for the Pod.
## Filtering the nodes
The purpose of filtering the nodes is to filter out the nodes that do not meet certain requirements of the Pod. For example, if the free resource on a node (measured by the capacity minus the sum of the resource limits of all the Pods that already run on the node) is less than the Pod's required resource, the node should not be considered in the ranking phase so it is filtered out. Currently, there are several "predicates" implementing different filtering policies, including:
- `NoDiskConflict`: Evaluate if a pod can fit due to the volumes it requests, and those that are already mounted.
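
As a generic illustration of the filter-then-rank flow described above (the types and the predicate here are made up for the example and are not the scheduler's actual API):

```go
package main

import "fmt"

// node and pod are simplified stand-ins for the scheduler's real types.
type node struct {
    name         string
    freeMilliCPU int
}

type pod struct {
    requestMilliCPU int
}

// predicate reports whether a pod fits on a node; filtering policies such as
// NoDiskConflict have this general shape.
type predicate func(pod, node) bool

func podFitsResources(p pod, n node) bool {
    return p.requestMilliCPU <= n.freeMilliCPU
}

// filterNodes keeps only the nodes that pass every predicate; the survivors
// then move on to the ranking phase.
func filterNodes(p pod, nodes []node, preds []predicate) []node {
    var fit []node
    for _, n := range nodes {
        ok := true
        for _, pr := range preds {
            if !pr(p, n) {
                ok = false
                break
            }
        }
        if ok {
            fit = append(fit, n)
        }
    }
    return fit
}

func main() {
    nodes := []node{{"small", 500}, {"big", 2000}}
    p := pod{requestMilliCPU: 1000}
    // Only "big" survives filtering for this pod.
    fmt.Println(filterNodes(p, nodes, []predicate{podFitsResources}))
}
```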

View File

@ -32,6 +32,7 @@ Documentation for other releases can be found at
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Writing a Getting Started Guide
This page gives some advice for anyone planning to write or update a Getting Started Guide for Kubernetes.
It also gives some guidelines which reviewers should follow when reviewing a pull request for a
guide.
@ -57,6 +58,7 @@ Distros fall into two categories:
There are different guidelines for each.
## Versioned Distro Guidelines
These guidelines say *what* to do. See the Rationale section for *why*.
- Send us a PR.
- Put the instructions in `docs/getting-started-guides/...`. Scripts go there too. This helps devs easily
@ -77,6 +79,7 @@ we still want to hear from you. We suggest you write a blog post or a Gist, and
Just file an issue or chat us on IRC and one of the committers will link to it from the wiki.
## Development Distro Guidelines
These guidelines say *what* to do. See the Rationale section for *why*.
- the main reason to add a new development distro is to support a new IaaS provider (VM and
network management). This means implementing a new `pkg/cloudprovider/$IAAS_NAME`.
@ -93,6 +96,7 @@ These guidelines say *what* to do. See the Rationale section for *why*.
refactoring and feature additions that affect code for their IaaS.
## Rationale
- We want people to create Kubernetes clusters with whatever IaaS, Node OS,
configuration management tools, and so on, which they are familiar with. The
guidelines for **versioned distros** are designed for flexibility.

View File

@ -55,6 +55,7 @@ they vary from step-by-step instructions to general advice for setting up
a kubernetes cluster from scratch.
### Local-machine Solutions
Local-machine solutions create a single cluster with one or more kubernetes nodes on a single
physical machine. Setup is completely automated and doesn't require a cloud provider account.
But their size and availability are limited to those of a single machine.
@ -66,10 +67,12 @@ The local-machine solutions are:
### Hosted Solutions
[Google Container Engine](https://cloud.google.com/container-engine) offers managed Kubernetes
clusters.
### Turn-key Cloud Solutions
These solutions allow you to create Kubernetes clusters on a range of Cloud IaaS providers with only a
few commands, and have active community support.
- [GCE](gce.md)
@ -90,6 +93,7 @@ If you are interested in supporting Kubernetes on a new platform, check out our
writing a new solution](../../docs/devel/writing-a-getting-started-guide.md).
#### Cloud
These solutions are combinations of cloud provider and OS not covered by the above solutions.
- [AWS + coreos](coreos.md)
- [GCE + CoreOS](coreos.md)
@ -98,6 +102,7 @@ These solutions are combinations of cloud provider and OS not covered by the abo
- [Rackspace + CoreOS](rackspace.md)
#### On-Premises VMs
- [Vagrant](coreos.md) (uses CoreOS and flannel)
- [CloudStack](cloudstack.md) (uses Ansible, CoreOS and flannel)
- [Vmware](vsphere.md) (uses Debian)
@ -109,6 +114,7 @@ These solutions are combinations of cloud provider and OS not covered by the abo
- [KVM](fedora/flannel_multi_node_cluster.md) (uses Fedora and flannel)
#### Bare Metal
- [Offline](coreos/bare_metal_offline.md) (no internet required. Uses CoreOS and Flannel)
- [fedora/fedora_ansible_config.md](fedora/fedora_ansible_config.md)
- [Fedora single node](fedora/fedora_manual_config.md)
@ -118,9 +124,11 @@ These solutions are combinations of cloud provider and OS not covered by the abo
- [Docker Multi Node](docker-multinode.md)
#### Integrations
- [Kubernetes on Mesos](mesos.md) (Uses GCE)
## Table of Solutions
Here are all the solutions mentioned above in table form.
IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Getting started on Amazon EC2 with CoreOS
The example below creates an elastic Kubernetes cluster with a custom number of worker nodes and a master.

View File

@ -52,6 +52,7 @@ Getting started on AWS EC2
3. You need an AWS [instance profile and role](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) with EC2 full access.
## Cluster turnup
### Supported procedure: `get-kube`
```bash
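# Illustrative sketch (an assumption -- see the full guide for the exact command):
# select the AWS provider and run the get-kube installer.
export KUBERNETES_PROVIDER=aws; wget -q -O - https://get.k8s.io | bash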
@ -89,11 +90,14 @@ If these already exist, make sure you want them to be used here.
NOTE: If using an existing keypair named "kubernetes" then you must set the `AWS_SSH_KEY` key to point to your private key.
### Alternatives
A contributed [example](aws-coreos.md) allows you to set up a Kubernetes cluster based on [CoreOS](http://www.coreos.com), either using
AWS CloudFormation or EC2 with user data (cloud-config).
## Getting started with your cluster
### Command line administration tool: `kubectl`
The cluster startup script will leave you with a ```kubernetes``` directory on your workstation.
Alternately, you can download the latest Kubernetes release from [this page](https://github.com/GoogleCloudPlatform/kubernetes/releases).
@ -113,6 +117,7 @@ By default, `kubectl` will use the `kubeconfig` file generated during the cluste
For more information, please read [kubeconfig files](../../docs/user-guide/kubeconfig-file.md)
### Examples
See [a simple nginx example](../../docs/user-guide/simple-nginx.md) to try out your new cluster.
The "Guestbook" application is another popular example to get started with Kubernetes: [guestbook example](../../examples/guestbook/)
@ -120,6 +125,7 @@ The "Guestbook" application is another popular example to get started with Kuber
For more complete applications, please look in the [examples directory](../../examples/)
## Tearing down the cluster
Make sure the environment variables you used to provision your cluster are still exported, then call the following script inside the
`kubernetes` directory:
@ -128,6 +134,7 @@ cluster/kube-down.sh
```
## Further reading
Please see the [Kubernetes docs](../../docs/) for more details on administering
and using a Kubernetes cluster.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Install and configure kubectl
## Download the kubectl CLI tool

View File

@ -58,7 +58,9 @@ installed](https://docs.docker.com/installation/). On Mac OS X you can use
[boot2docker](http://boot2docker.io/).
## Setup
###Starting a cluster
### Starting a cluster
The cluster setup scripts can set up Kubernetes for multiple targets. First modify `cluster/kube-env.sh` to specify azure:
KUBERNETES_PROVIDER="azure"
@ -83,6 +85,7 @@ The script above will start (by default) a single master VM along with 4 worker
can tweak some of these parameters by editing `cluster/azure/config-default.sh`.
### Adding the kubernetes command line tools to PATH
The [kubectl](../../docs/user-guide/kubectl/kubectl.md) tool controls the Kubernetes cluster manager. It lets you inspect your cluster resources, create, delete, and update components, and much more.
You will use it to look at your new cluster and bring up example apps.
@ -95,6 +98,7 @@ Add the appropriate binary folder to your ```PATH``` to access kubectl:
export PATH=<path/to/kubernetes-directory>/platforms/linux/amd64:$PATH
## Getting started with your cluster
See [a simple nginx example](../user-guide/simple-nginx.md) to try out your new cluster.
For more complete applications, please look in the [examples directory](../../examples/).

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Getting a Binary Release
You can either build a release from sources or download a pre-built release. If you do not plan on developing Kubernetes itself, we suggest a pre-built release.

View File

@ -37,10 +37,13 @@ Getting started on [CentOS](http://centos.org)
- [Prerequisites](#prerequisites)
- [Starting a cluster](#starting-a-cluster)
## Prerequisites
You need two machines with CentOS installed on them.
## Starting a cluster
This is a getting started guide for CentOS. It is a manual configuration so you understand all the underlying packages / services / ports, etc...
This guide will only get ONE node working. Multiple nodes require a functional [networking configuration](../../admin/networking.md) done outside of kubernetes, although the additional kubernetes configuration requirements should be obvious.

View File

@ -52,7 +52,7 @@ This is a completely automated, a single playbook deploys Kubernetes based on th
This [Ansible](http://ansibleworks.com) playbook deploys Kubernetes on a CloudStack-based cloud using CoreOS images. The playbook creates an ssh key pair, creates a security group and associated rules, and finally starts CoreOS instances configured via cloud-init.
###Prerequisites
### Prerequisites
$ sudo apt-get install -y python-pip
$ sudo pip install ansible
@ -74,14 +74,14 @@ Or create a `~/.cloudstack.ini` file:
We need to use the http POST method to pass the _large_ userdata to the coreOS instances.
###Clone the playbook
### Clone the playbook
$ git clone --recursive https://github.com/runseb/ansible-kubernetes.git
$ cd ansible-kubernetes
The [ansible-cloudstack](https://github.com/resmo/ansible-cloudstack) module is set up in this repository as a submodule, hence the `--recursive`.
###Create a Kubernetes cluster
### Create a Kubernetes cluster
You simply need to run the playbook.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Getting started on [CoreOS](http://coreos.com)
There are multiple guides on running Kubernetes with [CoreOS](http://coreos.com):

View File

@ -49,6 +49,7 @@ Kubernetes on Azure with CoreOS and [Weave](http://weave.works)
In this guide I will demonstrate how to deploy a Kubernetes cluster to Azure cloud. You will be using CoreOS with Weave, which implements simple and secure networking, in a transparent, yet robust way. The purpose of this guide is to provide an out-of-the-box implementation that can ultimately be taken into production with little change. It will demonstrate how to provision a dedicated Kubernetes master and etcd nodes, and show how to scale the cluster with ease.
### Prerequisites
1. You need an Azure account.
## Let's go!

View File

@ -53,10 +53,12 @@ Deploy a CoreOS running Kubernetes environment. This particular guild is made to
## Prerequisites
1. Installed *CentOS 6* for PXE server
2. At least two bare metal nodes to work with
## High Level Design
1. Manage the tftp directory
* /tftpboot/(coreos)(centos)(RHEL)
* /tftpboot/pxelinux.0/(MAC) -> linked to Linux image config file
@ -67,6 +69,7 @@ Deploy a CoreOS running Kubernetes environment. This particular guild is made to
6. Installing the CoreOS slaves to become Kubernetes nodes.
## This Guides variables
| Node Description | MAC | IP |
| :---------------------------- | :---------------: | :---------: |
| CoreOS/etcd/Kubernetes Master | d0:00:67:13:0d:00 | 10.20.30.40 |
@ -75,6 +78,7 @@ Deploy a CoreOS running Kubernetes environment. This particular guild is made to
## Setup PXELINUX CentOS
To set up a CentOS PXELINUX environment there is a complete [guide here](http://docs.fedoraproject.org/en-US/Fedora/7/html/Installation_Guide/ap-pxe-server.html). This section is the abbreviated version.
1. Install packages needed on CentOS
@ -121,6 +125,7 @@ To setup CentOS PXELINUX environment there is a complete [guide here](http://doc
Now you should have a working PXELINUX setup to image CoreOS nodes. You can verify the services by using VirtualBox locally or with bare metal servers.
## Adding CoreOS to PXE
This section describes how to set up the CoreOS images to live alongside a pre-existing PXELINUX environment.
1. Find or create the TFTP root directory that everything will be based off of.
@ -168,6 +173,7 @@ This section describes how to setup the CoreOS images to live alongside a pre-ex
This configuration file will now boot from local drive but have the option to PXE image CoreOS.
## DHCP configuration
This section covers configuring the DHCP server to hand out our new images. In this case we are assuming that there are other servers that will boot alongside other images.
1. Add the ```filename``` to the _host_ or _subnet_ sections.
@ -210,6 +216,7 @@ This section covers configuring the DHCP server to hand out our new images. In t
We will be specifying the node configuration later in the guide.
## Kubernetes
To deploy our configuration we need to create an ```etcd``` master. To do so we want to PXE CoreOS with a specific cloud-config.yml. There are two options here:
1. Template the cloud config file and programmatically create new static configs for different cluster setups.
2. Have a service discovery protocol running in our stack to do auto discovery.
@ -243,6 +250,7 @@ This sets up our binaries we need to run Kubernetes. This would need to be enhan
Now for the good stuff!
## Cloud Configs
The following config files are tailored for the OFFLINE version of a Kubernetes deployment.
These are based on the work found here: [master.yml](cloud-configs/master.yaml), [node.yml](cloud-configs/node.yaml)
@ -256,6 +264,7 @@ To make the setup work, you need to replace a few placeholders:
- Add your own SSH public key(s) to the cloud config at the end
### master.yml
On the PXE server make and fill in the variables ```vi /var/www/html/coreos/pxe-cloud-config-master.yml```.
@ -476,6 +485,7 @@ On the PXE server make and fill in the variables ```vi /var/www/html/coreos/pxe-
### node.yml
On the PXE server make and fill in the variables ```vi /var/www/html/coreos/pxe-cloud-config-slave.yml```.
#cloud-config
@ -610,6 +620,7 @@ On the PXE server make and fill in the variables ```vi /var/www/html/coreos/pxe-
## New pxelinux.cfg file
Create a pxelinux target file for a _slave_ node: ```vi /tftpboot/pxelinux.cfg/coreos-node-slave```
default coreos
@ -637,6 +648,7 @@ And one for the _master_ node: ```vi /tftpboot/pxelinux.cfg/coreos-node-master``
append initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http://<pxe-host-ip>/coreos/pxe-cloud-config-master.yml console=tty0 console=ttyS0 coreos.autologin=tty1 coreos.autologin=ttyS0
## Specify the pxelinux targets
Now that we have our new targets set up for master and slave, we want to configure the specific hosts to those targets. We will do this by using the pxelinux mechanism of mapping a specific MAC address to a specific pxelinux.cfg file.
Refer to the MAC address table in the beginning of this guide. Documentation for more details can be found [here](http://www.syslinux.org/wiki/index.php/PXELINUX).
@ -650,6 +662,7 @@ Refer to the MAC address table in the beginning of this guide. Documentation for
Reboot these servers to get the images PXEd and ready for running containers!
## Creating test pod
Now that CoreOS with Kubernetes installed is up and running, let's spin up some Kubernetes pods to demonstrate the system.
See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try out your new cluster.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# CoreOS Multinode Cluster
Use the [master.yaml](cloud-configs/master.yaml) and [node.yaml](cloud-configs/node.yaml) cloud-configs to provision a multi-node Kubernetes cluster.

View File

@ -51,9 +51,11 @@ Please install Docker 1.6.2 or wait for Docker 1.7.1.
- [Testing your cluster](#testing-your-cluster)
## Prerequisites
1. You need a machine with docker installed.
## Overview
This guide will set up a 2-node kubernetes cluster, consisting of a _master_ node which hosts the API server and orchestrates work
and a _worker_ node which receives work from the master. You can repeat the process of adding worker nodes an arbitrary number of
times to create larger clusters.
@ -62,6 +64,7 @@ Here's a diagram of what the final result will look like:
![Kubernetes Single Node on Docker](k8s-docker.png)
### Bootstrap Docker
This guide also uses a pattern of running two instances of the Docker daemon:
1) A _bootstrap_ Docker instance which is used to start system daemons like ```flanneld``` and ```etcd```
2) A _main_ Docker instance which is used for the Kubernetes infrastructure and user's scheduled containers
@ -71,6 +74,7 @@ all of the Docker containers created by Kubernetes. To achieve this, it must ru
it is still useful to use containers for deployment and management, so we create a simpler _bootstrap_ daemon to achieve this.
## Master Node
The first step in the process is to initialize the master node.
See [here](docker-multinode/master.md) for detailed instructions.

View File

@ -30,7 +30,9 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Installing a Kubernetes Master Node via Docker
We'll begin by setting up the master node. For the purposes of illustration, we'll assume that the IP of this machine is ```${MASTER_IP}```
There are two main phases to installing the master:
@ -45,6 +47,7 @@ There is a [bug](https://github.com/docker/docker/issues/14106) in Docker 1.7.0
Please install Docker 1.6.2 or wait for Docker 1.7.1.
### Setup Docker-Bootstrap
We're going to use ```flannel``` to set up networking between Docker daemons. Flannel itself (and etcd on which it relies) will run inside of
Docker containers themselves. To achieve this, we need a separate "bootstrap" instance of the Docker daemon. This daemon will be started with
```--iptables=false``` so that it can only run containers with ```--net=host```. That's sufficient to bootstrap our system.
@ -61,6 +64,7 @@ across reboots and failures.
### Startup etcd for flannel and the API server to use
Run:
```
@ -75,11 +79,13 @@ sudo docker -H unix:///var/run/docker-bootstrap.sock run --net=host gcr.io/googl
### Set up Flannel on the master node
Flannel is a network abstraction layer built by CoreOS; we will use it to provide simplified networking between our Pods of containers.
Flannel re-configures the bridge that Docker uses for networking. As a result we need to stop Docker, reconfigure its networking, and then restart Docker.
#### Bring down Docker
To re-configure Docker to use flannel, we need to take docker down, run flannel and then restart Docker.
Turning down Docker is system dependent; it may be:
@ -113,6 +119,7 @@ sudo docker -H unix:///var/run/docker-bootstrap.sock exec <really-long-hash-from
```
#### Edit the docker configuration
You now need to edit the docker configuration to activate new flags. Again, this is system specific.
This may be in ```/etc/default/docker``` or ```/etc/systemd/service/docker.service``` or it may be elsewhere.
@ -124,6 +131,7 @@ Regardless, you need to add the following to the docker command line:
```
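# Illustrative values (an assumption, not necessarily the exact flags in this
# guide): point Docker's bridge at the subnet and MTU that flannel allocated.
--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}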
#### Remove the existing Docker bridge
Docker creates a bridge named ```docker0``` by default. You need to remove this:
```sh
@ -134,6 +142,7 @@ sudo brctl delbr docker0
You may need to install the ```bridge-utils``` package for the ```brctl``` binary.
#### Restart Docker
Again this is system dependent; it may be:
```sh
@ -147,6 +156,7 @@ systemctl start docker
```
## Starting the Kubernetes Master
OK, now that your networking is set up, you can start up Kubernetes. This is the same as the single-node case; we will use the "main" instance of the Docker daemon for the Kubernetes components.
```sh
@ -160,6 +170,7 @@ sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0
```
### Test it out
At this point, you should have a functioning 1-node cluster. Let's test it out!
Download the kubectl binary
@ -184,6 +195,7 @@ If all else fails, ask questions on IRC at [#google-containers](http://webchat.f
### Next steps
Move on to [adding one or more workers](worker.md)

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Testing your Kubernetes cluster.
To validate that your node(s) have been added, run:

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Adding a Kubernetes worker node via Docker.
@ -44,6 +45,7 @@ For each worker node, there are three steps:
* [Add the worker to the cluster](#add-the-node-to-the-cluster)
### Set up Flanneld on the worker node
As before, the Flannel daemon is going to provide network connectivity.
_Note_:
@ -52,6 +54,7 @@ Please install Docker 1.6.2 or wait for Docker 1.7.1.
#### Set up a bootstrap docker
As before, we need a second instance of the Docker daemon running to bootstrap the flannel networking.
Run:
@ -65,6 +68,7 @@ If you are running this on a long running system, rather than experimenting, you
across reboots and failures.
#### Bring down Docker
To re-configure Docker to use flannel, we need to take docker down, run flannel and then restart Docker.
Turning down Docker is system dependent; it may be:
@ -99,6 +103,7 @@ sudo docker -H unix:///var/run/docker-bootstrap.sock exec <really-long-hash-from
#### Edit the docker configuration
You now need to edit the docker configuration to activate new flags. Again, this is system specific.
This may be in ```/etc/default/docker``` or ```/etc/systemd/service/docker.service``` or it may be elsewhere.
@ -110,6 +115,7 @@ Regardless, you need to add the following to the docker command line:
```
#### Remove the existing Docker bridge
Docker creates a bridge named ```docker0``` by default. You need to remove this:
```sh
@ -120,6 +126,7 @@ sudo brctl delbr docker0
You may need to install the ```bridge-utils``` package for the ```brctl``` binary.
#### Restart Docker
Again this is system dependent; it may be:
```sh
@ -133,7 +140,9 @@ systemctl start docker
```
### Start Kubernetes on the worker node
#### Run the kubelet
Again this is similar to the above, but the ```--api_servers``` now points to the master we set up in the beginning.
```sh
@ -141,6 +150,7 @@ sudo docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.
```
#### Run the service proxy
The service proxy provides load-balancing between groups of containers defined by Kubernetes ```Services```
```sh
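# Illustrative sketch (the image tag and flags are assumptions, not necessarily
# the exact command): run the proxy via hyperkube, pointed at the master.
sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.21.2 /hyperkube proxy --master=http://${MASTER_IP}:8080 --v=2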

View File

@ -53,6 +53,7 @@ Here's a diagram of what the final result will look like:
![Kubernetes Single Node on Docker](k8s-singlenode-docker.png)
### Prerequisites
1. You need to have docker installed on one machine.
### Step One: Run etcd
@ -70,6 +71,7 @@ docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/go
This actually runs the kubelet, which in turn runs a [pod](../user-guide/pods.md) that contains the other master components.
### Step Three: Run the service proxy
*Note, this could be combined with master above, but it requires --privileged for iptables manipulation*
```sh
@ -77,6 +79,7 @@ docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.21.2
```
### Test it out
At this point you should have a running kubernetes cluster. You can test this by downloading the kubectl
binary
([OS X](https://storage.googleapis.com/kubernetes-release/release/v0.18.2/bin/darwin/amd64/kubectl))
@ -134,6 +137,7 @@ curl <insert-ip-from-above-here>
Note that you will need to run this curl command on your boot2docker VM if you are running on OS X.
### A note on turning down your cluster
Many of these containers run under the management of the ```kubelet``` binary, which attempts to keep containers running, even if they fail. So, in order to turn down
the cluster, you need to first kill the kubelet container, and then any other containers.

View File

@ -44,7 +44,7 @@ Configuring kubernetes on Fedora via Ansible offers a simple way to quickly crea
- [Setting up the cluster](#setting-up-the-cluster)
- [Testing and using your new cluster](#testing-and-using-your-new-cluster)
##Prerequisites
## Prerequisites
1. Host able to run ansible and able to clone the following repo: [kubernetes-ansible](https://github.com/eparis/kubernetes-ansible)
2. A Fedora 20+ or RHEL7 host to act as cluster master

View File

@ -39,6 +39,7 @@ Getting started on [Fedora](http://fedoraproject.org)
- [Instructions](#instructions)
## Prerequisites
1. You need 2 or more machines with Fedora installed.
## Instructions

View File

@ -46,6 +46,7 @@ Kubernetes multiple nodes cluster with flannel on Fedora
This document describes how to deploy kubernetes on multiple hosts to set up a multi-node cluster and networking with flannel. Follow the fedora [getting started guide](fedora_manual_config.md) to set up 1 master (fed-master) and 2 or more nodes. Make sure that all nodes have different names (fed-node1, fed-node2 and so on) and labels (fed-node1-label, fed-node2-label, and so on) to avoid any conflict. Also make sure that the kubernetes master host is running etcd, kube-controller-manager, kube-scheduler, and kube-apiserver services, and the nodes are running docker, kube-proxy and kubelet services. Now install flannel on the kubernetes nodes. Flannel on each node configures an overlay network that docker uses, and runs on each node to set up a unique class-C container network.
## Prerequisites
1. You need 2 or more machines with Fedora installed.
## Master Setup
@ -124,7 +125,7 @@ FLANNEL_OPTIONS=""
***
##**Test the cluster and flannel configuration**
## **Test the cluster and flannel configuration**
* Now check the interfaces on the nodes. Notice there is now a flannel.1 interface, and the ip addresses of docker0 and flannel.1 interfaces are in the same network. You will notice that docker0 is assigned a subnet (18.16.29.0/24 as shown below) on each kubernetes node out of the IP range configured above. A working output should look like this:

View File

@ -188,6 +188,7 @@ Then, see [a simple nginx example](../../docs/user-guide/simple-nginx.md) to try
For more complete applications, please look in the [examples directory](../../examples/). The [guestbook example](../../examples/guestbook/) is a good "getting started" walkthrough.
### Tearing down the cluster
To remove/delete/teardown the cluster, use the `kube-down.sh` script.
```bash
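# As in the AWS guide above, teardown is a single script run from the
# kubernetes directory:
cluster/kube-down.sh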

View File

@ -160,6 +160,7 @@ hack/local-up-cluster.sh
One or more of the kubernetes daemons might've crashed. Tail the logs of each in /tmp.
#### The pods fail to connect to the services by host names
The local-up-cluster.sh script doesn't start a DNS service. A similar situation can be found [here](https://github.com/GoogleCloudPlatform/kubernetes/issues/6667). You can start one manually. Related documents can be found [here](../../cluster/addons/dns/#how-do-i-configure-it)

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Cluster Level Logging with Elasticsearch and Kibana
On the Google Compute Engine (GCE) platform the default cluster level logging support targets

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Cluster Level Logging to Google Cloud Logging
A Kubernetes cluster will typically be humming along running many system and application pods. How does the system administrator collect, manage and query the logs of the system pods? How does a user query the logs of their application which is composed of many pods which may be restarted or automatically generated by the Kubernetes system? These questions are addressed by the Kubernetes **cluster level logging** services.

View File

@ -46,6 +46,7 @@ Getting started with Kubernetes on Mesos
- [Test Guestbook App](#test-guestbook-app)
## About Kubernetes on Mesos
<!-- TODO: Update, clean up. -->
Mesos allows dynamic sharing of cluster resources between Kubernetes and other first-class Mesos frameworks such as [Hadoop][1], [Spark][2], and [Chronos][3].
@ -97,6 +98,7 @@ $ export KUBERNETES_MASTER=http://${KUBERNETES_MASTER_IP}:8888
```
### Deploy etcd
Start etcd and verify that it is running:
```bash
@ -118,6 +120,7 @@ curl -L http://${KUBERNETES_MASTER_IP}:4001/v2/keys/
If connectivity is OK, you will see an output of the available keys in etcd (if any).
### Start Kubernetes-Mesos Services
Update your PATH to more easily run the Kubernetes-Mesos binaries:
```bash
@ -176,6 +179,7 @@ $ disown -a
```
#### Validate KM Services
Add the appropriate binary folder to your ```PATH``` to access kubectl:
```bash
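# Illustrative sketch -- the exact path is an assumption and depends on your
# platform and checkout layout:
export PATH=<path/to/kubernetes-directory>/platforms/linux/amd64:$PATH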

View File

@ -58,23 +58,26 @@ The current cluster design is inspired by:
- [Angus Lees](https://github.com/anguslees/kube-openstack)
## Prerequisites
1. Python2.7
2. You need to have both `nova` and `swiftly` installed. It's recommended to install these packages into a python virtualenv.
3. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs. See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova.html) for more details.
##Provider: Rackspace
## Provider: Rackspace
- To build your own released version from source, use `export KUBERNETES_PROVIDER=rackspace` and run `bash hack/dev-build-and-up.sh`
- Note: The get.k8s.io install method is not working yet for our scripts.
* To install the latest released version of kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash`
## Build
1. The kubernetes binaries will be built via the common build scripts in `build/`.
2. If you've set the ENV `KUBERNETES_PROVIDER=rackspace`, the scripts will upload `kubernetes-server-linux-amd64.tar.gz` to Cloud Files.
3. A cloud files container will be created via the `swiftly` CLI and a temp URL will be enabled on the object.
4. The built `kubernetes-server-linux-amd64.tar.gz` will be uploaded to this container and the URL will be passed to master/nodes when booted.
## Cluster
There is a specific `cluster/rackspace` directory with the scripts for the following steps:
1. A cloud network will be created and all instances will be attached to this network.
- flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network.
@ -83,6 +86,7 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo
4. We then boot as many nodes as defined via `$NUM_MINIONS`.
## Some notes
- The scripts expect `eth2` to be the cloud network that the containers will communicate across.
- A number of the items in `config-default.sh` are overridable via environment variables.
- For older versions please either:
@ -92,6 +96,7 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo
* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz)
## Network Design
- eth0 - Public Interface used for servers/containers to reach the internet
- eth1 - ServiceNet - Intra-cluster communication (k8s, etcd, etc) communicate via this interface. The `cloud-config` files use the special CoreOS identifier `$private_ipv4` to configure the services.
- eth2 - Cloud Network - Used for k8s pods to communicate with one another. The proxy service will pass traffic via this interface.

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Run Kubernetes with rkt
This document describes how to run Kubernetes using [rkt](https://github.com/coreos/rkt) as a container runtime.
@ -127,6 +128,7 @@ Note: CoreOS is not supported as the master using the automated launch
scripts. The master node is always Ubuntu.
### Getting started with your cluster
See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try out your new cluster.
For more complete applications, please look in the [examples directory](../../../examples/).

View File

@ -73,6 +73,7 @@ steps that existing cluster setup scripts are making.
## Designing and Preparing
### Learning
1. You should be familiar with using Kubernetes already. We suggest you set
up a temporary cluster by following one of the other Getting Started Guides.
This will help you become familiar with the CLI ([kubectl](../user-guide/kubectl/kubectl.md)) and concepts ([pods](../user-guide/pods.md), [services](../user-guide/services.md), etc.) first.
@ -80,6 +81,7 @@ steps that existing cluster setup scripts are making.
effect of completing one of the other Getting Started Guides.
### Cloud Provider
Kubernetes has the concept of a Cloud Provider, which is a module that provides
an interface for managing TCP Load Balancers, Nodes (Instances) and Networking Routes.
The interface is defined in `pkg/cloudprovider/cloud.go`. It is possible to
@ -88,6 +90,7 @@ bare-metal), and not all parts of the interface need to be implemented, dependin
on how flags are set on various components.
### Nodes
- You can use virtual or physical machines.
- While you can build a cluster with 1 machine, in order to run all the examples and tests you
need at least 4 nodes.
@ -101,6 +104,7 @@ on how flags are set on various components.
have identical configurations.
### Network
Kubernetes has a distinctive [networking model](../admin/networking.md).
Kubernetes allocates an IP address to each pod. When creating a cluster, you
@ -168,6 +172,7 @@ region of the world, etc.
need to distinguish which resources each created. Call this `CLUSTERNAME`.
### Software Binaries
You will need binaries for:
- etcd
- A container runner, one of:
@ -181,6 +186,7 @@ You will need binaries for:
- kube-scheduler
#### Downloading and Extracting Kubernetes Binaries
A Kubernetes binary release includes all the Kubernetes binaries as well as the supported release of etcd.
You can use a Kubernetes binary release (recommended) or build your Kubernetes binaries following the instructions in the
[Developer Documentation](../devel/README.md). Only using a binary release is covered in this guide.
@ -191,6 +197,7 @@ Then, within the second set of unzipped files, locate `./kubernetes/server/bin`,
all the necessary binaries.
#### Selecting Images
You will run docker, kubelet, and kube-proxy outside of a container, the same way you would run any system daemon, so
you just need the bare binaries. For etcd, kube-apiserver, kube-controller-manager, and kube-scheduler,
we recommend that you run these as containers, so you need an image to be built.
@ -239,6 +246,7 @@ There are two main options for security:
If following the HTTPS approach, you will need to prepare certs and credentials.
#### Preparing Certs
You need to prepare several certs:
- The master needs a cert to act as an HTTPS server.
- The kubelets optionally need certs to identify themselves as clients of the master, and when
@ -263,6 +271,7 @@ You will end up with the following files (we will use these variables later on)
- optional
#### Preparing Credentials
The admin user (and any users) need:
- a token or a password to identify them.
- tokens are just long alphanumeric strings, e.g. 32 chars. See
@ -340,6 +349,7 @@ Started Guide. After getting a cluster running, you can then copy the init.d s
cluster, and then modify them for use on your custom cluster.
### Docker
The minimum required Docker version will vary as the kubelet version changes. The newest stable release is a good choice. Kubelet will log a warning and refuse to start pods if the version is too old, so pick a version and try it.
If you previously had Docker installed on a node without setting Kubernetes-specific
@ -423,6 +433,7 @@ Arguments to consider:
- `--api-servers=http://$MASTER_IP`
### Networking
Each node needs to be allocated its own CIDR range for pod networking.
Call this `NODE_X_POD_CIDR`.
@ -463,6 +474,7 @@ any masquerading at all. Others, such as GCE, will not allow pod IPs to send
traffic to the internet, but have no problem with them inside your GCE Project.
### Other
- Enable auto-upgrades for your OS package manager, if desired.
- Configure log rotation for all node components (e.g. using [logrotate](http://linux.die.net/man/8/logrotate)).
- Setup liveness-monitoring (e.g. using [monit](http://linux.die.net/man/1/monit)).
@ -471,6 +483,7 @@ traffic to the internet, but have no problem with them inside your GCE Project.
volumes.
### Using Configuration Management
The previous steps all involved "conventional" system administration techniques for setting up
machines. You may want to use a Configuration Management system to automate the node configuration
process. There are examples of [Saltstack](../admin/salt.md), Ansible, Juju, and CoreOS Cloud Config in the
@ -486,6 +499,7 @@ all configured and managed *by Kubernetes*:
- they are kept running by Kubernetes rather than by init.
### etcd
You will need to run one or more instances of etcd.
- Recommended approach: run one etcd instance, with its log written to a directory backed
by durable storage (RAID, GCE PD)
@ -614,6 +628,7 @@ node disk.
Optionally, you may want to mount `/var/log` as well and redirect output there.
#### Starting Apiserver
Place the completed pod template into the kubelet config dir
(whatever `--config=` argument of kubelet is set to, typically
`/etc/kubernetes/manifests`).
@ -689,6 +704,7 @@ Optionally, you may want to mount `/var/log` as well and redirect output there.
Start as described for apiserver.
### Controller Manager
To run the controller manager:
- select the correct flags for your cluster
- write a pod spec for the controller manager using the provided template
@ -804,6 +820,7 @@ The nodes must be able to connect to each other using their private IP. Verify t
pinging or SSH-ing from one node to another.
### Getting Help
If you run into trouble, please see the section on [troubleshooting](gce.md#troubleshooting), post to the
[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on IRC at [#google-containers](http://webchat.freenode.net/?channels=google-containers) on freenode.

View File

@ -48,6 +48,7 @@ This document describes how to deploy kubernetes on ubuntu nodes, including 1 ku
[Cloud team from Zhejiang University](https://github.com/ZJU-SEL) will maintain this work.
## Prerequisites
*1 The nodes have docker version 1.2+ and bridge-utils installed to manipulate the linux bridge*
*2 All machines can communicate with each other; no Internet connection is needed (a private docker registry should be used in this case)*
@ -60,6 +61,7 @@ This document describes how to deploy kubernetes on ubuntu nodes, including 1 ku
### Starting a Cluster
#### Make *kubernetes*, *etcd* and *flanneld* binaries
First clone the kubernetes github repo: `$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git`
@ -74,6 +76,7 @@ Please make sure that there are `kube-apiserver`, `kube-controller-manager`, `ku
> We use flannel here because we want an overlay network, but please remember it is not the only choice, nor is it a necessary dependency of k8s. You can build up a k8s cluster natively, or use flannel, Open vSwitch or any other SDN tool you like; we just chose flannel here as an example.
#### Configure and start the kubernetes cluster
An example cluster is listed below:
| IP Address | Role |

View File

@ -30,6 +30,7 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Getting started with Vagrant
Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/develop on your local machine (Linux, Mac OS X).
@ -53,6 +54,7 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve
- [I want vagrant to sync folders via nfs!](#i-want-vagrant-to-sync-folders-via-nfs)
### Prerequisites
1. Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html
2. Install one of:
1. Version 4.3.28 of Virtual Box from https://www.virtualbox.org/wiki/Download_Old_Builds_4_3
@ -366,6 +368,7 @@ export KUBERNETES_MINION_MEMORY=2048
```
#### I ran vagrant suspend and nothing works!
```vagrant suspend``` seems to mess up the network. This is not supported at this time.
#### I want vagrant to sync folders via nfs!

View File

@ -30,7 +30,9 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Abstract
Auto-scaling is a data-driven feature that allows users to increase or decrease capacity as needed by controlling the
number of pods deployed within the system automatically.
@ -230,6 +232,7 @@ Since an auto-scaler is a durable object it is best represented as a resource.
```
#### Boundary Definitions
The `AutoScaleThreshold` definitions provide the boundaries for the auto-scaler. By defining comparisons that form a range
along with positive and negative increments you may define bi-directional scaling. For example the upper bound may be
specified as "when requests per second rise above 50 for 30 seconds scale the application up by 1" and a lower bound may
@ -251,6 +254,7 @@ Of note: If the statistics gathering mechanisms can be initialized with a regist
potentially piggyback on this registry.
### Multi-target Scaling Policy
If multiple scalable targets satisfy the `TargetSelector` criteria the auto-scaler should be configurable as to which
target(s) are scaled. To begin with, if multiple targets are found the auto-scaler will scale the largest target up
or down as appropriate. In the future this may be more configurable.

View File

@ -30,12 +30,15 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
#Kubernetes Cluster Federation
##(a.k.a. "Ubernetes")
# Kubernetes Cluster Federation
## (a.k.a. "Ubernetes")
## Requirements Analysis and Product Proposal
## _by Quinton Hoole ([quinton@google.com](mailto:quinton@google.com))_
_Initial revision: 2015-03-05_
_Last updated: 2015-03-09_
This doc: [tinyurl.com/ubernetesv2](http://tinyurl.com/ubernetesv2)
@ -417,7 +420,7 @@ TBD: All very hand-wavey still, but some initial thoughts to get the conversatio
![image](federation-high-level-arch.png)
## Ubernetes API
## Ubernetes API
This looks a lot like the existing Kubernetes API but is explicitly multi-cluster.

View File

@ -30,10 +30,13 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# High Availability of Scheduling and Controller Components in Kubernetes
This document serves as a proposal for high availability of the scheduler and controller components in kubernetes. This proposal is intended to provide a simple High Availability api for kubernetes components with the potential to extend to services running on kubernetes. Those services would be subject to their own constraints.
## Design Options
For complete reference see [this](https://www.ibm.com/developerworks/community/blogs/RohitShetty/entry/high_availability_cold_warm_hot?lang=en)
1. Hot Standby: In this scenario, data and state are shared between the two components such that an immediate failure in one component causes the standby daemon to take over exactly where the failed component had left off. This would be an ideal solution for kubernetes, however it poses a series of challenges in the case of controllers where component-state is cached locally and not persisted in a transactional way to a storage facility. This would also introduce additional load on the apiserver, which is not desirable. As a result, we are **NOT** planning on this approach at this time.
@ -43,6 +46,7 @@ For complete reference see [this](https://www.ibm.com/developerworks/community/b
3. Active-Active (Load Balanced): Clients can simply load-balance across any number of servers that are currently running. Their general availability can be continuously updated, or published, such that load balancing only occurs across active participants. This aspect of HA is outside of the scope of *this* proposal because there is already a partial implementation in the apiserver.
## Design Discussion Notes on Leader Election
Implementation References:
* [zookeeper](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_leaderElection)
* [etcd](https://groups.google.com/forum/#!topic/etcd-dev/EbAa4fjypb4)
@ -55,11 +59,13 @@ The first component to request leadership will become the master. All other com
The component that becomes master should create a thread to manage the lease. This thread should be created with a channel that the main process can use to release the master lease. The master should release the lease in cases of an unrecoverable error and clean shutdown. Otherwise, this process will renew the lease and sleep, waiting for the next renewal time or notification to release the lease. If there is a failure to renew the lease, this process should force the entire component to exit. Daemon exit is meant to prevent potential split-brain conditions. Daemon restart is implied in this scenario, by either the init system (systemd), or possible watchdog processes. (See Design Discussion Notes)
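A minimal sketch of that renew-or-exit loop (illustrative only; the lease API and names below are assumptions, not part of this proposal):

```go
package main

import (
	"log"
	"os"
	"time"
)

// renewLease is a stand-in for whatever mechanism (e.g. an etcd compare-and-swap)
// actually extends the master lease; it returns false if renewal failed.
func renewLease() bool { return true }

// manageLease renews the lease until asked to release it, and forces the whole
// component to exit if a renewal fails, to avoid split-brain conditions.
func manageLease(release <-chan struct{}, leaseDuration time.Duration) {
	ticker := time.NewTicker(leaseDuration / 2) // renew well before expiry
	defer ticker.Stop()
	for {
		select {
		case <-release:
			// Clean shutdown or unrecoverable error: give up mastership.
			return
		case <-ticker.C:
			if !renewLease() {
				log.Print("lost master lease, exiting")
				os.Exit(1) // systemd (or another watchdog) is expected to restart us
			}
		}
	}
}

func main() {
	release := make(chan struct{})
	go manageLease(release, 30*time.Second)
	time.Sleep(5 * time.Second)
	close(release)
}
```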
## Options added to components with HA functionality
Some command line options would be added to components that can do HA:
* Lease Duration - How long a component can be master
## Design Discussion Notes
Some components may run numerous threads in order to perform tasks in parallel. Upon losing master status, such components should exit instantly instead of attempting to gracefully shut down such threads. This is to ensure that, in the case there's some propagation delay in informing the threads they should stop, the lame-duck threads won't interfere with the new master. The component should exit with an exit code indicating that the component is not the master. Since all components will be run by systemd or some other monitoring system, this will just result in a restart.
There is a short window after a new master acquires the lease, during which data from the old master might be committed. This is because there is currently no way to condition a write on its source being the master. Having the daemons exit shortens this window but does not eliminate it. A proper solution for this problem will be addressed at a later date. The proposed solution is:
@ -75,6 +81,7 @@ There is a short window after a new master acquires the lease, during which data
5. When the API server makes the corresponding write to etcd, it includes it in a transaction that does a compare-and-swap on the "current master" entry (old value == new value == host:port and sequence number from the replica that sent the mutating operation). This basically guarantees that if we elect the new master, all transactions coming from the old master will fail. You can think of this as the master attaching a "precondition" of its belief about who is the latest master.
## Open Questions
* Is there a desire to keep track of all nodes for a specific component type?

Some files were not shown because too many files have changed in this diff.