rewrite all links to issues to k8s links

This commit is contained in:
Mike Danese 2015-08-05 18:08:26 -07:00
parent 7c9bbef96e
commit fe6b15ba2f
40 changed files with 66 additions and 66 deletions

View File

@@ -6,7 +6,7 @@ To build Kubernetes you need to have access to a Docker installation through eit
1. Be running Docker. Two options are supported/tested:
1. **Mac OS X** The best way to go is to use `boot2docker`. See instructions [here](https://docs.docker.com/installation/mac/).
**Note**: You will want to set the boot2docker VM to have at least 3GB of initial memory or building will likely fail. (See: [#11852](https://github.com/GoogleCloudPlatform/kubernetes/issues/11852))
**Note**: You will want to set the boot2docker VM to have at least 3GB of initial memory or building will likely fail. (See: [#11852](http://issue.k8s.io/11852))
2. **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS. The scripts here assume that they are using a local Docker server and that they can "reach around" docker and grab results directly from the file system.
2. Have Python installed. It is installed pretty much everywhere at this point, so you can probably ignore this.
3. *Optional* For uploading your release to Google Cloud Storage, have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed and configured.
@@ -89,7 +89,7 @@ These are in no particular order
* [X] Harmonize with scripts in `hack/`. How much do we support building outside of Docker and these scripts?
* [X] Deprecate/replace most of the stuff in `hack/`
* [ ] Finish support for the Dockerized runtime. Issue [#19](https://github.com/GoogleCloudPlatform/kubernetes/issues/19). A key issue here is to make this fast/light enough that we can use it for development workflows.
* [ ] Finish support for the Dockerized runtime. Issue [#19](http://issue.k8s.io/19). A key issue here is to make this fast/light enough that we can use it for development workflows.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/README.md?pixel)]()

View File

@@ -636,7 +636,7 @@ function kube-up {
# Generate a bearer token for this cluster. We push this separately
# from the other cluster variables so that the client (this
# computer) can forget it later. This should disappear with
# https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
# http://issue.k8s.io/3168
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
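For readers unfamiliar with the pipeline above: it draws 128 random bytes, base64-encodes them, deletes the characters `=`, `+`, and `/`, and keeps the first 32 characters. A rough Go equivalent, offered as an illustrative sketch rather than anything the cluster scripts actually ship:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"strings"
)

// newBearerToken mirrors the dd | base64 | tr | dd pipeline: 128 random
// bytes, base64-encoded, with "=", "+" and "/" removed, truncated to 32
// characters. Illustrative only.
func newBearerToken() (string, error) {
	buf := make([]byte, 128)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	tok := strings.Map(func(r rune) rune {
		switch r {
		case '=', '+', '/':
			return -1 // drop the rune, like tr -d "=+/"
		}
		return r
	}, base64.StdEncoding.EncodeToString(buf))
	if len(tok) > 32 {
		tok = tok[:32]
	}
	return tok, nil
}

func main() {
	tok, err := newBearerToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(tok) // e.g. a value for KUBELET_TOKEN
}
```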
@@ -1083,7 +1083,7 @@ function kube-push {
# is solved (because that's blocking automatic dynamic nodes from
# working). The node-kube-env has to be composed with the KUBELET_TOKEN
# and KUBE_PROXY_TOKEN. Ideally we would have
# https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
# http://issue.k8s.io/3168
# implemented before then, though, so avoiding this mess until then.
echo

View File

@@ -87,7 +87,7 @@ class Registrator:
'message', '').startswith('The requested resource does not exist'):
# There's something fishy in the kube api here (0.4 dev): the first time we
# go to register a new minion, we always seem to get this error.
# https://github.com/GoogleCloudPlatform/kubernetes/issues/1995
# http://issue.k8s.io/1995
time.sleep(1)
print("Retrying registration...")
raise ValueError("Registration returned 500, retry")

View File

@@ -33,7 +33,7 @@
# boot, run-salt will install kube-apiserver.manifest files to the
# kubelet config directory before the proper version of the kubelet
# is installed. Please see
# https://github.com/GoogleCloudPlatform/kubernetes/issues/10122#issuecomment-114566063
# http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/kube-apiserver.manifest:
file.managed:

View File

@@ -2,7 +2,7 @@
# The ordering of the salt states for the docker, kubelet and
# master-addon services below is very important: it avoids a race
# between salt restarting docker or kubelet and kubelet starting the master components.
# Please see https://github.com/GoogleCloudPlatform/kubernetes/issues/10122#issuecomment-114566063
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/kube-controller-manager.manifest:
file.managed:

View File

@@ -2,7 +2,7 @@
# The ordering of the salt states for the docker, kubelet and
# master-addon services below is very important: it avoids a race
# between salt restarting docker or kubelet and kubelet starting the master components.
# Please see https://github.com/GoogleCloudPlatform/kubernetes/issues/10122#issuecomment-114566063
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/kube-scheduler.manifest:
file.managed:

View File

@@ -32,7 +32,7 @@ KUBECTL="${KUBE_OUTPUT_HOSTBIN}/kubectl"
# List of resources to be updated.
# TODO: Get this list of resources from the server once
# https://github.com/GoogleCloudPlatform/kubernetes/issues/2057 is fixed.
# http://issue.k8s.io/2057 is fixed.
declare -a resources=(
"endpoints"
"events"

View File

@@ -78,7 +78,7 @@ containers:
memory: 200Mi
```
These limits, however, are based on data collected from addons running on 4-node clusters (see [#10335](https://github.com/GoogleCloudPlatform/kubernetes/issues/10335#issuecomment-117861225)). The addons consume a lot more resources when running on large deployment clusters (see [#5880](https://github.com/GoogleCloudPlatform/kubernetes/issues/5880#issuecomment-113984085)). So, if a large cluster is deployed without adjusting these values, the addons may continuously get killed because they keep hitting the limits.
These limits, however, are based on data collected from addons running on 4-node clusters (see [#10335](http://issue.k8s.io/10335#issuecomment-117861225)). The addons consume a lot more resources when running on large deployment clusters (see [#5880](http://issue.k8s.io/5880#issuecomment-113984085)). So, if a large cluster is deployed without adjusting these values, the addons may continuously get killed because they keep hitting the limits.
To avoid running into cluster addon resource issues, when creating a cluster with many nodes, consider the following:
* Scale memory and CPU limits for each of the following addons, if used, along with the size of cluster (there is one replica of each handling the entire cluster so memory and CPU usage tends to grow proportionally with size/load on cluster):

View File

@@ -118,8 +118,8 @@ Pod configs should be largely portable between Org-run and hosted configuration
# Design
Related discussion:
- https://github.com/GoogleCloudPlatform/kubernetes/issues/442
- https://github.com/GoogleCloudPlatform/kubernetes/issues/443
- http://issue.k8s.io/442
- http://issue.k8s.io/443
This doc describes two security profiles:
- Simple profile: like single-user mode. Make it easy to evaluate K8s without configuring lots of accounts and policies. Protects from unauthorized users, but does not partition authorized users.
@@ -176,7 +176,7 @@ Initially:
Improvements:
- Kubelet allocates disjoint blocks of root-namespace uids for each container. This may provide some defense-in-depth against container escapes. (https://github.com/docker/docker/pull/4572)
- requires docker to integrate user namespace support, and a decision about what getpwnam() does for these uids.
- any features that help users avoid use of privileged containers (https://github.com/GoogleCloudPlatform/kubernetes/issues/391)
- any features that help users avoid use of privileged containers (http://issue.k8s.io/391)
### Namespaces
@@ -253,7 +253,7 @@ Policy objects may be applicable only to a single namespace or to all namespaces
## Accounting
The API should have a `quota` concept (see https://github.com/GoogleCloudPlatform/kubernetes/issues/442). A quota object relates a namespace (and optionally a label selector) to a maximum quantity of resources that may be used (see [resources design doc](resources.md)).
The API should have a `quota` concept (see http://issue.k8s.io/442). A quota object relates a namespace (and optionally a label selector) to a maximum quantity of resources that may be used (see [resources design doc](resources.md)).
Initially:
- a `quota` object is immutable.
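As a rough illustration of the relationship described above, a `quota` object could be shaped like the following Go sketch. The field names are hypothetical, chosen only to mirror the prose, not the API that was eventually adopted:

```go
// Quota binds a namespace (and an optional label selector) to hard caps on
// resource usage. Hypothetical field names, for illustration only.
type Quota struct {
	Namespace string
	Selector  map[string]string // optional label selector; nil means the whole namespace
	Hard      map[string]int64  // e.g. "cpu" in milli-KCUs, "memory" in bytes
}
```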

View File

@@ -37,7 +37,7 @@ Documentation for other releases can be found at
| Topic | Link |
| ----- | ---- |
| Separate validation from RESTStorage | https://github.com/GoogleCloudPlatform/kubernetes/issues/2977 |
| Separate validation from RESTStorage | http://issue.k8s.io/2977 |
## Background

View File

@@ -44,9 +44,9 @@ This describes an approach for providing support for:
There are several related issues/PRs:
- [Support attach](https://github.com/GoogleCloudPlatform/kubernetes/issues/1521)
- [Real container ssh](https://github.com/GoogleCloudPlatform/kubernetes/issues/1513)
- [Provide easy debug network access to services](https://github.com/GoogleCloudPlatform/kubernetes/issues/1863)
- [Support attach](http://issue.k8s.io/1521)
- [Real container ssh](http://issue.k8s.io/1513)
- [Provide easy debug network access to services](http://issue.k8s.io/1863)
- [OpenShift container command execution proposal](https://github.com/openshift/origin/pull/576)
## Motivation

View File

@@ -38,7 +38,7 @@ This document captures the design of event compression.
## Background
Kubernetes components can get into a state where they generate tons of events which are identical except for the timestamp. For example, when pulling a non-existing image, Kubelet will repeatedly generate `image_not_existing` and `container_is_waiting` events until upstream components correct the image. When this happens, the spam from the repeated events makes the entire event mechanism useless. It also appears to cause memory pressure in etcd (see [#3853](https://github.com/GoogleCloudPlatform/kubernetes/issues/3853)).
Kubernetes components can get into a state where they generate tons of events which are identical except for the timestamp. For example, when pulling a non-existing image, Kubelet will repeatedly generate `image_not_existing` and `container_is_waiting` events until upstream components correct the image. When this happens, the spam from the repeated events makes the entire event mechanism useless. It also appears to cause memory pressure in etcd (see [#3853](http://issue.k8s.io/3853)).
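The fix, in essence, is to key events on everything except the timestamp and keep a recurrence count instead of storing each duplicate. A minimal Go sketch of that idea, with a hypothetical key (the proposal below defines the actual schema):

```go
// eventKey identifies "the same" event regardless of when it occurred.
// The exact field set here is hypothetical.
type eventKey struct {
	Source, Object, Reason, Message string
}

// compressedEvent stores one entry per distinct key plus a recurrence count.
type compressedEvent struct {
	Count                       int
	FirstSeenUnix, LastSeenUnix int64
}

// record folds one occurrence into the table, bumping Count for duplicates
// instead of appending a new entry.
func record(table map[eventKey]*compressedEvent, k eventKey, nowUnix int64) {
	if e, ok := table[k]; ok {
		e.Count++
		e.LastSeenUnix = nowUnix
		return
	}
	table[k] = &compressedEvent{Count: 1, FirstSeenUnix: nowUnix, LastSeenUnix: nowUnix}
}
```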
## Proposal
@@ -109,10 +109,10 @@ This demonstrates what would have been 20 separate entries (indicating schedulin
## Related Pull Requests/Issues
* Issue [#4073](https://github.com/GoogleCloudPlatform/kubernetes/issues/4073): Compress duplicate events
* PR [#4157](https://github.com/GoogleCloudPlatform/kubernetes/issues/4157): Add "Update Event" to Kubernetes API
* PR [#4206](https://github.com/GoogleCloudPlatform/kubernetes/issues/4206): Modify Event struct to allow compressing multiple recurring events in to a single event
* PR [#4306](https://github.com/GoogleCloudPlatform/kubernetes/issues/4306): Compress recurring events in to a single event to optimize etcd storage
* Issue [#4073](http://issue.k8s.io/4073): Compress duplicate events
* PR [#4157](http://issue.k8s.io/4157): Add "Update Event" to Kubernetes API
* PR [#4206](http://issue.k8s.io/4206): Modify Event struct to allow compressing multiple recurring events in to a single event
* PR [#4306](http://issue.k8s.io/4306): Compress recurring events in to a single event to optimize etcd storage
* PR [#4444](https://github.com/GoogleCloudPlatform/kubernetes/pull/4444): Switch events history to use LRU cache instead of map

View File

@@ -33,7 +33,7 @@ Documentation for other releases can be found at
# Identifiers and Names in Kubernetes
A summarization of the goals and recommendations for identifiers in Kubernetes. Described in [GitHub issue #199](https://github.com/GoogleCloudPlatform/kubernetes/issues/199).
A summarization of the goals and recommendations for identifiers in Kubernetes. Described in [GitHub issue #199](http://issue.k8s.io/199).
## Definitions

View File

@@ -70,7 +70,7 @@ TODO: pluggability
## Bootstrapping
* [Self-hosting](https://github.com/GoogleCloudPlatform/kubernetes/issues/246) of all components is a goal.
* [Self-hosting](http://issue.k8s.io/246) of all components is a goal.
* Minimize the number of dependencies, particularly those required for steady-state operation.
* Stratify the dependencies that remain via principled layering.
* Break any circular dependencies by converting hard dependencies to soft dependencies.

View File

@@ -33,7 +33,7 @@ Documentation for other releases can be found at
**Note: this is a design doc, which describes features that have not been completely implemented.
User documentation of the current state is [here](../user-guide/compute-resources.md). The tracking issue for
implementation of this model is
[#168](https://github.com/GoogleCloudPlatform/kubernetes/issues/168). Currently, only memory and
[#168](http://issue.k8s.io/168). Currently, only memory and
cpu limits on containers (not pods) are supported. "memory" is in bytes and "cpu" is in
milli-cores.**
@@ -134,7 +134,7 @@ The following resource types are predefined ("reserved") by Kubernetes in the `k
* Units: Kubernetes Compute Unit seconds/second (i.e., CPU cores normalized to a canonical "Kubernetes CPU")
* Internal representation: milli-KCUs
* Compressible? yes
* Qualities: this is a placeholder for the kind of thing that may be supported in the future — see [#147](https://github.com/GoogleCloudPlatform/kubernetes/issues/147)
* Qualities: this is a placeholder for the kind of thing that may be supported in the future — see [#147](http://issue.k8s.io/147)
* [future] `schedulingLatency`: as per lmctfy
* [future] `cpuConversionFactor`: property of a node: the speed of a CPU core on the node's processor divided by the speed of the canonical Kubernetes CPU (a floating point value; default = 1.0).
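To make the units concrete: CPU is tracked internally in milli-KCUs, so a quantity in node-local cores converts by multiplying by the node's `cpuConversionFactor` and then by 1000. A trivial hedged helper, illustrative rather than library code:

```go
// toMilliKCU converts node-local CPU cores to milli-KCUs using the node's
// cpuConversionFactor (default 1.0). For example, 0.25 cores on a default
// node is 250 milli-KCUs. Illustrative only.
func toMilliKCU(cores, cpuConversionFactor float64) int64 {
	return int64(cores * cpuConversionFactor * 1000)
}
```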

View File

@@ -119,7 +119,7 @@ which consumes this type of secret, the Kubelet may take a number of actions:
file system
2. Configure that node's `kube-proxy` to decorate HTTP requests from that pod to the
`kubernetes-master` service with the auth token, e.g. by adding a header to the request
(see the [LOAS Daemon](https://github.com/GoogleCloudPlatform/kubernetes/issues/2209) proposal)
(see the [LOAS Daemon](http://issue.k8s.io/2209) proposal)
#### Example: service account consumes docker registry credentials
@@ -263,11 +263,11 @@ the right storage size for their installation and configuring their Kubelets cor
Configuring each Kubelet is not the ideal story for operator experience; it is more intuitive that
the cluster-wide storage size be readable from a central configuration store like the one proposed
in [#1553](https://github.com/GoogleCloudPlatform/kubernetes/issues/1553). When such a store
in [#1553](http://issue.k8s.io/1553). When such a store
exists, the Kubelet could be modified to read this configuration item from the store.
When the Kubelet is modified to advertise node resources (as proposed in
[#4441](https://github.com/GoogleCloudPlatform/kubernetes/issues/4441)), the capacity calculation
[#4441](http://issue.k8s.io/4441)), the capacity calculation
for available memory should factor in the potential size of the node-level tmpfs in order to avoid
memory overcommit on the node.
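In other words, whatever memory the node-level tmpfs could consume must be subtracted from what the node advertises as available. A minimal sketch of that adjustment, assuming the Kubelet knows both quantities (not the Kubelet's actual accounting code):

```go
// allocatableMemoryBytes returns the memory the node can advertise to the
// scheduler once the worst-case node-level tmpfs size is reserved.
// Illustrative sketch only.
func allocatableMemoryBytes(nodeMemory, tmpfsMax int64) int64 {
	if alloc := nodeMemory - tmpfsMax; alloc > 0 {
		return alloc
	}
	return 0
}
```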

View File

@@ -42,7 +42,7 @@ A security context is a set of constraints that are applied to a container in or
## Background
The problem of securing containers in Kubernetes has come up [before](https://github.com/GoogleCloudPlatform/kubernetes/issues/398) and the potential problems with container security are [well known](http://opensource.com/business/14/7/docker-security-selinux). Although it is not possible to completely isolate Docker containers from their hosts, new features like [user namespaces](https://github.com/docker/libcontainer/pull/304) make it possible to greatly reduce the attack surface.
The problem of securing containers in Kubernetes has come up [before](http://issue.k8s.io/398) and the potential problems with container security are [well known](http://opensource.com/business/14/7/docker-security-selinux). Although it is not possible to completely isolate Docker containers from their hosts, new features like [user namespaces](https://github.com/docker/libcontainer/pull/304) make it possible to greatly reduce the attack surface.
## Motivation

View File

@@ -195,7 +195,7 @@ References in the status of the referee to the referrer may be permitted, when t
#### Lists of named subobjects preferred over maps
Discussed in [#2004](https://github.com/GoogleCloudPlatform/kubernetes/issues/2004) and elsewhere. There are no maps of subobjects in any API objects. Instead, the convention is to use a list of subobjects containing name fields.
Discussed in [#2004](http://issue.k8s.io/2004) and elsewhere. There are no maps of subobjects in any API objects. Instead, the convention is to use a list of subobjects containing name fields.
For example:

View File

@@ -161,7 +161,7 @@ One or more of the Kubernetes daemons might have crashed. Tail the logs of each in
#### The pods fail to connect to the services by host names
The local-up-cluster.sh script doesn't start a DNS service. A similar situation can be found [here](https://github.com/GoogleCloudPlatform/kubernetes/issues/6667). You can start one manually. Related documents can be found [here](../../cluster/addons/dns/#how-do-i-configure-it)
The local-up-cluster.sh script doesn't start a DNS service. A similar situation can be found [here](http://issue.k8s.io/6667). You can start one manually. Related documents can be found [here](../../cluster/addons/dns/#how-do-i-configure-it)
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@@ -34,7 +34,7 @@ Documentation for other releases can be found at
# Run Kubernetes with rkt
This document describes how to run Kubernetes using [rkt](https://github.com/coreos/rkt) as a container runtime.
We still have [a bunch of work](https://github.com/GoogleCloudPlatform/kubernetes/issues/8262) to do to make the experience with rkt wonderful; please stay tuned!
We still have [a bunch of work](http://issue.k8s.io/8262) to do to make the experience with rkt wonderful; please stay tuned!
### **Prerequisite**

View File

@@ -46,18 +46,18 @@ done automatically based on statistical analysis and thresholds.
* Provide a concrete proposal for implementing auto-scaling pods within Kubernetes
* Implementation proposal should be in line with current discussions in existing issues:
* Scale verb - [1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629)
* Scale verb - [1629](http://issue.k8s.io/1629)
* Config conflicts - [Config](https://github.com/GoogleCloudPlatform/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes)
* Rolling updates - [1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353)
* Multiple scalable types - [1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624)
* Rolling updates - [1353](http://issue.k8s.io/1353)
* Multiple scalable types - [1624](http://issue.k8s.io/1624)
## Constraints and Assumptions
* This proposal is for horizontal scaling only. Vertical scaling will be handled in [issue 2072](https://github.com/GoogleCloudPlatform/kubernetes/issues/2072)
* This proposal is for horizontal scaling only. Vertical scaling will be handled in [issue 2072](http://issue.k8s.io/2072)
* `ReplicationControllers` will not know about the auto-scaler, they are the target of the auto-scaler. The `ReplicationController` responsibilities are
constrained to only ensuring that the desired number of pods are operational per the [Replication Controller Design](../user-guide/replication-controller.md#responsibilities-of-the-replication-controller)
* Auto-scalers will be loosely coupled with data gathering components in order to allow a wide variety of input sources
* Auto-scalable resources will support a scale verb ([1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629))
* Auto-scalable resources will support a scale verb ([1629](http://issue.k8s.io/1629))
such that the auto-scaler does not directly manipulate the underlying resource.
* Initially, most thresholds will be set by application administrators. It should be possible for an autoscaler to be
written later that sets thresholds automatically based on past behavior (CPU used vs incoming requests).
@@ -120,7 +120,7 @@ Since an auto-scaler is a durable object, it is best represented as a resource.
type AutoScalerInterface interface {
//ScaleApplication adjusts a resource's replica count. Calls scale endpoint.
//Args to this are based on what the endpoint
//can support. See https://github.com/GoogleCloudPlatform/kubernetes/issues/1629
//can support. See http://issue.k8s.io/1629
ScaleApplication(num int) error
}
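To illustrate how this interface separates policy from mechanism, here is a hypothetical implementation in which `scaleFn` stands in for the call to the scale endpoint; none of these names come from the proposal itself:

```go
// thresholdScaler is a hypothetical AutoScalerInterface implementation that
// clamps the requested replica count to configured bounds before calling the
// scale endpoint (represented here by scaleFn).
type thresholdScaler struct {
	minReplicas, maxReplicas int
	scaleFn                  func(num int) error
}

func (s *thresholdScaler) ScaleApplication(num int) error {
	if num < s.minReplicas {
		num = s.minReplicas
	}
	if num > s.maxReplicas {
		num = s.maxReplicas
	}
	return s.scaleFn(num)
}
```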

View File

@@ -219,7 +219,7 @@ Events:
The `Restart Count: 5` indicates that the `simmemleak` container in this pod was terminated and restarted 5 times.
Once [#10861](https://github.com/GoogleCloudPlatform/kubernetes/issues/10861) is resolved the reason for the termination of the last container will also be printed in this output.
Once [#10861](http://issue.k8s.io/10861) is resolved the reason for the termination of the last container will also be printed in this output.
Until then you can call `get pod` with the `-o template -t ...` option to fetch the status of previously terminated containers:
@@ -243,7 +243,7 @@ resource, and a framework for adding custom [resource types](../design/resources
The current system does not facilitate overcommitment of resources because resources reserved
with container limits are assured. It is planned to support multiple levels of [Quality of
Service](https://github.com/GoogleCloudPlatform/kubernetes/issues/168).
Service](http://issue.k8s.io/168).
Currently, one unit of CPU means different things on different cloud providers, and on different
machine types within the same cloud providers. For example, on AWS, the capacity of a node

View File

@@ -49,7 +49,7 @@ This document is meant to highlight and consolidate in one place configuration b
1. Use kubectl run and expose to quickly create and expose single container replication controllers. See the [quick start guide](quick-start.md) for an example.
1. Use headless services for easy service discovery when you don't need kube-proxy load balancing. See [headless services](services.md#headless-services).
1. Use kubectl delete rather than stop. Delete has a superset of the functionality of stop, and stop is deprecated.
1. If there is a viable alternative to naked pods (i.e. pods not bound to a controller), go with the alternative. Controllers are almost always preferable to creating pods (except for some `restartPolicy: Never` scenarios). A minimal Job is coming. See [#1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624). Naked pods will not be rescheduled in the event of node failure.
1. If there is a viable alternative to naked pods (i.e. pods not bound to a controller), go with the alternative. Controllers are almost always preferable to creating pods (except for some `restartPolicy: Never` scenarios). A minimal Job is coming. See [#1624](http://issue.k8s.io/1624). Naked pods will not be rescheduled in the event of node failure.
1. Put a version number or hash as a suffix to the name and in a label on a replication controller to facilitate rolling update, as we do for [--image](kubectl/kubectl_rolling-update.md). This is necessary because rolling-update actually creates a new controller as opposed to modifying the existing controller. This does not play well with version agnostic controller names.
1. Put an object description in an annotation to allow better introspection.

View File

@@ -103,7 +103,7 @@ Let's say you specified `entrypoint` instead of `command`. You'd see output
```console
I0709 06:33:05.600829 14160 schema.go:126] unknown field: entrypoint
I0709 06:33:05.600988 14160 schema.go:129] this may be a false alarm, see https://github.com/GoogleCloudPlatform/kubernetes/issues/6842
I0709 06:33:05.600988 14160 schema.go:129] this may be a false alarm, see http://issue.k8s.io/6842
pods/hello-world
```

View File

@@ -119,7 +119,7 @@ A single parameter named reason is passed to the handler which contains the reas
* `Health` - indicating that a health check of the container failed.
* `Dependency` - indicating that a dependency for the container or the pod is missing, and thus, the container needs to be restarted. Examples include the pod infra container crashing, or a persistent disk failing for a container that mounts a PD.
Eventually, user-specified reasons may be [added to the API](https://github.com/GoogleCloudPlatform/kubernetes/issues/137).
Eventually, user-specified reasons may be [added to the API](http://issue.k8s.io/137).
### Hook Handler Execution

View File

@@ -46,7 +46,7 @@ Please consult this document before filing new bugs.
* Wrong node cpu/memory limit metrics from Heapster (https://github.com/GoogleCloudPlatform/heapster/issues/399)
* Services that set `type=LoadBalancer` can not use port `10250` because of Google Compute Engine firewall limitations
* Add-on services can not be created or deleted via `kubectl` or the Kubernetes API (#11435)
* If a pod with a GCE PD is created and deleted in rapid succession, it may fail to attach/mount correctly leaving PD data inaccessible (or corrupted in the worst case). (https://github.com/GoogleCloudPlatform/kubernetes/issues/11231#issuecomment-122049113)
* If a pod with a GCE PD is created and deleted in rapid succession, it may fail to attach/mount correctly leaving PD data inaccessible (or corrupted in the worst case). (http://issue.k8s.io/11231#issuecomment-122049113)
* Suggested temporary workaround: introduce a 1-2 minute delay between deleting and recreating a pod with a PD on the same node.
* Explicit errors while detaching GCE PD could prevent PD from ever being detached (#11321)
* GCE PDs may sometimes fail to attach (#11302)

View File

@@ -39,7 +39,7 @@ Multiple kubeconfig files are allowed. At runtime they are loaded and merged to
## Related discussion
https://github.com/GoogleCloudPlatform/kubernetes/issues/1755
http://issue.k8s.io/1755
## Example kubeconfig file

View File

@@ -81,7 +81,7 @@ The possible values for RestartPolicy are `Always`, `OnFailure`, or `Never`. If
The only controller we have today is [`ReplicationController`](replication-controller.md). `ReplicationController` is *only* appropriate for pods with `RestartPolicy = Always`. `ReplicationController` should refuse to instantiate any pod that has a different restart policy.
There is a legitimate need for a controller which keeps pods with other policies alive. Pods having any of the other policies (`OnFailure` or `Never`) eventually terminate, at which point the controller should stop recreating them. Because of this fundamental distinction, let's hypothesize a new controller, called [`JobController`](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624) for the sake of this document, which can implement this policy.
There is a legitimate need for a controller which keeps pods with other policies alive. Pods having any of the other policies (`OnFailure` or `Never`) eventually terminate, at which point the controller should stop recreating them. Because of this fundamental distinction, let's hypothesize a new controller, called [`JobController`](http://issue.k8s.io/1624) for the sake of this document, which can implement this policy.
## Pod lifetime

View File

@@ -106,9 +106,9 @@ Pod is exposed as a primitive in order to facilitate:
* decoupling of pod lifetime from controller lifetime, such as for bootstrapping
* decoupling of controllers and services &mdash; the endpoint controller just watches pods
* clean composition of Kubelet-level functionality with cluster-level functionality &mdash; Kubelet is effectively the "pod controller"
* high-availability applications, which will expect pods to be replaced in advance of their termination and certainly in advance of deletion, such as in the case of planned evictions, image prefetching, or live pod migration [#3949](https://github.com/GoogleCloudPlatform/kubernetes/issues/3949)
* high-availability applications, which will expect pods to be replaced in advance of their termination and certainly in advance of deletion, such as in the case of planned evictions, image prefetching, or live pod migration [#3949](http://issue.k8s.io/3949)
The current best practice for pets is to create a replication controller with `replicas` equal to `1` and a corresponding service. If you find this cumbersome, please comment on [issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260).
The current best practice for pets is to create a replication controller with `replicas` equal to `1` and a corresponding service. If you find this cumbersome, please comment on [issue #260](http://issue.k8s.io/260).
## API Object

View File

@@ -55,7 +55,7 @@ Documentation for other releases can be found at
A _replication controller_ ensures that a specified number of pod "replicas" are running at any one time. If there are too many, it will kill some. If there are too few, it will start more. Unlike in the case where a user directly created pods, a replication controller replaces pods that are deleted or terminated for any reason, such as in the case of node failure or disruptive node maintenance, such as a kernel upgrade. For this reason, we recommend that you use a replication controller even if your application requires only a single pod. Think of it similarly to a process supervisor, only it supervises multiple pods across multiple nodes instead of individual processes on a single node. A replication controller delegates local container restarts to some agent on the node (e.g., Kubelet or Docker).
As discussed in [life of a pod](pod-states.md), `ReplicationController` is *only* appropriate for pods with `RestartPolicy = Always`. (Note: If `RestartPolicy` is not set, the default value is `Always`.) `ReplicationController` should refuse to instantiate any pod that has a different restart policy. As discussed in [issue #503](https://github.com/GoogleCloudPlatform/kubernetes/issues/503#issuecomment-50169443), we expect other types of controllers to be added to Kubernetes to handle other types of workloads, such as build/test and batch workloads, in the future.
As discussed in [life of a pod](pod-states.md), `ReplicationController` is *only* appropriate for pods with `RestartPolicy = Always`. (Note: If `RestartPolicy` is not set, the default value is `Always`.) `ReplicationController` should refuse to instantiate any pod that has a different restart policy. As discussed in [issue #503](http://issue.k8s.io/503#issuecomment-50169443), we expect other types of controllers to be added to Kubernetes to handle other types of workloads, such as build/test and batch workloads, in the future.
A replication controller will never terminate on its own, but it isn't expected to be as long-lived as services. Services may be composed of pods controlled by multiple replication controllers, and it is expected that many replication controllers may be created and destroyed over the lifetime of a service (for instance, to perform an update of pods that run the service). Both services themselves and their clients should remain oblivious to the replication controllers that maintain the pods of the services.
@@ -63,7 +63,7 @@ A replication controller will never terminate on its own, but it isn't expected
### Pod template
A replication controller creates new pods from a template, which is currently inline in the `ReplicationController` object, but which we plan to extract into its own resource [#170](https://github.com/GoogleCloudPlatform/kubernetes/issues/170).
A replication controller creates new pods from a template, which is currently inline in the `ReplicationController` object, but which we plan to extract into its own resource [#170](http://issue.k8s.io/170).
Rather than specifying the current desired state of all replicas, pod templates are like cookie cutters. Once a cookie has been cut, the cookie has no relationship to the cutter. There is no quantum entanglement. Subsequent changes to the template or even switching to a new template has no direct effect on the pods already created. Similarly, pods created by a replication controller may subsequently be updated directly. This is in deliberate contrast to pods, which do specify the current desired state of all containers belonging to the pod. This approach radically simplifies system semantics and increases the flexibility of the primitive, as demonstrated by the use cases explained below.
@@ -83,9 +83,9 @@ Similarly, deleting a replication controller does not affect the pods it created
## Responsibilities of the replication controller
The replication controller simply ensures that the desired number of pods matches its label selector and are operational. Currently, only terminated pods are excluded from its count. In the future, [readiness](https://github.com/GoogleCloudPlatform/kubernetes/issues/620) and other information available from the system may be taken into account, we may add more controls over the replacement policy, and we plan to emit events that could be used by external clients to implement arbitrarily sophisticated replacement and/or scale-down policies.
The replication controller simply ensures that the desired number of pods matches its label selector and are operational. Currently, only terminated pods are excluded from its count. In the future, [readiness](http://issue.k8s.io/620) and other information available from the system may be taken into account, we may add more controls over the replacement policy, and we plan to emit events that could be used by external clients to implement arbitrarily sophisticated replacement and/or scale-down policies.
The replication controller is forever constrained to this narrow responsibility. It itself will not perform readiness nor liveness probes. Rather than performing auto-scaling, it is intended to be controlled by an external auto-scaler (as discussed in [#492](https://github.com/GoogleCloudPlatform/kubernetes/issues/492)), which would change its `replicas` field. We will not add scheduling policies (e.g., [spreading](https://github.com/GoogleCloudPlatform/kubernetes/issues/367#issuecomment-48428019)) to the replication controller. Nor should it verify that the pods controlled match the currently specified template, as that would obstruct auto-sizing and other automated processes. Similarly, completion deadlines, ordering dependencies, configuration expansion, and other features belong elsewhere. We even plan to factor out the mechanism for bulk pod creation ([#170](https://github.com/GoogleCloudPlatform/kubernetes/issues/170)).
The replication controller is forever constrained to this narrow responsibility. It itself will not perform readiness nor liveness probes. Rather than performing auto-scaling, it is intended to be controlled by an external auto-scaler (as discussed in [#492](http://issue.k8s.io/492)), which would change its `replicas` field. We will not add scheduling policies (e.g., [spreading](http://issue.k8s.io/367#issuecomment-48428019)) to the replication controller. Nor should it verify that the pods controlled match the currently specified template, as that would obstruct auto-sizing and other automated processes. Similarly, completion deadlines, ordering dependencies, configuration expansion, and other features belong elsewhere. We even plan to factor out the mechanism for bulk pod creation ([#170](http://issue.k8s.io/170)).
The replication controller is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, stop, scale, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing replication controllers, auto-scalers, services, scheduling policies, canaries, etc.
@@ -103,7 +103,7 @@ The replication controller makes it easy to scale the number of replicas up or d
The replication controller is designed to facilitate rolling updates to a service by replacing pods one-by-one.
As explained in [#1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353), the recommended approach is to create a new replication controller with 1 replica, scale the new (+1) and old (-1) controllers one by one, and then delete the old controller after it reaches 0 replicas. This predictably updates the set of pods regardless of unexpected failures.
As explained in [#1353](http://issue.k8s.io/1353), the recommended approach is to create a new replication controller with 1 replica, scale the new (+1) and old (-1) controllers one by one, and then delete the old controller after it reaches 0 replicas. This predictably updates the set of pods regardless of unexpected failures.
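That one-by-one dance is easy to sketch. In the following Go sketch, `scale` stands in for whatever client call resizes a replication controller; the names are hypothetical, not the kubectl implementation:

```go
// rollingUpdate replaces oldRC's pods with newRC's, one at a time: scale the
// new controller up by one and the old down by one until the old reaches
// zero. The caller then deletes oldRC. Illustrative sketch only.
func rollingUpdate(scale func(rc string, replicas int) error, oldRC, newRC string, desired int) error {
	oldCount, newCount := desired, 0
	for newCount < desired || oldCount > 0 {
		if newCount < desired {
			newCount++
			if err := scale(newRC, newCount); err != nil {
				return err
			}
			// A real implementation would wait for the new pod to become ready here.
		}
		if oldCount > 0 {
			oldCount--
			if err := scale(oldRC, oldCount); err != nil {
				return err
			}
		}
	}
	return nil
}
```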
Ideally, the rolling update controller would take application readiness into account, and would ensure that a sufficient number of pods were productively serving at any given time.

View File

@@ -165,7 +165,7 @@ Use of imagePullSecrets is described in the [images documentation](images.md#spec
### Automatic use of Manually Created Secrets
*This feature is planned but not implemented. See [issue
9902](https://github.com/GoogleCloudPlatform/kubernetes/issues/9902).*
9902](http://issue.k8s.io/9902).*
You can reference manually created secrets from a [service account](service-accounts.md).
Then, pods which use that service account will have

View File

@@ -441,7 +441,7 @@ though exactly how that works depends on the cloud provider.
We expect that using iptables and userspace proxies for VIPs will work at
small to medium scale, but may not scale to very large clusters with thousands
of Services. See [the original design proposal for
portals](https://github.com/GoogleCloudPlatform/kubernetes/issues/1107) for more
portals](http://issue.k8s.io/1107) for more
details.
Using the kube-proxy obscures the source-IP of a packet accessing a `Service`.
@@ -462,7 +462,7 @@ envision that some `Services` will have "real" load balancers, in which case the
VIP will simply transport the packets there.
There's a
[proposal](https://github.com/GoogleCloudPlatform/kubernetes/issues/3760) to
[proposal](http://issue.k8s.io/3760) to
eliminate userspace proxying in favor of doing it all in iptables. This should
perform better and fix the source-IP obfuscation, though is less flexible than
arbitrary userspace code.

View File

@@ -39,7 +39,7 @@ var ForTesting_ReferencesAllowBlankSelfLinks = false
// GetReference returns an ObjectReference which refers to the given
// object, or an error if the object doesn't follow the conventions
// that would allow this.
// TODO: should take a meta.Interface see https://github.com/GoogleCloudPlatform/kubernetes/issues/7127
// TODO: should take a meta.Interface see http://issue.k8s.io/7127
func GetReference(obj runtime.Object) (*ObjectReference, error) {
if obj == nil {
return nil, ErrNilObject

View File

@@ -547,7 +547,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
addParams(route, action.Params)
ws.Route(route)
case "PROXY": // Proxy requests to a resource.
// Accept all methods as per https://github.com/GoogleCloudPlatform/kubernetes/issues/3996
// Accept all methods as per http://issue.k8s.io/3996
addProxyRoute(ws, "GET", a.prefix, action.Path, proxyHandler, namespaced, kind, resource, subresource, hasSubresource, action.Params)
addProxyRoute(ws, "PUT", a.prefix, action.Path, proxyHandler, namespaced, kind, resource, subresource, hasSubresource, action.Params)
addProxyRoute(ws, "POST", a.prefix, action.Path, proxyHandler, namespaced, kind, resource, subresource, hasSubresource, action.Params)

View File

@@ -169,7 +169,7 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
// Redirect requests of the form "/{resource}/{name}" to "/{resource}/{name}/"
// This is essentially a hack for https://github.com/GoogleCloudPlatform/kubernetes/issues/4958.
// This is essentially a hack for http://issue.k8s.io/4958.
// Note: Keep this code after tryUpgrade to not break that flow.
if len(parts) == 2 && !strings.HasSuffix(req.URL.Path, "/") {
var queryPart string

View File

@@ -869,7 +869,7 @@ func makePortMappings(container *api.Container) (ports []kubecontainer.PortMappi
// We need to create some default port name if it's not specified, since
// this is necessary for rkt.
// https://github.com/GoogleCloudPlatform/kubernetes/issues/7710
// http://issue.k8s.io/7710
if p.Name == "" {
pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort)
} else {

View File

@@ -768,7 +768,7 @@ func (r *runtime) writeDockerAuthConfig(image string, credsSlice []docker.AuthCo
// TODO(yifan): Now we only support docker images, this should be changed
// once the format of image is landed, see:
//
// https://github.com/GoogleCloudPlatform/kubernetes/issues/7203
// http://issue.k8s.io/7203
//
func (r *runtime) PullImage(image kubecontainer.ImageSpec, pullSecrets []api.Secret) error {
img := image.Image

View File

@@ -89,7 +89,7 @@ func (h *UpgradeAwareProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Re
// From pkg/apiserver/proxy.go#ServeHTTP:
// Redirect requests with an empty path to a location that ends with a '/'
// This is essentially a hack for https://github.com/GoogleCloudPlatform/kubernetes/issues/4958.
// This is essentially a hack for http://issue.k8s.io/4958.
// Note: Keep this code after tryUpgrade to not break that flow.
if len(loc.Path) == 0 {
var queryPart string

View File

@@ -34,7 +34,7 @@ import (
// and the group will be set to allow containers to use emptyDir volumes
// from the group attribute.
//
// https://github.com/GoogleCloudPlatform/kubernetes/issues/2630
// http://issue.k8s.io/2630
const perm os.FileMode = 0777
// This is the primary entrypoint for volume plugins.

View File

@@ -586,7 +586,7 @@ var _ = Describe("Services", func() {
}
ingress2 := service.Status.LoadBalancer.Ingress[0]
// TODO: Fix the issue here: https://github.com/GoogleCloudPlatform/kubernetes/issues/11002
// TODO: Fix the issue here: http://issue.k8s.io/11002
if providerIs("aws") {
// TODO: Make this less of a hack (or fix the underlying bug)
time.Sleep(time.Second * 120)