Merge pull request #11038 from lavalamp/doclinks

Link verification
Rohit Jnagal 2015-07-10 12:15:48 -07:00
commit 8df6c5c00a
51 changed files with 284 additions and 133 deletions

cmd/mungedocs/links.go (new file, 143 lines)

@ -0,0 +1,143 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"net/url"
"os"
"path"
"regexp"
"strings"
)
var (
// Finds markdown links of the form [foo](bar "alt-text").
linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
// Splits the link target into link target and alt-text.
altTextRE = regexp.MustCompile(`(.*)( ".*")`)
)
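// For example, in `[pods](pods.md "Pod docs")` linkRE captures "pods" as the
// visible text and `pods.md "Pod docs"` as the target; altTextRE then splits
// that target into `pods.md` and the alt-text ` "Pod docs"`.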
// checkLinks assumes fileBytes has links in markdown syntax, and verifies that
// any relative links actually point to files that exist.
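// It also returns a copy of fileBytes with suggested fixes applied: link
// targets are normalized and, where the visible text looks like a file name,
// it is updated to match the corrected path.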
func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
dir := path.Dir(filePath)
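// Accumulate problems so a single pass reports every bad link in the file, not just the first.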
errors := []string{}
output := linkRE.ReplaceAllFunc(fileBytes, func(in []byte) (out []byte) {
match := linkRE.FindSubmatch(in)
// match[0] is the entire expression; [1] is the visible text and [2] is the link text.
visibleText := string(match[1])
linkText := string(match[2])
altText := ""
if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
linkText = parts[1]
altText = parts[2]
}
// clean up some random garbage I found in our docs.
linkText = strings.Trim(linkText, "\n ")
u, err := url.Parse(linkText)
if err != nil {
errors = append(
errors,
fmt.Sprintf("%v, link %q is unparsable: %v", filePath, linkText, err),
)
return in
}
if u.Host != "" {
// We only care about relative links.
return in
}
suggestedVisibleText := visibleText
if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
if !targetExists {
errors = append(
errors,
fmt.Sprintf("%v, %q: target not found\n", filePath, linkText),
)
}
u.Path = newPath
// Make the visible text show the absolute path if it's
// not nested in or beneath the current directory.
if strings.HasPrefix(u.Path, "..") {
suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path))
} else {
suggestedVisibleText = u.Path
}
if unescaped, err := url.QueryUnescape(u.String()); err == nil {
// Remove %28 type stuff, be nice to humans.
// And don't fight with the toc generator.
linkText = unescaped
} else {
linkText = u.String()
}
}
// If the current visible text is trying to be a file name, use
// the correct file name.
if (strings.Contains(visibleText, ".md") || strings.Contains(visibleText, "/")) && !strings.ContainsAny(visibleText, ` '"`+"`") {
visibleText = suggestedVisibleText
}
return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText))
})
var err error
if len(errors) != 0 {
err = fmt.Errorf("%s", strings.Join(errors, "\n"))
}
return output, err
}
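// makeRepoRelative strips everything up to and including the repository root
// from a path, so suggested link text does not leak the local checkout location.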
func makeRepoRelative(path string) string {
parts := strings.Split(path, "github.com/GoogleCloudPlatform/kubernetes/")
if len(parts) > 1 {
// Take out anything that is specific to the local filesystem.
return parts[1]
}
return path
}
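// checkPath resolves linkPath relative to filePath's directory and reports
// whether the target exists. Absolute paths that do not refer to
// /GoogleCloudPlatform have their leading "/" dropped as an attempted fix.
// If the target is missing, "../" is prepended and the lookup retried up to
// four more times; the first existing candidate is returned, with a trailing
// "/" added for directories.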
func checkPath(filePath, linkPath string) (newPath string, ok bool) {
dir := path.Dir(filePath)
if strings.HasPrefix(linkPath, "/") {
if !strings.HasPrefix(linkPath, "/GoogleCloudPlatform") {
// Any absolute paths that aren't relative to github.com are wrong.
// Try to fix.
linkPath = linkPath[1:]
}
}
newPath = linkPath
for i := 0; i < 5; i++ {
// The file must exist.
target := path.Join(dir, newPath)
if info, err := os.Stat(target); err == nil {
if info.IsDir() {
return newPath + "/", true
}
return newPath, true
}
newPath = path.Join("..", newPath)
}
return linkPath, false
}

View File

@ -33,52 +33,62 @@ var (
rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
ErrChangesNeeded = errors.New("mungedocs: changes required")
// TODO: allow selection from command line. (e.g., just check links in the examples directory.)
mungesToMake = munges{
munger(updateTOC),
munger(checkLinks),
}
)
func visitAndVerify(path string, i os.FileInfo, e error) error {
return visitAndChangeOrVerify(path, i, e, false)
}
// Munger processes a document, returning an updated document xor an error.
// Munger is NOT allowed to mutate 'before', if changes are needed it must copy
// data into a new byte array.
type munger func(filePath string, before []byte) (after []byte, err error)
func visitAndChange(path string, i os.FileInfo, e error) error {
return visitAndChangeOrVerify(path, i, e, true)
type munges []munger
type fileProcessor struct {
// Which munge functions should we call?
munges munges
// Are we allowed to make changes?
verifyOnly bool
}
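// Illustrative sketch only (not part of this change): any function matching the
// munger signature can be added to mungesToMake above. For example, a pass that
// strips trailing whitespace might look like this; note that it builds a new
// slice rather than mutating 'before':
//
//	func stripTrailingSpace(filePath string, before []byte) ([]byte, error) {
//		lines := strings.Split(string(before), "\n")
//		for i := range lines {
//			lines[i] = strings.TrimRight(lines[i], " \t")
//		}
//		return []byte(strings.Join(lines, "\n")), nil
//	}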
// Either change a file or verify that it needs no changes (according to modify argument)
func visitAndChangeOrVerify(path string, i os.FileInfo, e error, modify bool) error {
func (f fileProcessor) visit(path string, i os.FileInfo, e error) error {
if !strings.HasSuffix(path, ".md") {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
before, err := ioutil.ReadAll(file)
fileBytes, err := ioutil.ReadFile(path)
if err != nil {
return err
}
after, err := updateTOC(before)
if err != nil {
return err
}
if modify {
// Write out new file with any changes.
if !bytes.Equal(after, before) {
file.Close()
ioutil.WriteFile(path, after, 0644)
modificationsMade := false
for _, munge := range f.munges {
after, err := munge(path, fileBytes)
if err != nil {
return err
}
} else {
// Just verify that there are no changes.
if !bytes.Equal(after, before) {
return ErrChangesNeeded
if !modificationsMade {
if !bytes.Equal(after, fileBytes) {
modificationsMade = true
if f.verifyOnly {
// We're not allowed to make changes.
return ErrChangesNeeded
}
}
}
fileBytes = after
}
// TODO(erictune): more types of passes, such as:
// Linkify terms
// Verify links point to files.
// Write out new file with any changes.
if modificationsMade {
ioutil.WriteFile(path, fileBytes, 0644)
}
return nil
}
@ -91,6 +101,11 @@ func main() {
os.Exit(1)
}
fp := fileProcessor{
munges: mungesToMake,
verifyOnly: *verify,
}
// For each markdown file under source docs root, process the doc.
// If any error occurs, will exit with failure.
// If verify is true, then status is 0 for no changes needed, 1 for changes needed
@ -98,12 +113,7 @@ func main() {
// If verify is false, then status is 0 if changes successfully made or no changes needed,
// 1 if changes were needed but require human intervention, and >1 for an unexpected
// error during processing.
var err error
if *verify {
err = filepath.Walk(*rootDir, visitAndVerify)
} else {
err = filepath.Walk(*rootDir, visitAndChange)
}
err := filepath.Walk(*rootDir, fp.visit)
if err != nil {
if err == ErrChangesNeeded {
if *verify {

View File

@ -30,7 +30,7 @@ import (
// the ToC, thereby updating any previously inserted ToC.
//
// TODO(erictune): put this in own package with tests
func updateTOC(markdown []byte) ([]byte, error) {
func updateTOC(filePath string, markdown []byte) ([]byte, error) {
toc, err := buildTOC(markdown)
if err != nil {
return nil, err

View File

@ -92,7 +92,7 @@ func Test_updateTOC(t *testing.T) {
"# Title\nLorem ipsum \n**table of contents**\n<!-- BEGIN GENERATED TOC -->\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n<!-- END GENERATED TOC -->\n## Section Heading\ndolor sit amet\n"},
}
for _, c := range cases {
actual, err := updateTOC([]byte(c.in))
actual, err := updateTOC("filename.md", []byte(c.in))
assert.NoError(t, err)
if c.out != string(actual) {
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual))

View File

@ -29,9 +29,9 @@ certainly want the docs that go with that version.</h1>
* The [API object documentation](http://kubernetes.io/third_party/swagger-ui/)
is a detailed description of all fields found in core API objects.
* An overview of the [Design of Kubernetes](design)
* An overview of the [Design of Kubernetes](design/)
* There are example files and walkthroughs in the [examples](../examples)
* There are example files and walkthroughs in the [examples](../examples/)
folder.

View File

@ -10,7 +10,7 @@ kubernetes CLI, `kubectl`.
To access a cluster, you need to know the location of the cluster and have credentials
to access it. Typically, this is automatically set up when you work through
a [Getting started guide](../docs/getting-started-guide/README.md),
a [Getting started guide](../docs/getting-started-guides/README.md),
or someone else set up the cluster and provided you with credentials and a location.
Check the location and credentials that kubectl knows about with this command:

View File

@ -24,8 +24,8 @@ By default the Kubernetes APIserver serves HTTP on 2 ports:
- default is port 6443, change with `--secure-port` flag.
- default IP is first non-localhost network interface, change with `--bind-address` flag.
- serves HTTPS. Set cert with `--tls-cert-file` and key with `--tls-private-key-file` flag.
- uses token-file or client-certificate based [authentication](./authentication.md).
- uses policy-based [authorization](./authorization.md).
- uses token-file or client-certificate based [authentication](authentication.md).
- uses policy-based [authorization](authorization.md).
3. Removed: ReadOnly Port
- For security reasons, this had to be removed. Use the service account feature instead.

View File

@ -46,7 +46,7 @@ commands in those containers, we strongly encourage enabling this plug-in.
### ServiceAccount
This plug-in implements automation for [serviceAccounts]( service_accounts.md).
This plug-in implements automation for [serviceAccounts](service_accounts.md).
We strongly recommend using this plug-in if you intend to make use of Kubernetes ```ServiceAccount``` objects.
### SecurityContextDeny
@ -59,7 +59,7 @@ This plug-in will observe the incoming request and ensure that it does not viola
enumerated in the ```ResourceQuota``` object in a ```Namespace```. If you are using ```ResourceQuota```
objects in your Kubernetes deployment, you MUST use this plug-in to enforce quota constraints.
See the [resourceQuota design doc]( design/admission_control_resource_quota.md).
See the [resourceQuota design doc](design/admission_control_resource_quota.md).
It is strongly encouraged that this plug-in is configured last in the sequence of admission control plug-ins. This is
so that quota is not prematurely incremented only for the request to be rejected later in admission control.
@ -70,7 +70,7 @@ This plug-in will observe the incoming request and ensure that it does not viola
enumerated in the ```LimitRange``` object in a ```Namespace```. If you are using ```LimitRange``` objects in
your Kubernetes deployment, you MUST use this plug-in to enforce those constraints.
See the [limitRange design doc]( design/admission_control_limit_range.md).
See the [limitRange design doc](design/admission_control_limit_range.md).
### NamespaceExists

View File

@ -118,7 +118,7 @@ In order to preserve extensibility, in the future, we intend to explicitly conve
Note that historical information status (e.g., last transition time, failure counts) is only provided at best effort, and is not guaranteed to not be lost.
Status information that may be large (especially unbounded in size, such as lists of references to other objects -- see below) and/or rapidly changing, such as [resource usage](./design/resources.md#usage-data), should be put into separate objects, with possibly a reference from the original object. This helps to ensure that GETs and watch remain reasonably efficient for the majority of clients, which may not need that data.
Status information that may be large (especially unbounded in size, such as lists of references to other objects -- see below) and/or rapidly changing, such as [resource usage](design/resources.md#usage-data), should be put into separate objects, with possibly a reference from the original object. This helps to ensure that GETs and watch remain reasonably efficient for the majority of clients, which may not need that data.
#### References to related objects

View File

@ -2,7 +2,7 @@
In Kubernetes, authorization happens as a separate step from authentication.
See the [authentication documentation](./authentication.md) for an
See the [authentication documentation](authentication.md) for an
overview of authentication.
Authorization applies to all HTTP accesses on the main apiserver port. (The

View File

@ -120,7 +120,7 @@ then you need `R + U` clusters. If it is not (e.g you want to ensure low latenc
cluster failure), then you need to have `R * U` clusters (`U` in each of `R` regions). In any case, try to put each cluster in a different zone.
Finally, if any of your clusters would need more than the maximum recommended number of nodes for a Kubernetes cluster, then
you may need even more clusters. Our [roadmap](./roadmap.md)
you may need even more clusters. Our [roadmap](roadmap.md)
calls for maximum 100 node clusters at v1.0 and maximum 1000 node clusters in the middle of 2015.
## Working with multiple clusters

View File

@ -64,13 +64,13 @@ project.](salt.md).
Describes the environment for Kubelet managed containers on a Kubernetes
node.
* **Securing access to the API Server** [accessing the api]( accessing_the_api.md)
* **Securing access to the API Server** [accessing the api](accessing_the_api.md)
* **Authentication** [authentication]( authentication.md)
* **Authentication** [authentication](authentication.md)
* **Authorization** [authorization]( authorization.md)
* **Authorization** [authorization](authorization.md)
* **Admission Controllers** [admission_controllers]( admission_controllers.md)
* **Admission Controllers** [admission_controllers](admission_controllers.md)

View File

@ -9,7 +9,7 @@
- [Troubleshooting](#troubleshooting)
- [Planned Improvements](#planned-improvements)
When specifying a [pod](./pods.md), you can optionally specify how much CPU and memory (RAM) each
When specifying a [pod](pods.md), you can optionally specify how much CPU and memory (RAM) each
container needs. When containers have resource limits, the scheduler is able to make better
decisions about which nodes to place pods on, and contention for resources can be handled in a
consistent manner.
@ -19,8 +19,8 @@ in units of cores. Memory is specified in units of bytes.
CPU and RAM are collectively referred to as *compute resources*, or just *resources*. Compute
resources are measurable quantities which can be requested, allocated, and consumed. They are
distinct from [API resources](./working_with_resources.md). API resources, such as pods and
[services](./services.md) are objects that can be written to and retrieved from the Kubernetes API
distinct from [API resources](working_with_resources.md). API resources, such as pods and
[services](services.md) are objects that can be written to and retrieved from the Kubernetes API
server.
## Container and Pod Resource Limits
@ -110,7 +110,7 @@ However, it will not be killed for excessive CPU usage.
The resource usage of a pod is reported as part of the Pod status.
If [optional monitoring](../cluster/addons/monitoring/README.md) is configured for your cluster,
If [optional monitoring](../cluster/addons/cluster-monitoring/README.md) is configured for your cluster,
then pod resource usage can be retrieved from the monitoring system.
## Troubleshooting
@ -136,7 +136,7 @@ Here are some example command lines that extract just the necessary information:
- `kubectl get nodes -o yaml | grep '\sname\|cpu\|memory'`
- `kubectl get nodes -o json | jq '.items[] | {name: .metadata.name, cap: .status.capacity}'`
The [resource quota](./resource_quota_admin.md) feature can be configured
The [resource quota](resource_quota_admin.md) feature can be configured
to limit the total amount of resources that can be consumed. If used in conjunction
with namespaces, it can prevent one team from hogging all the resources.
@ -144,11 +144,11 @@ with namespaces, it can prevent one team from hogging all the resources.
The current system only allows resource quantities to be specified on a container.
It is planned to improve accounting for resources which are shared by all containers in a pod,
such as [EmptyDir volumes](./volumes.md#emptydir).
such as [EmptyDir volumes](volumes.md#emptydir).
The current system only supports container limits for CPU and Memory.
It is planned to add new resource types, including a node disk space
resource, and a framework for adding custom [resource types](./design/resources.md#resource-types).
resource, and a framework for adding custom [resource types](design/resources.md#resource-types).
The current system does not facilitate overcommitment of resources because resources reserved
with container limits are assured. It is planned to support multiple levels of [Quality of

View File

@ -7,7 +7,7 @@ This document describes the environment for Kubelet managed containers on a Kube
This cluster information makes it possible to build applications that are *cluster aware*.  
Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers.  Container hooks are somewhat analogous to operating system signals in a traditional process model.   However these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster.  Containers that participate in this cluster lifecycle become *cluster native*. 
Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](./images.md) and one or more [volumes](./volumes.md).
Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](images.md) and one or more [volumes](volumes.md).
The following sections describe both the cluster information provided to containers, as well as the hooks and life-cycle that allows containers to interact with the management system.

View File

@ -141,7 +141,7 @@ Improvements:
### Namespaces
K8s will have a `namespace` API object. It is similar to a Google Compute Engine `project`. It provides a namespace for objects created by a group of people co-operating together, preventing name collisions with non-cooperating groups. It also serves as a reference point for authorization policies.
Namespaces are described in [namespace.md](namespaces.md).
Namespaces are described in [namespaces.md](namespaces.md).
In the Enterprise Profile:
- a `userAccount` may have permission to access several `namespace`s.
@ -151,7 +151,7 @@ In the Simple Profile:
Namespaces versus userAccount vs Labels:
- `userAccount`s are intended for audit logging (both name and UID should be logged), and to define who has access to `namespace`s.
- `labels` (see [docs/labels.md](/docs/labels.md)) should be used to distinguish pods, users, and other objects that cooperate towards a common goal but are different in some way, such as version, or responsibilities.
- `labels` (see [docs/labels.md](../../docs/labels.md)) should be used to distinguish pods, users, and other objects that cooperate towards a common goal but are different in some way, such as version, or responsibilities.
- `namespace`s prevent name collisions between uncoordinated groups of people, and provide a place to attach common policies for co-operating groups of people.

View File

@ -1,5 +1,5 @@
**Note: this is a design doc, which describes features that have not been completely implemented.
User documentation of the current state is [here](../resources.md). The tracking issue for
User documentation of the current state is [here](../compute_resources.md). The tracking issue for
implementation of this model is
[#168](https://github.com/GoogleCloudPlatform/kubernetes/issues/168). Currently, only memory and
cpu limits on containers (not pods) are supported. "memory" is in bytes and "cpu" is in
@ -149,7 +149,7 @@ The following are planned future extensions to the resource model, included here
## Usage data
Because resource usage and related metrics change continuously, need to be tracked over time (i.e., historically), can be characterized in a variety of ways, and are fairly voluminous, we will not include usage in core API objects, such as [Pods](pods.md) and Nodes, but will provide separate APIs for accessing and managing that data. See the Appendix for possible representations of usage data, but the representation we'll use is TBD.
Because resource usage and related metrics change continuously, need to be tracked over time (i.e., historically), can be characterized in a variety of ways, and are fairly voluminous, we will not include usage in core API objects, such as [Pods](../pods.md) and Nodes, but will provide separate APIs for accessing and managing that data. See the Appendix for possible representations of usage data, but the representation we'll use is TBD.
Singleton values for observed and predicted future usage will rapidly prove inadequate, so we will support the following structure for extended usage information:

View File

@ -71,7 +71,7 @@ service would also consume the secrets associated with the MySQL service.
### Use-Case: Secrets associated with service accounts
[Service Accounts](./service_accounts.md) are proposed as a
[Service Accounts](service_accounts.md) are proposed as a
mechanism to decouple capabilities and security contexts from individual human users. A
`ServiceAccount` contains references to some number of secrets. A `Pod` can specify that it is
associated with a `ServiceAccount`. Secrets should have a `Type` field to allow the Kubelet and
@ -241,7 +241,7 @@ memory overcommit on the node.
#### Secret data on the node: isolation
Every pod will have a [security context](./security_context.md).
Every pod will have a [security context](security_context.md).
Secret data on the node should be isolated according to the security context of the container. The
Kubelet volume plugin API will be changed so that a volume plugin receives the security context of
a volume along with the volume spec. This will allow volume plugins to implement setting the
@ -253,7 +253,7 @@ Several proposals / upstream patches are notable as background for this proposal
1. [Docker vault proposal](https://github.com/docker/docker/issues/10310)
2. [Specification for image/container standardization based on volumes](https://github.com/docker/docker/issues/9277)
3. [Kubernetes service account proposal](./service_accounts.md)
3. [Kubernetes service account proposal](service_accounts.md)
4. [Secrets proposal for docker (1)](https://github.com/docker/docker/pull/6075)
5. [Secrets proposal for docker (2)](https://github.com/docker/docker/pull/6697)

View File

@ -63,14 +63,14 @@ Automated process users fall into the following categories:
A pod runs in a *security context* under a *service account* that is defined by an administrator or project administrator, and the *secrets* a pod has access to is limited by that *service account*.
1. The API should authenticate and authorize user actions [authn and authz](./access.md)
1. The API should authenticate and authorize user actions [authn and authz](access.md)
2. All infrastructure components (kubelets, kube-proxies, controllers, scheduler) should have an infrastructure user that they can authenticate with and be authorized to perform only the functions they require against the API.
3. Most infrastructure components should use the API as a way of exchanging data and changing the system, and only the API should have access to the underlying data store (etcd)
4. When containers run on the cluster and need to talk to other containers or the API server, they should be identified and authorized clearly as an autonomous process via a [service account](./service_accounts.md)
4. When containers run on the cluster and need to talk to other containers or the API server, they should be identified and authorized clearly as an autonomous process via a [service account](service_accounts.md)
1. If the user who started a long-lived process is removed from access to the cluster, the process should be able to continue without interruption
2. If users who started processes are removed from the cluster, administrators may wish to terminate their processes in bulk
3. When containers run with a service account, the user that created / triggered the service account behavior must be associated with the container's action
5. When container processes run on the cluster, they should run in a [security context](./security_context.md) that isolates those processes via Linux user security, user namespaces, and permissions.
5. When container processes run on the cluster, they should run in a [security context](security_context.md) that isolates those processes via Linux user security, user namespaces, and permissions.
1. Administrators should be able to configure the cluster to automatically confine all container processes as a non-root, randomly assigned UID
2. Administrators should be able to ensure that container processes within the same namespace are all assigned the same unix user UID
3. Administrators should be able to limit which developers and project administrators have access to higher privilege actions
@ -79,7 +79,7 @@ A pod runs in a *security context* under a *service account* that is defined by
6. Developers may need to ensure their images work within higher security requirements specified by administrators
7. When available, Linux kernel user namespaces can be used to ensure 5.2 and 5.4 are met.
8. When application developers want to share filesystem data via distributed filesystems, the Unix user ids on those filesystems must be consistent across different container processes
6. Developers should be able to define [secrets](./secrets.md) that are automatically added to the containers when pods are run
6. Developers should be able to define [secrets](secrets.md) that are automatically added to the containers when pods are run
1. Secrets are files injected into the container whose values should not be displayed within a pod. Examples:
1. An SSH private key for git cloning remote data
2. A client certificate for accessing a remote system
@ -93,11 +93,11 @@ A pod runs in a *security context* under a *service account* that is defined by
### Related design discussion
* [Authorization and authentication](./access.md)
* [Authorization and authentication](access.md)
* [Secret distribution via files](https://github.com/GoogleCloudPlatform/kubernetes/pull/2030)
* [Docker secrets](https://github.com/docker/docker/pull/6697)
* [Docker vault](https://github.com/docker/docker/issues/10310)
* [Service Accounts:](./service_accounts.md)
* [Service Accounts:](service_accounts.md)
* [Secret volumes](https://github.com/GoogleCloudPlatform/kubernetes/pull/4126)
## Specific Design Points

View File

@ -32,7 +32,7 @@ Processes in pods will need to have consistent UID/GID/SELinux category labels i
* The concept of a security context should not be tied to a particular security mechanism or platform
(ie. SELinux, AppArmor)
* Applying a different security context to a scope (namespace or pod) requires a solution such as the one proposed for
[service accounts](./service_accounts.md).
[service accounts](service_accounts.md).
## Use Cases

View File

@ -21,9 +21,9 @@ They also may interact with services other than the Kubernetes API, such as:
A service account binds together several things:
- a *name*, understood by users, and perhaps by peripheral systems, for an identity
- a *principal* that can be authenticated and [authorized](../authorization.md)
- a [security context](./security_context.md), which defines the Linux Capabilities, User IDs, Groups IDs, and other
- a [security context](security_context.md), which defines the Linux Capabilities, User IDs, Groups IDs, and other
capabilities and controls on interaction with the file system and OS.
- a set of [secrets](./secrets.md), which a container may use to
- a set of [secrets](secrets.md), which a container may use to
access various networked resources.
## Design Discussion

View File

@ -9,7 +9,7 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve
2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware)
3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware)
4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/)
3. Get or build a [binary release](/docs/getting-started-guides/binary_release.md)
3. Get or build a [binary release](../../../docs/getting-started-guides/binary_release.md)
### Setup
@ -244,7 +244,7 @@ my-nginx nginx run=my-nginx 3
```
We did not start any services, hence there are none listed. But we see three replicas displayed properly.
Check the [guestbook](/examples/guestbook/README.md) application to learn how to create a service.
Check the [guestbook](../../../examples/guestbook/README.md) application to learn how to create a service.
You can already play with scaling the replicas with:
```sh

View File

@ -97,7 +97,7 @@ others around it will either have `v0.4-dev` or `v0.5-dev`.
The diagram below illustrates it.
![Diagram of git commits involved in the release](./releasing.png)
![Diagram of git commits involved in the release](releasing.png)
After working on `v0.4-dev` and merging PR 99 we decide it is time to release
`v0.5`. So we start a new branch, create one commit to update

View File

@ -37,7 +37,7 @@ can be overridden by passing the command-line flag `--policy-config-file` to the
file specifying which scheduling policies to use. See
[examples/scheduler-policy-config.json](../../examples/scheduler-policy-config.json) for an example
config file. (Note that the config file format is versioned; the API is defined in
[plugin/pkg/scheduler/api/](../../plugin/pkg/scheduler/api/)).
[plugin/pkg/scheduler/api](../../plugin/pkg/scheduler/api/)).
Thus to add a new scheduling policy, you should modify predicates.go or priorities.go,
and either register the policy in `defaultPredicates()` or `defaultPriorities()`, or use a policy config file.

View File

@ -29,7 +29,7 @@ These guidelines say *what* to do. See the Rationale section for *why*.
search for uses of flags by guides.
- We may ask that you host binary assets or large amounts of code in our `contrib` directory or on your
own repo.
- Setup a cluster and run the [conformance test](../../docs/devel/conformance-test.md) against it, and report the
- Setup a cluster and run the [conformance test](../../docs/devel/development.md#conformance-testing) against it, and report the
results in your PR.
- Add or update a row in [The Matrix](../../docs/getting-started-guides/README.md).
- State the binary version of kubernetes that you tested clearly in your Guide doc and in The Matrix.

View File

@ -79,9 +79,9 @@ For more information, please read [kubeconfig files](https://github.com/GoogleCl
### Examples
See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster.
The "Guestbook" application is another popular example to get started with Kubernetes: [guestbook example](../../examples/guestbook)
The "Guestbook" application is another popular example to get started with Kubernetes: [guestbook example](../../examples/guestbook/)
For more complete applications, please look in the [examples directory](../../examples)
For more complete applications, please look in the [examples directory](../../examples/)
## Tearing down the cluster
Make sure the environment variables you used to provision your cluster are still exported, then call the following script inside the

View File

@ -51,7 +51,7 @@ can tweak some of these parameters by editing `cluster/azure/config-default.sh`.
## Getting started with your cluster
See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster.
For more complete applications, please look in the [examples directory](../../examples).
For more complete applications, please look in the [examples directory](../../examples/).
## Tearing down the cluster
```

View File

@ -20,7 +20,7 @@ cd kubernetes
make release
```
For more details on the release process see the [`build/` directory](../../build)
For more details on the release process see the [`build/` directory](../../build/)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/binary_release.md?pixel)]()

View File

@ -15,7 +15,7 @@ CloudStack is a software to build public and private clouds based on hardware vi
[CoreOS](http://coreos.com) templates for CloudStack are built [nightly](http://stable.release.core-os.net/amd64-usr/current/). CloudStack operators need to [register](http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/templates.html) this template in their cloud before proceeding with these Kubernetes deployment instructions.
This guide uses an [Ansible playbook](https://github.com/runseb/ansible-kubernetes).
This is completely automated: a single playbook deploys Kubernetes based on the CoreOS [instructions](./coreos/coreos_multinode_cluster.md).
This is completely automated: a single playbook deploys Kubernetes based on the CoreOS [instructions](coreos/coreos_multinode_cluster.md).
This [Ansible](http://ansibleworks.com) playbook deploys Kubernetes on a CloudStack based Cloud using CoreOS images. The playbook, creates an ssh key pair, creates a security group and associated rules and finally starts coreOS instances configured via cloud-init.

View File

@ -2,7 +2,6 @@
There are multiple guides on running Kubernetes with [CoreOS](http://coreos.com):
* [Single Node Cluster](coreos/coreos_single_node_cluster.md)
* [Multi-node Cluster](coreos/coreos_multinode_cluster.md)
* [Setup Multi-node Cluster on Google Compute Engine in an easy way](https://github.com/rimusz/coreos-multi-node-k8s-gce/blob/master/README.md)
* [Multi-node cluster using cloud-config and Weave on Vagrant](https://github.com/errordeveloper/weave-demos/blob/master/poseidon/README.md)

View File

@ -188,7 +188,7 @@ You then should be able to access it from anywhere via the Azure virtual IP for
You now have a full-blown cluster running in Azure, congrats!
You should probably try deploying other [example apps](../../../../examples) or write your own ;)
You should probably try deploying other [example apps](../../../../examples/) or write your own ;)
## Tear down...

View File

@ -213,7 +213,7 @@ Now for the good stuff!
## Cloud Configs
The following config files are tailored for the OFFLINE version of a Kubernetes deployment.
These are based on the work found here: [master.yml](./cloud-configs/master.yaml), [node.yml](./cloud-configs/node.yaml)
These are based on the work found here: [master.yml](cloud-configs/master.yaml), [node.yml](cloud-configs/node.yaml)
To make the setup work, you need to replace a few placeholders:
@ -622,7 +622,7 @@ Now that the CoreOS with Kubernetes installed is up and running lets spin up som
See [a simple nginx example](../../../examples/simple-nginx.md) to try out your new cluster.
For more complete applications, please look in the [examples directory](../../../examples).
For more complete applications, please look in the [examples directory](../../../examples/).
## Helping commands for debugging

View File

@ -49,7 +49,7 @@ See [here](docker-multinode/worker.md) for detailed instructions.
Once your cluster has been created you can [test it out](docker-multinode/testing.md)
For more complete applications, please look in the [examples directory](../../examples)
For more complete applications, please look in the [examples directory](../../examples/)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/docker-multinode.md?pixel)]()

View File

@ -150,7 +150,7 @@ Some of the pods may take a few seconds to start up (during this time they'll sh
Then, see [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster.
For more complete applications, please look in the [examples directory](../../examples). The [guestbook example](../../examples/guestbook) is a good "getting started" walkthrough.
For more complete applications, please look in the [examples directory](../../examples/). The [guestbook example](../../examples/guestbook/) is a good "getting started" walkthrough.
### Tearing down the cluster
To remove/delete/teardown the cluster, use the `kube-down.sh` script.

View File

@ -178,7 +178,7 @@ We can add minion units like so:
## Launch the "k8petstore" example app
The [k8petstore example](../../examples/k8petstore) is available as a
The [k8petstore example](../../examples/k8petstore/) is available as a
[juju action](https://jujucharms.com/docs/devel/actions).
juju action do kubernetes-master/0

View File

@ -18,12 +18,12 @@ monitoring-heapster-v1-20ej 0/1 Running 9 32
Here is the same information in a picture which shows how the pods might be placed on specific nodes.
![Cluster](/examples/blog-logging/diagrams/cloud-logging.png)
![Cluster](../../examples/blog-logging/diagrams/cloud-logging.png)
This diagram shows four nodes created on a Google Compute Engine cluster with the name of each VM node on a purple background. The internal and public IPs of each node are shown on gray boxes and the pods running in each node are shown in green boxes. Each pod box shows the name of the pod and the namespace it runs in, the IP address of the pod and the images which are run as part of the pod's execution. Here we see that every node is running a fluentd-cloud-logging pod which is collecting the log output of the containers running on the same node and sending them to Google Cloud Logging. A pod which provides the
[cluster DNS service](/docs/dns.md) runs on one of the nodes and a pod which provides monitoring support runs on another node.
[cluster DNS service](../../docs/dns.md) runs on one of the nodes and a pod which provides monitoring support runs on another node.
To help explain how cluster level logging works, let's start off with a synthetic log generator pod specification [counter-pod.yaml](/examples/blog-logging/counter-pod.yaml):
To help explain how cluster level logging works, let's start off with a synthetic log generator pod specification [counter-pod.yaml](../../examples/blog-logging/counter-pod.yaml):
```
apiVersion: v1
kind: Pod
@ -55,7 +55,7 @@ This step may take a few minutes to download the ubuntu:14.04 image during which
One of the nodes is now running the counter pod:
![Counter Pod](/examples/blog-logging/diagrams/27gf-counter.png)
![Counter Pod](../../examples/blog-logging/diagrams/27gf-counter.png)
When the pod status changes to `Running` we can use the kubectl logs command to view the output of this counter pod.
@ -114,7 +114,7 @@ Weve lost the log lines from the first invocation of the container in this po
When a Kubernetes cluster is created with logging to Google Cloud Logging enabled, the system creates a pod called `fluentd-cloud-logging` on each node of the cluster to collect Docker container logs. These pods were shown at the start of this blog article in the response to the first get pods command.
This log collection pod has a specification which looks something like this [fluentd-gcp.yaml](/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml):
This log collection pod has a specification which looks something like this [fluentd-gcp.yaml](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml):
```
apiVersion: v1
@ -187,7 +187,7 @@ Now we can run queries over the ingested logs. The example below uses the [jq](h
...
```
This page has touched briefly on the underlying mechanisms that support gathering cluster level logs on a Kubernetes deployment. The approach here only works for gathering the standard output and standard error output of the processes running in the pods' containers. To gather other logs that are stored in files, one can use a sidecar container to gather the required files as described at the page [Collecting log files within containers with Fluentd](/contrib/logging/fluentd-sidecar-gcp/README.md) and send them to the Google Cloud Logging service.
This page has touched briefly on the underlying mechanisms that support gathering cluster level logs on a Kubernetes deployment. The approach here only works for gathering the standard output and standard error output of the processes running in the pods' containers. To gather other logs that are stored in files, one can use a sidecar container to gather the required files as described at the page [Collecting log files within containers with Fluentd](../../contrib/logging/fluentd-sidecar-gcp/README.md) and send them to the Google Cloud Logging service.
Some of the material in this section also appears in the blog article [Cluster Level Logging with Kubernetes](http://blog.kubernetes.io/2015/06/cluster-level-logging-with-kubernetes.html).

View File

@ -84,9 +84,9 @@ Note: CoreOS is not supported as the master using the automated launch
scripts. The master node is always Ubuntu.
### Getting started with your cluster
See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster.
See [a simple nginx example](../../../examples/simple-nginx.md) to try out your new cluster.
For more complete applications, please look in the [examples directory](../../examples).
For more complete applications, please look in the [examples directory](../../../examples/).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/rkt/README.md?pixel)]()

View File

@ -167,7 +167,7 @@ You will need binaries for:
#### Downloading and Extracting Kubernetes Binaries
A Kubernetes binary release includes all the Kubernetes binaries as well as the supported release of etcd.
You can use a Kubernetes binary release (recommended) or build your Kubernetes binaries following the instructions in the
[Developer Documentation]( ../devel/README.md). Only using a binary release is covered in this guide.
[Developer Documentation](../devel/README.md). Only using a binary release is covered in this guide.
Download the [latest binary release](
https://github.com/GoogleCloudPlatform/kubernetes/releases/latest) and unzip it.
@ -255,8 +255,7 @@ The admin user (and any users) need:
Your tokens and passwords need to be stored in a file for the apiserver
to read. This guide uses `/var/lib/kube-apiserver/known_tokens.csv`.
The format for this file is described in the [authentication documentation](
../authentication.md).
The format for this file is described in the [authentication documentation](../authentication.md).
For distributing credentials to clients, the convention in Kubernetes is to put the credentials
into a [kubeconfig file](../kubeconfig-file.md).
@ -759,7 +758,7 @@ At this point you should be able to run through one of the basic examples, such
### Running the Conformance Test
You may want to try to run the [Conformance test](../hack/conformance.sh). Any failures may give a hint as to areas that need more attention.
You may want to try to run the [Conformance test](../../hack/conformance-test.sh). Any failures may give a hint as to areas that need more attention.
### Networking

View File

@ -120,7 +120,7 @@ NAME LABELS STATUS
```
Also you can run kubernetes [guest-example](../../examples/guestbook) to build a redis backend cluster on the k8s
Also you can run kubernetes [guest-example](../../examples/guestbook/) to build a redis backend cluster on the k8s
#### Deploy addons

View File

@ -3,7 +3,7 @@
**Authorization**
:Kubernetes does not currently have an authorization system. Anyone with the cluster password can do anything. We plan
to add sophisticated authorization, and to make it pluggable. See the [access control design doc](./design/access.md) and
to add sophisticated authorization, and to make it pluggable. See the [access control design doc](design/access.md) and
[this issue](https://github.com/GoogleCloudPlatform/kubernetes/issues/1430).
**Annotation**
@ -11,11 +11,11 @@ to add sophisticated authorization, and to make it pluggable. See the [access c
non-identifying metadata associated with an object, such as provenance information. Not indexed.
**Image**
: A [Docker Image](https://docs.docker.com/userguide/dockerimages/). See [images](./images.md).
: A [Docker Image](https://docs.docker.com/userguide/dockerimages/). See [images](images.md).
**Label**
: A key/value pair conveying user-defined identifying attributes of an object, and used to form sets of related objects, such as
pods which are replicas in a load-balanced service. Not intended to hold large or non-human-readable data. See [labels](./labels.md).
pods which are replicas in a load-balanced service. Not intended to hold large or non-human-readable data. See [labels](labels.md).
**Name**
: A user-provided name for an object. See [identifiers](identifiers.md).
@ -26,7 +26,7 @@ so you do not have to type it all the time. Namespaces allow multiple projects t
**Pod**
: A collection of containers which will be scheduled onto the same node, which share an IP and port space, and which
can be created/destroyed together. See [pods](./pods.md).
can be created/destroyed together. See [pods](pods.md).
**Replication Controller**
: A _replication controller_ ensures that a specified number of pod "replicas" are running at any one time. Both allows
@ -43,7 +43,7 @@ for easy scaling of replicated systems, and handles restarting of a Pod when the
service. See [labels](labels.md).
**Service**
: A load-balanced set of `pods` which can be accessed via a single stable IP address. See [services](./services.md).
: A load-balanced set of `pods` which can be accessed via a single stable IP address. See [services](services.md).
**UID**
: An identifier on all Kubernetes objects that is set by the Kubernetes API server. Can be used to distinguish between historical

View File

@ -46,11 +46,11 @@ choices. For example, on systemd-based systems (e.g. RHEL, CentOS), you can run
If you are extending from a standard Kubernetes installation, the ```kubelet``` binary should already be present on your system. You can run
```which kubelet``` to determine if the binary is in fact installed. If it is not installed,
you should install the [kubelet binary](https://storage.googleapis.com/kubernetes-release/release/v0.19.3/bin/linux/amd64/kubelet), the
[/etc/init.d/kubelet](high-availability/init-kubelet) and [/etc/default/kubelet](high-availability/default-kubelet)
[high-availability/init-kubelet](TODO:high-availability/init-kubelet) and [high-availability/default-kubelet](TODO:high-availability/default-kubelet)
scripts.
If you are using monit, you should also install the monit daemon (```apt-get install monit```) and the [/etc/monit/conf.d/kubelet](high-availability/monit-kubelet) and
[/etc/monit/conf.d/docker](high-availability/monit-docker) configs.
If you are using monit, you should also install the monit daemon (```apt-get install monit```) and the [high-availability/monit-kubelet](TODO:high-availability/monit-kubelet) and
[high-availability/monit-docker](TODO:high-availability/monit-docker) configs.
On systemd systems you run ```systemctl enable kubelet``` and ```systemctl enable docker```.

View File

@ -83,7 +83,7 @@ The rules for loading and merging the kubeconfig files are straightforward, but
## Manipulation of kubeconfig via `kubectl config <subcommand>`
In order to more easily manipulate kubeconfig files, there are a series of subcommands to `kubectl config` to help.
See [docs/kubectl_config.md](kubectl_config.md) for help.
See [kubectl_config.md](kubectl_config.md) for help.
### Example
```

View File

@ -6,7 +6,7 @@ Kubernetes components, such as kubelet and apiserver, use the [glog](https://god
## Examining the logs of running containers
The logs of a running container may be fetched using the command `kubectl logs`. For example, given
this pod specification which has a container which writes out some text to standard
output every second [counter-pod.yaml](/examples/blog-logging/counter-pod.yaml):
output every second [counter-pod.yaml](../examples/blog-logging/counter-pod.yaml):
```
apiVersion: v1
kind: Pod
@ -66,6 +66,6 @@ describes how to ingest cluster level logs into Elasticsearch and view them usin
## Ingesting Application Log Files
Cluster level logging only collects the standard output and standard error output of the applications
running in containers. The guide [Collecting log files within containers with Fluentd](/contrib/logging/fluentd-sidecar-gcp/README.md) explains how the log files of applications can also be ingested into Google Cloud logging.
running in containers. The guide [Collecting log files within containers with Fluentd](../contrib/logging/fluentd-sidecar-gcp/README.md) explains how the log files of applications can also be ingested into Google Cloud logging.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/logging.md?pixel)]()

View File

@ -171,7 +171,7 @@ people have reported success with Flannel and Kubernetes.
### OpenVSwitch
[OpenVSwitch](./ovs-networking.md) is a somewhat more mature but also
[OpenVSwitch](ovs-networking.md) is a somewhat more mature but also
complicated way to build an overlay network. This is endorsed by several of the
"Big Shops" for networking.

View File

@ -3,7 +3,7 @@
This document describes how OpenVSwitch is used to setup networking between pods across nodes.
The tunnel type could be GRE or VxLAN. VxLAN is preferable when large scale isolation needs to be performed within the network.
![ovs-networking](./ovs-networking.png "OVS Networking")
![ovs-networking](ovs-networking.png "OVS Networking")
The vagrant setup in Kubernetes does the following:

View File

@ -1,6 +1,6 @@
# Persistent Volumes and Claims
This document describes the current state of `PersistentVolumes` in Kubernetes. Familiarity with [volumes](./volumes.md) is suggested.
This document describes the current state of `PersistentVolumes` in Kubernetes. Familiarity with [volumes](volumes.md) is suggested.
Managing storage is a distinct problem from managing compute. The `PersistentVolume` subsystem provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed. To do this we introduce two new API resources: `PersistentVolume` and `PersistentVolumeClaim`.
@ -8,7 +8,7 @@ A `PersistentVolume` (PV) is a piece of networked storage in the cluster that ha
A `PersistentVolumeClaim` (PVC) is a request for storage by a user. It is similar to a pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g., can be mounted once read/write or many times read-only).
Please see the [detailed walkthrough with working examples](../examples/persistent-volumes).
Please see the [detailed walkthrough with working examples](../examples/persistent-volumes/).
## Lifecycle of a volume and claim
@ -77,7 +77,7 @@ Each PV contains a spec and status, which is the specification and status of the
### Capacity
Generally, a PV will have a specific storage capacity. This is set using the PV's `capacity` attribute. See the Kubernetes [Resource Model](./design/resources.md) to understand the units expected by `capacity`.
Generally, a PV will have a specific storage capacity. This is set using the PV's `capacity` attribute. See the Kubernetes [Resource Model](design/resources.md) to understand the units expected by `capacity`.
Currently, storage size is the only resource that can be set or requested. Future attributes may include IOPS, throughput, etc.
@ -145,7 +145,7 @@ Claims use the same conventions as volumes when requesting storage with specific
### Resources
Claims, like pods, can request specific quantities of a resource. In this case, the request is for storage. The same [resource model](./design/resources.md) applies to both volumes and claims.
Claims, like pods, can request specific quantities of a resource. In this case, the request is for storage. The same [resource model](design/resources.md) applies to both volumes and claims.
## <a name="claims-as-volumes"></a> Claims As Volumes

View File

@ -95,7 +95,7 @@ We should define a grains.conf key that captures more specifically what network
## Further reading
The [cluster/saltbase](../cluster/saltbase) tree has more details on the current SaltStack configuration.
The [cluster/saltbase](../cluster/saltbase/) tree has more details on the current SaltStack configuration.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/salt.md?pixel)]()

View File

@ -52,7 +52,7 @@ allowed. The values are arbitrary data, encoded using base64. The values of
username and password in the example above, before base64 encoding,
are `value-1` and `value-2`, respectively, with carriage return and newline characters at the end.
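As an illustration (not part of this change), the corresponding base64 values can be double-checked with a few lines of Go; the `\r\n` suffix follows the description above:
```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Encode "value-1" and "value-2", each followed by a carriage return and newline.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("value-1\r\n"))) // dmFsdWUtMQ0K
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("value-2\r\n"))) // dmFsdWUtMg0K
}
```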
Create the secret using [`kubectl create`](kubectl-create.md).
Create the secret using [`kubectl create`](kubectl_create.md).
Once the secret is created, you can:
- create pods that automatically use it via a [Service Account](service_accounts.md).
@ -98,15 +98,13 @@ You can package many files into one secret, or use many secrets,
whichever is convenient.
### Manually specifying an imagePullSecret
Use of imagePullSecrets is described in the [images documentation](
images.md#specifying-imagepullsecrets-on-a-pod)
Use of imagePullSecrets is described in the [images documentation](images.md#specifying-imagepullsecrets-on-a-pod)
### Automatic use of Manually Created Secrets
*This feature is planned but not implemented. See [issue
9902](https://github.com/GoogleCloudPlatform/kubernetes/issues/9902).*
You can reference manually created secrets from a [service account](
service_accounts.md).
You can reference manually created secrets from a [service account](service_accounts.md).
Then, pods which use that service account will have
`volumeMounts` and/or `imagePullSecrets` added to them.
The secrets will be mounted at **TBD**.

View File

@ -102,7 +102,7 @@ scp host2:/path/to/home2/.kube/config path/to/other/.kube/config
export KUBECONFIG=path/to/other/.kube/config
```
Detailed examples and explanation of `kubeconfig` loading/merging rules can be found in [kubeconfig-file.md](./kubeconfig-file.md).
Detailed examples and explanation of `kubeconfig` loading/merging rules can be found in [kubeconfig-file.md](kubeconfig-file.md).

View File

@ -38,7 +38,7 @@ kubernetes API, or to contribute directly to the kubernetes project.
(e.g. ssh keys, passwords) separately from the Pods that use them, protecting
the sensitive data from proliferation by tools that process pods.
* **Accessing the API and other cluster services via a Proxy** [accessing-the-cluster.md](../docs/accessing-the-cluster.md)
* **Accessing the API and other cluster services via a Proxy** [docs/accessing-the-cluster.md](../docs/accessing-the-cluster.md)
* **API Overview** ([api.md](api.md)): Pointers to API documentation on various topics
and explanation of Kubernetes's approaches to API changes and API versioning.

View File

@ -7,7 +7,7 @@ container starts with a clean slate. second, when running containers together
in a `Pod` it is often necessary to share files between those containers. The
Kubernetes `Volume` abstraction solves both of these problems.
Familiarity with [pods](./pods.md) is suggested.
Familiarity with [pods](pods.md) is suggested.
## Background
@ -332,7 +332,7 @@ medium of the filesystem holding the kubelet root dir (typically
pods.
In the future, we expect that `emptyDir` and `hostPath` volumes will be able to
request a certain amount of space using a [resource](./compute_resources.md)
request a certain amount of space using a [resource](compute_resources.md)
specification, and to select the type of media to use, for clusters that have
several media types.

View File

@ -46,7 +46,9 @@ _tmp="${KUBE_ROOT}/_tmp"
mkdir -p "${_tmp}"
cp -a "${DOCROOT}" "${TMP_DOCROOT}"
"${mungedocs}" "--verify=true" "--root-dir=${TMP_DOCROOT}"
# mungedocs --verify can (and should) be run on the real docs, otherwise their
# links will be distorted. --verify means that it will not make changes.
"${mungedocs}" "--verify=true" "--root-dir=${DOCROOT}"
ret=$?
if [[ $ret -eq 1 ]]; then
echo "${DOCROOT} is out of date. Please run hack/run-gendocs.sh"