Merge pull request #11350 from thockin/munge-check-kubectl-dash-f

Add munger to verify kubectl -f targets
This commit is contained in:
Brian Grant 2015-07-16 22:46:37 -07:00
commit cb58e8e43b
47 changed files with 377 additions and 122 deletions

View File

@ -0,0 +1,116 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"path"
"strings"
)
// checkKubectlFileTargets scans markdown for fenced code blocks containing
// kubectl commands that pass -f, and verifies that each referenced file
// target exists. The markdown is returned unmodified; all problems found
// are aggregated into a single newline-joined error.
func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) {
	var problems []string
	inCodeBlock := false
	allLines := splitLines(markdown)
	for idx, line := range allLines {
		// A ``` fence line toggles whether we are inside a code block.
		if strings.HasPrefix(line, "```") {
			inCodeBlock = !inCodeBlock
		}
		if !inCodeBlock {
			continue
		}
		if err := lookForKubectl(allLines, idx); err != nil {
			problems = append(problems, err.Error())
		}
	}
	if len(problems) > 0 {
		return markdown, fmt.Errorf("%s", strings.Join(problems, "\n"))
	}
	return markdown, nil
}
// lookForKubectl checks a single line for a "kubectl" invocation and, if one
// is found, validates any -f file target it carries. Lines without kubectl
// are accepted silently.
func lookForKubectl(lines []string, lineNum int) error {
	words := strings.Fields(lines[lineNum])
	for idx, word := range words {
		if word == "kubectl" {
			return gotKubectl(lineNum, words, idx)
		}
	}
	return nil
}
// gotKubectl scans the fields after a "kubectl" token for one of the
// subcommands that accepts -f (create/update/replace/delete) and hands off
// to gotCommand. Lines with no such subcommand are accepted silently.
func gotKubectl(line int, fields []string, fieldNum int) error {
	for off, field := range fields[fieldNum+1:] {
		switch field {
		case "create", "update", "replace", "delete":
			return gotCommand(line, fields, fieldNum+1+off)
		}
	}
	return nil
}
// gotCommand scans the fields after a subcommand for a -f flag — either the
// separate form ("-f target") or the fused form ("-ftarget") — and hands off
// to gotDashF. Commands without -f are accepted silently.
func gotCommand(line int, fields []string, fieldNum int) error {
	for off, field := range fields[fieldNum+1:] {
		if strings.HasPrefix(field, "-f") {
			return gotDashF(line, fields, fieldNum+1+off)
		}
	}
	return nil
}
// gotDashF validates the file target of a -f flag. The target may be the
// next field ("-f foo.yaml") or fused onto the flag ("-ffoo.yaml"). Stdin
// ("-"), the documented placeholder "FILENAME", URLs, same-dir ("./") paths,
// and absolute paths are accepted without checking; any other target must
// exist on disk under *rootDir/*repoRoot.
func gotDashF(line int, fields []string, fieldNum int) error {
	var target string
	if fields[fieldNum] == "-f" {
		// Separate-field form: the target is the following field.
		if fieldNum+1 == len(fields) {
			return fmt.Errorf("ran out of fields after '-f'")
		}
		target = fields[fieldNum+1]
	} else {
		// Fused form: strip the leading "-f".
		target = fields[fieldNum][2:]
	}
	// Normalize directory targets so they stat like files.
	target = strings.TrimRight(target, "/")

	// Exclude the special cases that cannot or should not be checked.
	switch {
	case target == "-", target == "FILENAME":
		// stdin and the documented placeholder are OK.
		return nil
	case strings.HasPrefix(target, "http://"), strings.HasPrefix(target, "https://"):
		// URLs are OK.
		return nil
	case strings.HasPrefix(target, "./"):
		// Same-dir files are usually created in the same example.
		return nil
	case strings.HasPrefix(target, "/"):
		// Absolute paths tend to be /tmp/* and created in the same example.
		return nil
	}
	// Everything else is expected to exist in the repo.
	_, err := os.Stat(path.Join(*rootDir, *repoRoot, target))
	if os.IsNotExist(err) {
		return fmt.Errorf("%d: target file %q does not exist", line, target)
	}
	return err
}

View File

@ -0,0 +1,139 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import "testing"
// TestKubectlDashF exercises checkKubectlFileTargets against markdown
// snippets covering: no kubectl at all, kubectl outside a code block, the
// special-cased targets (stdin, FILENAME, URLs, ./ and absolute paths),
// real files that exist, and targets that must be flagged as missing.
func TestKubectlDashF(t *testing.T) {
	testCases := []struct {
		in string
		ok bool
	}{
		// No match
		{"", true},
		{"Foo\nBar\n", true},
		{"Foo\nkubectl blah blech\nBar", true},
		{"Foo\n```shell\nkubectl blah blech\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create blech\n```\nBar", true},
		// Special cases
		{"Foo\n```\nkubectl -blah create -f -\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f-\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f FILENAME\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -fFILENAME\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f http://google.com/foobar\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -fhttp://google.com/foobar\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f ./foobar\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f./foobar\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f /foobar\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -f/foobar\n```\nBar", true},
		// Real checks
		{"Foo\n```\nkubectl -blah create -f mungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah create -fmungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah update -f mungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah update -fmungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah replace -f mungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah replace -fmungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah delete -f mungedocs.go\n```\nBar", true},
		{"Foo\n```\nkubectl -blah delete -fmungedocs.go\n```\nBar", true},
		// Failures
		{"Foo\n```\nkubectl -blah delete -f does_not_exist\n```\nBar", false},
		{"Foo\n```\nkubectl -blah delete -fdoes_not_exist\n```\nBar", false},
	}
	for caseNum, tc := range testCases {
		// Resolve targets relative to the current directory.
		*rootDir = ""
		*repoRoot = ""
		_, err := checkKubectlFileTargets("filename.md", []byte(tc.in))
		if tc.ok && err != nil {
			t.Errorf("case[%d]: expected success, got %v", caseNum, err)
		}
		if !tc.ok && err == nil {
			t.Errorf("case[%d]: unexpected success", caseNum)
		}
	}
}

View File

@ -49,6 +49,7 @@ Examples:
{"check-links", checkLinks},
{"unversioned-warning", updateUnversionedWarning},
{"analytics", checkAnalytics},
{"kubectl-dash-f", checkKubectlFileTargets},
}
availableMungeList = func() string {
names := []string{}

View File

@ -93,7 +93,7 @@ $ cat <<EOF > quota.json
}
}
EOF
$ kubectl create -f quota.json
$ kubectl create -f ./quota.json
$ kubectl get quota
NAME
quota

View File

@ -94,7 +94,7 @@ secret.json:
"type": "kubernetes.io/service-account-token"
}
$ kubectl create -f secret.json
$ kubectl create -f ./secret.json
$ kubectl describe secret mysecretname
```

View File

@ -56,7 +56,7 @@ spec:
Note that we omit the labels and the selector fields of the replication controller, because they will be populated from the labels field of the pod template by default.
```
kubectl create -f controller.yaml
kubectl create -f ./controller.yaml
```
This will spin up 24 instances of the test. They will run to completion, then exit, and the kubelet will restart them, accumulating more and more runs of the test.

View File

@ -194,7 +194,7 @@ Create a pod manifest: `pod.json`
### Create the pod using the kubectl command line tool
```bash
kubectl create -f pod.json
kubectl create -f ./pod.json
```
### Testing

View File

@ -97,12 +97,12 @@ kube-02 environment=production Ready
Let's follow the Guestbook example now:
```
cd guestbook-example
kubectl create -f redis-master-controller.json
kubectl create -f redis-master-service.json
kubectl create -f redis-slave-controller.json
kubectl create -f redis-slave-service.json
kubectl create -f frontend-controller.json
kubectl create -f frontend-service.json
kubectl create -f examples/guestbook/redis-master-controller.yaml
kubectl create -f examples/guestbook/redis-master-service.yaml
kubectl create -f examples/guestbook/redis-slave-controller.yaml
kubectl create -f examples/guestbook/redis-slave-service.yaml
kubectl create -f examples/guestbook/frontend-controller.yaml
kubectl create -f examples/guestbook/frontend-service.yaml
```
You need to wait for the pods to get deployed, run the following and wait for `STATUS` to change from `Unknown`, through `Pending` to `Running`.

View File

@ -146,7 +146,7 @@ done
Now create a node object internally in your kubernetes cluster by running:
```
$ kubectl create -f node.json
$ kubectl create -f ./node.json
$ kubectl get nodes
NAME LABELS STATUS
@ -205,7 +205,7 @@ fed-node name=fed-node-label Ready
To delete _fed-node_ from your kubernetes cluster, one should run the following on fed-master (Please do not do it, it is just for information):
```
$ kubectl delete -f node.json
$ kubectl delete -f ./node.json
```
*You should be finished!*

View File

@ -63,7 +63,7 @@ This pod specification has one container which runs a bash script when the conta
namespace.
```
$ kubectl create -f counter-pod.yaml
$ kubectl create -f examples/blog-logging/counter-pod.yaml
pods/counter
```
@ -114,7 +114,7 @@ pods/counter
Now lets restart the counter.
```
$ kubectl create -f counter-pod.yaml
$ kubectl create -f examples/blog-logging/counter-pod.yaml
pods/counter
```
Lets wait for the container to restart and get the log lines again.

View File

@ -204,7 +204,7 @@ EOPOD
Send the pod description to Kubernetes using the `kubectl` CLI:
```bash
$ kubectl create -f nginx.yaml
$ kubectl create -f ./nginx.yaml
pods/nginx
```
@ -262,8 +262,8 @@ sed -e "s/{{ pillar\['dns_server'\] }}/10.10.10.10/g" \
Now the kube-dns pod and service are ready to be launched:
```bash
kubectl create -f skydns-rc.yaml
kubectl create -f skydns-svc.yaml
kubectl create -f ./skydns-rc.yaml
kubectl create -f ./skydns-svc.yaml
```
Check with `kubectl get pods --namespace=kube-system` that 3/3 containers of the pods are eventually up and running. Note that the kube-dns pods run in the `kube-system` namespace, not in `default`.
@ -292,7 +292,7 @@ EOF
Then start the pod:
```bash
kubectl create -f busybox.yaml
kubectl create -f ./busybox.yaml
```
When the pod is up and running, start a lookup for the Kubernetes master service, made available on 10.10.10.1 by default:

View File

@ -133,7 +133,7 @@ JSON and YAML formats are accepted.
.nf
// Create a pod using the data in pod.json.
$ kubectl create \-f pod.json
$ kubectl create \-f ./pod.json
// Create a pod based on the JSON passed into stdin.
$ cat pod.json | kubectl create \-f \-

View File

@ -166,7 +166,7 @@ will be lost along with the rest of the resource.
.nf
// Delete a pod using the type and name specified in pod.json.
$ kubectl delete \-f pod.json
$ kubectl delete \-f ./pod.json
// Delete a pod based on the type and name in the JSON passed into stdin.
$ cat pod.json | kubectl delete \-f \-

View File

@ -149,13 +149,13 @@ JSON and YAML formats are accepted.
.nf
// Replace a pod using the data in pod.json.
$ kubectl replace \-f pod.json
$ kubectl replace \-f ./pod.json
// Replace a pod based on the JSON passed into stdin.
$ cat pod.json | kubectl replace \-f \-
// Force replace, delete and then re\-create the resource
kubectl replace \-\-force \-f pod.json
kubectl replace \-\-force \-f ./pod.json
.fi
.RE

View File

@ -71,7 +71,7 @@ The [`command`](containers.md#containers-and-commands) overrides the Docker cont
This pod can be created using the `create` command:
```bash
$ kubectl create -f hello-world.yaml
$ kubectl create -f ./hello-world.yaml
pods/hello-world
```
`kubectl` prints the resource type and name of the resource created when successful.
@ -80,7 +80,7 @@ pods/hello-world
If you're not sure you specified the resource correctly, you can ask `kubectl` to validate it for you:
```bash
$ kubectl create -f hello-world.yaml --validate
$ kubectl create -f ./hello-world.yaml --validate
```
Let's say you specified `entrypoint` instead of `command`. You'd see output as follows:

View File

@ -72,7 +72,7 @@ spec:
This makes it accessible from any node in your cluster. Check the nodes the pod is running on:
```shell
$ kubectl create -f nginxrc.yaml
$ kubectl create -f ./nginxrc.yaml
$ kubectl get pods -l app=nginx -o wide
my-nginx-6isf4 1/1 Running 0 2h e2e-test-beeps-minion-93ly
my-nginx-t26zt 1/1 Running 0 2h e2e-test-beeps-minion-93ly
@ -191,7 +191,7 @@ spec:
```
And perform a lookup of the nginx Service
```shell
$ kubectl create -f curlpod.yaml
$ kubectl create -f ./curlpod.yaml
default/curlpod
$ kubectl get pods curlpod
NAME READY STATUS RESTARTS AGE
@ -275,7 +275,7 @@ Noteworthy points about the nginx-app manifest:
- Each container has access to the keys through a volume mounted at /etc/nginx/ssl. This is setup *before* the nginx server is started.
```shell
$ kubectl delete rc,svc -l app=nginx; kubectl create -f nginx-app.yaml
$ kubectl delete rc,svc -l app=nginx; kubectl create -f ./nginx-app.yaml
replicationcontrollers/my-nginx
services/nginxsvc
services/nginxsvc
@ -323,7 +323,7 @@ spec:
- mountPath: /etc/nginx/ssl
name: secret-volume
$ kubectl create -f curlpod.yaml
$ kubectl create -f ./curlpod.yaml
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
curlpod 1/1 Running 0 2m
@ -375,7 +375,7 @@ $ curl https://104.197.63.17:30645 -k
Let's now recreate the Service to use a cloud load balancer, just change the `Type` of Service in the nginx-app.yaml from `NodePort` to `LoadBalancer`:
```shell
$ kubectl delete rc, svc -l app=nginx
$ kubectl create -f nginx-app.yaml
$ kubectl create -f ./nginx-app.yaml
$ kubectl get svc -o json | grep -i ingress -A 5
"ingress": [
{

View File

@ -64,7 +64,7 @@ Some differences compared to specifying just a pod are that the `kind` is `Repli
This replication controller can be created using `create`, just as with pods:
```bash
$ kubectl create -f nginx-rc.yaml
$ kubectl create -f ./nginx-rc.yaml
replicationcontrollers/my-nginx
```

View File

@ -53,7 +53,7 @@ We can use these environment variables in applications to find the service.
It is convenient to use `kubectl exec` to check if the volumes are mounted as expected.
We first create a Pod with a volume mounted at /data/redis,
```
kubectl create -f docs/user-guide/walkthrough/pod2.yaml
kubectl create -f docs/user-guide/walkthrough/pod-redis.yaml
```
wait until the pod is Running and Ready,
```

View File

@ -104,7 +104,7 @@ example, run these on your desktop/laptop:
Verify by creating a pod that uses a private image, e.g.:
```
$ cat <<EOF > private-image-test-1.yaml
$ cat <<EOF > /tmp/private-image-test-1.yaml
apiVersion: v1
kind: Pod
metadata:
@ -116,7 +116,7 @@ spec:
command: [ "echo", "SUCCESS" ]
imagePullPolicy: Always
EOF
$ kubectl create -f private-image-test-1.yaml
$ kubectl create -f /tmp/private-image-test-1.yaml
pods/private-image-test-1
$
```
@ -186,7 +186,7 @@ $ echo $(cat ~/.dockercfg)
$ cat ~/.dockercfg | base64
eyAiaHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjogeyAiYXV0aCI6ICJabUZyWlhCaGMzTjNiM0prTVRJSyIsICJlbWFpbCI6ICJqZG9lQGV4YW1wbGUuY29tIiB9IH0K
$ cat > image-pull-secret.yaml <<EOF
$ cat > /tmp/image-pull-secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
@ -196,7 +196,7 @@ data:
type: kubernetes.io/dockercfg
EOF
$ kubectl create -f image-pull-secret.yaml
$ kubectl create -f /tmp/image-pull-secret.yaml
secrets/myregistrykey
$
```

View File

@ -64,7 +64,7 @@ spec:
```
```bash
$ kubectl create -f <file with contents listed above>
$ kubectl create -f ./my-nginx-rc.yaml
replicationcontrollers/my-nginx
```

View File

@ -39,7 +39,7 @@ kubectl create -f FILENAME
```
// Create a pod using the data in pod.json.
$ kubectl create -f pod.json
$ kubectl create -f ./pod.json
// Create a pod based on the JSON passed into stdin.
$ cat pod.json | kubectl create -f -
@ -84,7 +84,7 @@ $ cat pod.json | kubectl create -f -
### SEE ALSO
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.955765309 +0000 UTC
###### Auto generated by spf13/cobra at 2015-07-16 22:39:16.132575015 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@ -46,7 +46,7 @@ kubectl delete ([-f FILENAME] | (RESOURCE [(NAME | -l label | --all)]
```
// Delete a pod using the type and name specified in pod.json.
$ kubectl delete -f pod.json
$ kubectl delete -f ./pod.json
// Delete a pod based on the type and name in the JSON passed into stdin.
$ cat pod.json | kubectl delete -f -
@ -106,7 +106,7 @@ $ kubectl delete pods --all
### SEE ALSO
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.95616314 +0000 UTC
###### Auto generated by spf13/cobra at 2015-07-16 05:13:00.190175769 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@ -39,13 +39,13 @@ kubectl replace -f FILENAME
```
// Replace a pod using the data in pod.json.
$ kubectl replace -f pod.json
$ kubectl replace -f ./pod.json
// Replace a pod based on the JSON passed into stdin.
$ cat pod.json | kubectl replace -f -
// Force replace, delete and then re-create the resource
kubectl replace --force -f pod.json
kubectl replace --force -f ./pod.json
```
### Options
@ -91,7 +91,7 @@ kubectl replace --force -f pod.json
### SEE ALSO
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.955895303 +0000 UTC
###### Auto generated by spf13/cobra at 2015-07-16 22:39:16.132838722 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@ -62,7 +62,7 @@ This example will work in a custom namespace to demonstrate the concepts involve
Let's create a new namespace called limit-example:
```shell
$ kubectl create -f namespace.yaml
$ kubectl create -f docs/user-guide/limitrange/namespace.yaml
namespaces/limit-example
$ kubectl get namespaces
NAME LABELS STATUS
@ -75,7 +75,7 @@ Step 2: Apply a limit to the namespace
Let's create a simple limit in our namespace.
```shell
$ kubectl create -f limits.yaml --namespace=limit-example
$ kubectl create -f docs/user-guide/limitrange/limits.yaml --namespace=limit-example
limitranges/mylimits
```
@ -140,14 +140,14 @@ Note that our nginx container has picked up the namespace default cpu and memory
Let's create a pod that exceeds our allowed limits by having it have a container that requests 3 cpu cores.
```shell
$ kubectl create -f invalid-pod.yaml --namespace=limit-example
$ kubectl create -f docs/user-guide/limitrange/invalid-pod.yaml --namespace=limit-example
Error from server: Pod "invalid-pod" is forbidden: Maximum CPU usage per pod is 2, but requested 3
```
Let's create a pod that falls within the allowed limit boundaries.
```shell
$ kubectl create -f valid-pod.yaml --namespace=limit-example
$ kubectl create -f docs/user-guide/limitrange/valid-pod.yaml --namespace=limit-example
pods/valid-pod
$ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 5 resources
containers:

View File

@ -58,8 +58,8 @@ This [guide](../walkthrough/k8s201.md#health-checking) has more information on h
## Get your hands dirty
To show the health check is actually working, first create the pods:
```
# kubectl create -f exec-liveness.yaml
# kubectl create -f http-liveness.yaml
# kubectl create -f docs/user-guide/liveness/exec-liveness.yaml
# kubectl create -f docs/user-guide/liveness/http-liveness.yaml
```
Check the status of the pods once they are created:

View File

@ -43,7 +43,7 @@ output every second [counter-pod.yaml](../../examples/blog-logging/counter-pod.y
```
we can run the pod:
```
$ kubectl create -f counter-pod.yaml
$ kubectl create -f ./counter-pod.yaml
pods/counter
```
and then fetch the logs:

View File

@ -78,7 +78,7 @@ spec:
Multiple resources can be created the same way as a single resource:
```bash
$ kubectl create -f nginx-app.yaml
$ kubectl create -f ./nginx-app.yaml
services/my-nginx-svc
replicationcontrollers/my-nginx
```
@ -87,12 +87,12 @@ The resources will be created in the order they appear in the file. Therefore, i
`kubectl create` also accepts multiple `-f` arguments:
```bash
$ kubectl create -f nginx-svc.yaml -f nginx-rc.yaml
$ kubectl create -f ./nginx-svc.yaml -f ./nginx-rc.yaml
```
And a directory can be specified rather than or in addition to individual files:
```bash
$ kubectl create -f nginx/
$ kubectl create -f ./nginx/
```
`kubectl` will read any files with suffixes `.yaml`, `.yml`, or `.json`.
@ -107,7 +107,7 @@ replicationcontrollers/nginx
Resource creation isn't the only operation that `kubectl` can perform in bulk. It can also extract resource names from configuration files in order to perform other operations, in particular to delete the same resources you created:
```bash
$ kubectl delete -f nginx/
$ kubectl delete -f ./nginx/
replicationcontrollers/my-nginx
services/my-nginx-svc
```
@ -126,7 +126,7 @@ services/my-nginx-svc
Because `kubectl` outputs resource names in the same syntax it accepts, it's easy to chain operations using `$()` or `xargs`:
```bash
$ kubectl get $(kubectl create -f nginx/ | grep my-nginx)
$ kubectl get $(kubectl create -f ./nginx/ | grep my-nginx)
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
my-nginx nginx nginx app=nginx 2
NAME LABELS SELECTOR IP(S) PORT(S)
@ -158,7 +158,7 @@ and
```
The labels allow us to slice and dice our resources along any dimension specified by a label:
```bash
$ kubectl create -f guestbook-fe.yaml -f redis-master.yaml -f redis-slave.yaml
$ kubectl create -f ./guestbook-fe.yaml -f ./redis-master.yaml -f ./redis-slave.yaml
replicationcontrollers/guestbook-fe
replicationcontrollers/guestbook-redis-master
replicationcontrollers/guestbook-redis-slave
@ -339,7 +339,7 @@ spec:
```
and roll it out:
```bash
$ kubectl rolling-update my-nginx -f nginx-rc.yaml
$ kubectl rolling-update my-nginx -f ./nginx-rc.yaml
Creating my-nginx-v4
At beginning of loop: my-nginx replicas: 4, my-nginx-v4 replicas: 1
Updating my-nginx replicas: 4, my-nginx-v4 replicas: 1
@ -380,10 +380,9 @@ The patch is specified using json.
For more significant changes, you can `get` the resource, edit it, and then `replace` the resource with the updated version:
```bash
$ export TMP=/tmp/nginx.yaml
$ kubectl get rc my-nginx-v4 -o yaml > $TMP
$ emacs $TMP
$ kubectl replace -f $TMP
$ kubectl get rc my-nginx-v4 -o yaml > /tmp/nginx.yaml
$ vi /tmp/nginx.yaml
$ kubectl replace -f /tmp/nginx.yaml
replicationcontrollers/my-nginx-v4
$ rm $TMP
```
@ -392,7 +391,7 @@ The system ensures that you dont clobber changes made by other users or compo
In some cases, you may need to update resource fields that cannot be updated once initialized, or you may just want to make a recursive change immediately, such as to fix broken pods created by a replication controller. To change such fields, use `replace --force`, which deletes and re-creates the resource. In this case, you can simply modify your original configuration file:
```bash
$ kubectl replace -f nginx-rc.yaml --force
$ kubectl replace -f ./nginx-rc.yaml --force
replicationcontrollers/my-nginx-v4
replicationcontrollers/my-nginx-v4
```

View File

@ -129,7 +129,7 @@ More information on the ```finalizers``` field can be found in the namespace [de
Then run:
```
kubectl create -f my-namespace.yaml
kubectl create -f ./my-namespace.yaml
```
### Setting the namespace for a request

View File

@ -91,7 +91,7 @@ data:
```
As with other resources, this secret can be instantiated using `create` and can be viewed with `get`:
```bash
$ kubectl create -f secret.yaml
$ kubectl create -f ./secret.yaml
secrets/mysecret
$ kubectl get secrets
NAME TYPE DATA
@ -154,7 +154,7 @@ $ echo $(cat ~/.dockercfg)
$ cat ~/.dockercfg | base64
eyAiaHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjogeyAiYXV0aCI6ICJabUZyWlhCaGMzTjNiM0prTVRJSyIsICJlbWFpbCI6ICJqZG9lQGV4YW1wbGUuY29tIiB9IH0K
$ cat > image-pull-secret.yaml <<EOF
$ cat > /tmp/image-pull-secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
@ -164,7 +164,7 @@ data:
type: kubernetes.io/dockercfg
EOF
$ kubectl create -f image-pull-secret.yaml
$ kubectl create -f ./image-pull-secret.yaml
secrets/myregistrykey
```
@ -342,7 +342,7 @@ spec:
The message is recorded along with the other state of the last (i.e., most recent) termination:
```bash
$ kubectl create -f pod.yaml
$ kubectl create -f ./pod.yaml
pods/pod-w-message
$ sleep 70
$ kubectl get pods/pod-w-message -o template -t "{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}"

View File

@ -33,7 +33,7 @@ This example will work in a custom namespace to demonstrate the concepts involve
Let's create a new namespace called quota-example:
```shell
$ kubectl create -f namespace.yaml
$ kubectl create -f docs/user-guide/resourcequota/namespace.yaml
$ kubectl get namespaces
NAME LABELS STATUS
default <none> Active
@ -53,7 +53,7 @@ and API resources (pods, services, etc.) that a namespace may consume.
Let's create a simple quota in our namespace:
```shell
$ kubectl create -f quota.yaml --namespace=quota-example
$ kubectl create -f docs/user-guide/resourcequota/quota.yaml --namespace=quota-example
```
Once your quota is applied to a namespace, the system will restrict any creation of content
@ -121,7 +121,7 @@ do not specify any memory usage.
So let's set some default limits for the amount of cpu and memory a pod can consume:
```shell
$ kubectl create -f limits.yaml --namespace=quota-example
$ kubectl create -f docs/user-guide/resourcequota/limits.yaml --namespace=quota-example
limitranges/limits
$ kubectl describe limits limits --namespace=quota-example
Name: limits

View File

@ -62,13 +62,13 @@ default 1
You can create additional serviceAccounts like this:
```
$ cat > serviceaccount.yaml <<EOF
$ cat > /tmp/serviceaccount.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: build-robot
EOF
$ kubectl create -f serviceaccount.json
$ kubectl create -f /tmp/serviceaccount.json
serviceacccounts/build-robot
```

View File

@ -31,7 +31,7 @@ can be code reviewed, producing a more robust, reliable and archival system.
```bash
cd kubernetes
kubectl create -f pod.yaml
kubectl create -f ./pod.yaml
```
Where pod.yaml contains something like:
@ -70,7 +70,7 @@ cluster.
```bash
cd kubernetes
kubectl create -f replication.yaml
kubectl create -f ./replication.yaml
```
Where ```replication.yaml``` contains:

View File

@ -34,7 +34,7 @@ When you create a resource such as pod, and then retrieve the created
resource, a number of the fields of the resource are added.
You can see this at work in the following example:
```
$ cat > original.yaml <<EOF
$ cat > /tmp/original.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
@ -45,17 +45,17 @@ spec:
image: busybox
restartPolicy: Never
EOF
$ kubectl create -f original.yaml
$ kubectl create -f /tmp/original.yaml
pods/original
$ kubectl get pods/original -o yaml > current.yaml
$ kubectl get pods/original -o yaml > /tmp/current.yaml
pods/original
$ wc -l original.yaml current.yaml
51 current.yaml
9 original.yaml
$ wc -l /tmp/original.yaml /tmp/current.yaml
51 /tmp/current.yaml
9 /tmp/original.yaml
60 total
```
The resource we posted had only 9 lines, but the one we got back had 51 lines.
If you `diff original.yaml current.yaml`, you can see the fields added to the pod.
If you `diff -u /tmp/original.yaml /tmp/current.yaml`, you can see the fields added to the pod.
The system adds fields in several ways:
- Some fields are added synchronously with creation of the resource and some are set asynchronously.
- For example: `metadata.uid` is set synchronously. (Read more about [metadata](../devel/api-conventions.md#metadata)).

View File

@ -27,7 +27,7 @@ Create a volume in the same region as your node add your volume
information in the pod description file aws-ebs-web.yaml then create
the pod:
```shell
$ kubectl create -f aws-ebs-web.yaml
$ kubectl create -f examples/aws_ebs/aws-ebs-web.yaml
```
Add some data to the volume if is empty:
```shell

View File

@ -104,13 +104,13 @@ The important thing to note here is the ```selector```. It is a query over label
Create this service as follows:
```sh
$ kubectl create -f cassandra-service.yaml
$ kubectl create -f examples/cassandra/cassandra-service.yaml
```
Now, as the service is running, we can create the first Cassandra pod using the mentioned specification.
```sh
$ kubectl create -f cassandra.yaml
$ kubectl create -f examples/cassandra/cassandra.yaml
```
After a few moments, you should be able to see the pod running, plus its single container:
@ -208,7 +208,7 @@ Most of this replication controller definition is identical to the Cassandra pod
Create this controller:
```sh
$ kubectl create -f cassandra-controller.yaml
$ kubectl create -f examples/cassandra/cassandra-controller.yaml
```
Now this is actually not that interesting, since we haven't actually done anything new. Now it will get interesting.
@ -267,13 +267,13 @@ For those of you who are impatient, here is the summary of the commands we ran i
```sh
# create a service to track all cassandra nodes
kubectl create -f cassandra-service.yaml
kubectl create -f examples/cassandra/cassandra-service.yaml
# create a single cassandra node
kubectl create -f cassandra.yaml
kubectl create -f examples/cassandra/cassandra.yaml
# create a replication controller to replicate cassandra nodes
kubectl create -f cassandra-controller.yaml
kubectl create -f examples/cassandra/cassandra-controller.yaml
# scale up to 2 nodes
kubectl scale rc cassandra --replicas=2

View File

@ -125,13 +125,13 @@ data:
```
which can be used to create the secret in your namespace:
```
kubectl create -f apiserver-secret.yaml --namespace=mytunes
kubectl create -f examples/elasticsearch/apiserver-secret.yaml --namespace=mytunes
secrets/apiserver-secret
```
Now you are ready to create the replication controller which will then create the pods:
```
$ kubectl create -f music-rc.yaml --namespace=mytunes
$ kubectl create -f examples/elasticsearch/music-rc.yaml --namespace=mytunes
replicationcontrollers/music-db
```
@ -156,7 +156,7 @@ spec:
```
Let's create the service with an external load balancer:
```
$ kubectl create -f music-service.yaml --namespace=mytunes
$ kubectl create -f examples/elasticsearch/music-service.yaml --namespace=mytunes
services/music-server
```

View File

@ -35,7 +35,7 @@ Currently, you can look at:
Example from command line (the DNS lookup looks better from a web browser):
```
$ kubectl create -f pod.json
$ kubectl create -f examples/explorer/pod.json
$ kubectl proxy &
Starting to serve on localhost:8001

View File

@ -93,7 +93,7 @@ spec:
Change to the `<kubernetes>/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running:
```shell
$ kubectl create -f redis-master-controller.yaml
$ kubectl create -f examples/guestbook/redis-master-controller.yaml
replicationcontrollers/redis-master
```
@ -208,7 +208,7 @@ spec:
Create the service by running:
```shell
$ kubectl create -f redis-master-service.yaml
$ kubectl create -f examples/guestbook/redis-master-service.yaml
services/redis-master
```
Then check the list of services, which should include the redis-master:
@ -276,7 +276,7 @@ spec:
and create the replication controller by running:
```shell
$ kubectl create -f redis-slave-controller.yaml
$ kubectl create -f examples/guestbook/redis-slave-controller.yaml
replicationcontrollers/redis-slave
$ kubectl get rc
@ -324,7 +324,7 @@ This time the selector for the service is `name=redis-slave`, because that ident
Now that you have created the service specification, create it in your cluster by running:
```shell
$ kubectl create -f redis-slave-service.yaml
$ kubectl create -f examples/guestbook/redis-slave-service.yaml
services/redis-slave
$ kubectl get services
@ -367,7 +367,7 @@ spec:
Using this file, you can turn up your frontend with:
```shell
$ kubectl create -f frontend-controller.yaml
$ kubectl create -f examples/guestbook/frontend-controller.yaml
replicationcontrollers/frontend
```
@ -476,7 +476,7 @@ To do this, uncomment the `type: LoadBalancer` line in the `frontend-service.yam
Create the service like this:
```shell
$ kubectl create -f frontend-service.yaml
$ kubectl create -f examples/guestbook/frontend-service.yaml
services/frontend
```

View File

@ -69,7 +69,7 @@ The important thing to note here is the `selector`. It is a query over labels, t
Create this service as follows:
```sh
$ kubectl create -f hazelcast-service.yaml
$ kubectl create -f examples/hazelcast/hazelcast-service.yaml
```
### Adding replicated nodes
@ -124,7 +124,7 @@ Last but not least, we set `DNS_DOMAIN` environment variable according to your K
Create this controller:
```sh
$ kubectl create -f hazelcast-controller.yaml
$ kubectl create -f examples/hazelcast/hazelcast-controller.yaml
```
After the controller successfully provisions the pod, you can query the service endpoints:
@ -230,10 +230,10 @@ For those of you who are impatient, here is the summary of the commands we ran i
```sh
# create a service to track all hazelcast nodes
kubectl create -f hazelcast-service.yaml
kubectl create -f examples/hazelcast/hazelcast-service.yaml
# create a replication controller to replicate hazelcast nodes
kubectl create -f hazelcast-controller.yaml
kubectl create -f examples/hazelcast/hazelcast-controller.yaml
# scale up to 2 nodes
kubectl scale rc hazelcast --replicas=2

View File

@ -40,7 +40,7 @@ You need a [running kubernetes cluster](../../docs/getting-started-guides/) for
$ kubectl create -f /tmp/secret.json
secrets/nginxsecret
$ kubectl create -f nginx-app.yaml
$ kubectl create -f examples/https-nginx/nginx-app.yaml
services/nginxsvc
replicationcontrollers/my-nginx

View File

@ -52,7 +52,7 @@ mkfs.ext4 /dev/<name of device>
Once your pod is created, run it on the Kubernetes master:
```console
kubectl create -f your_new_pod.json
kubectl create -f ./your_new_pod.json
```
Here is my command and output:

View File

@ -135,14 +135,14 @@ gcloud compute disks create --size=200GB mongo-disk
Now you can start Mongo using that disk:
```
kubectl create -f mongo-pod.json
kubectl create -f mongo-service.json
kubectl create -f examples/meteor/mongo-pod.json
kubectl create -f examples/meteor/mongo-service.json
```
Wait until Mongo is started completely and then start up your Meteor app:
```
kubectl create -f meteor-controller.json
kubectl create -f meteor-service.json
kubectl create -f examples/meteor/meteor-controller.json
kubectl create -f examples/meteor/meteor-service.json
```
Note that [`meteor-service.json`](meteor-service.json) creates a load balancer, so

View File

@ -122,7 +122,7 @@ Note that we've defined a volume mount for `/var/lib/mysql`, and specified a vol
Once you've edited the file to set your database password, create the pod as follows, where `<kubernetes>` is the path to your Kubernetes installation:
```shell
$ kubectl create -f mysql.yaml
$ kubectl create -f examples/mysql-wordpress-pd/mysql.yaml
```
It may take a short period before the new pod reaches the `Running` state.
@ -171,7 +171,7 @@ spec:
Start the service like this:
```shell
$ kubectl create -f mysql-service.yaml
$ kubectl create -f examples/mysql-wordpress-pd/mysql-service.yaml
```
You can see what services are running via:
@ -221,7 +221,7 @@ spec:
Create the pod:
```shell
$ kubectl create -f wordpress.yaml
$ kubectl create -f examples/mysql-wordpress-pd/wordpress.yaml
```
And list the pods to check that the status of the new pod changes
@ -260,7 +260,7 @@ Note also that we've set the service port to 80. We'll return to that shortly.
Start the service:
```shell
$ kubectl create -f wordpress-service.yaml
$ kubectl create -f examples/mysql-wordpress-pd/wordpress-service.yaml
```
and see it in the list of services:
@ -307,8 +307,8 @@ Set up your WordPress blog and play around with it a bit. Then, take down its p
If you are just experimenting, you can take down and bring up only the pods:
```shell
$ kubectl delete -f wordpress.yaml
$ kubectl delete -f mysql.yaml
$ kubectl delete -f examples/mysql-wordpress-pd/wordpress.yaml
$ kubectl delete -f examples/mysql-wordpress-pd/mysql.yaml
```
When you restart the pods again (using the `create` operation as described above), their services will pick up the new pods based on their labels.

View File

@ -39,7 +39,7 @@ Rethinkdb will discover peers using endpoints provided by the kubernetes service,
so first create a service so the following pod can query its endpoint
```shell
$kubectl create -f driver-service.yaml
$kubectl create -f examples/rethinkdb/driver-service.yaml
```
check out:
@ -56,7 +56,7 @@ rethinkdb-driver db=influxdb db=rethinkdb 10.0.27.114 28015/TCP
start the first server in the cluster
```shell
$kubectl create -f rc.yaml
$kubectl create -f examples/rethinkdb/rc.yaml
```
Actually, you can start as many servers as you want at one time; just modify the `replicas` in `rc.yaml`
@ -99,8 +99,8 @@ Admin
You need a separate pod (labeled as role:admin) to access the Web Admin UI
```shell
kubectl create -f admin-pod.yaml
kubectl create -f admin-service.yaml
kubectl create -f examples/rethinkdb/admin-pod.yaml
kubectl create -f examples/rethinkdb/admin-service.yaml
```
find the service

View File

@ -36,7 +36,7 @@ const (
JSON and YAML formats are accepted.`
create_example = `// Create a pod using the data in pod.json.
$ kubectl create -f pod.json
$ kubectl create -f ./pod.json
// Create a pod based on the JSON passed into stdin.
$ cat pod.json | kubectl create -f -`

View File

@ -43,7 +43,7 @@ Note that the delete command does NOT do resource version checks, so if someone
submits an update to a resource right when you submit a delete, their update
will be lost along with the rest of the resource.`
delete_example = `// Delete a pod using the type and name specified in pod.json.
$ kubectl delete -f pod.json
$ kubectl delete -f ./pod.json
// Delete a pod based on the type and name in the JSON passed into stdin.
$ cat pod.json | kubectl delete -f -

View File

@ -35,13 +35,13 @@ const (
JSON and YAML formats are accepted.`
replace_example = `// Replace a pod using the data in pod.json.
$ kubectl replace -f pod.json
$ kubectl replace -f ./pod.json
// Replace a pod based on the JSON passed into stdin.
$ cat pod.json | kubectl replace -f -
// Force replace, delete and then re-create the resource
kubectl replace --force -f pod.json`
kubectl replace --force -f ./pod.json`
)
func NewCmdReplace(f *cmdutil.Factory, out io.Writer) *cobra.Command {