Merge pull request #11839 from eparis/munger-rewrite

Major rewrite for docs munger

Commit: fd7a48f379
cmd/mungedocs/README.md (new file, +22)

@@ -0,0 +1,22 @@
# Documentation Mungers

Basically this is like lint/gofmt for md docs.

It basically does the following:
 - iterate over all files in the given doc root.
 - for each file split it into a slice (mungeLines) of lines (mungeLine)
   - a mungeLine has metadata about each line, typically determined by a 'fast' regex.
     - metadata contains things like 'is inside a preformatted block'
     - contains a markdown header
     - has a link to another file
     - etc.
   - if you have a really slow regex with a lot of backtracking you might want to write a fast one to limit how often you run the slow one.
 - each munger is then called in turn
   - they are given the mungeLines
   - they create an entirely new set of mungeLines with their modifications
   - the new set is returned
   - the new set is then fed into the next munger.
 - in the end we might commit the final mungeLines to the file or not (--verify)


[]()
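To make the README's flow concrete, here is a minimal, self-contained Go sketch of that pipeline. It is illustrative only: the real `munge` type in the diffs below is a struct that also carries a name, `mungeLine` records more metadata (`link`, `beginTag`, `endTag`), and the `run` helper and identity munger here are hypothetical.

```go
package main

import "fmt"

// Simplified stand-ins for the types introduced in this PR.
type mungeLine struct {
	data         string
	preformatted bool // true while inside a ``` block
	header       bool // line starts with '#'
}

type mungeLines []mungeLine

// Each munger maps one set of lines to a brand-new set.
// (The real PR wraps this in a struct that also carries a name.)
type munge func(filePath string, in mungeLines) (mungeLines, error)

// run feeds the output of each munger into the next one.
func run(filePath string, lines mungeLines, munges []munge) (mungeLines, error) {
	var err error
	for _, m := range munges {
		if lines, err = m(filePath, lines); err != nil {
			return nil, err
		}
	}
	return lines, nil
}

func main() {
	doc := mungeLines{{data: "# Title", header: true}, {data: "some text"}}
	// A no-op munger, just to show the plumbing.
	identity := func(_ string, in mungeLines) (mungeLines, error) { return in, nil }
	out, err := run("doc.md", doc, []munge{identity})
	fmt.Println(out, err)
}
```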
@@ -17,43 +17,42 @@ limitations under the License.
package main

import (
    "bytes"
    "fmt"
    "os"
    "regexp"
    "strings"
)

var (
    beginMungeExp = regexp.QuoteMeta(beginMungeTag("GENERATED_ANALYTICS"))
    endMungeExp = regexp.QuoteMeta(endMungeTag("GENERATED_ANALYTICS"))
    analyticsExp = regexp.QuoteMeta("[ +
        "[^?]*" +
        regexp.QuoteMeta("?pixel)]()")
const analyticsMungeTag = "GENERATED_ANALYTICS"
const analyticsLinePrefix = "[
)

// This adds the analytics link to every .md file.
func checkAnalytics(fileName string, fileBytes []byte) (output []byte, err error) {
    fileName = makeRepoRelative(fileName)
    desired := fmt.Sprintf(`


`+beginMungeTag("GENERATED_ANALYTICS")+`
[]()
`+endMungeTag("GENERATED_ANALYTICS")+`
`, fileName)
    if !analyticsRE.MatchString(desired) {
        fmt.Printf("%q does not match %q", analyticsRE.String(), desired)
        os.Exit(1)
func updateAnalytics(fileName string, mlines mungeLines) (mungeLines, error) {
    var out mungeLines
    fileName, err := makeRepoRelative(fileName, fileName)
    if err != nil {
        return mlines, err
    }
    //output = replaceNonPreformattedRegexp(fileBytes, analyticsRE, func(in []byte) []byte {
    output = analyticsRE.ReplaceAllFunc(fileBytes, func(in []byte) []byte {
        return []byte{}
    })
    output = bytes.TrimRight(output, "\n")
    output = append(output, []byte(desired)...)
    return output, nil

    link := fmt.Sprintf(analyticsLinePrefix+"%s?pixel)]()", fileName)
    insertLines := getMungeLines(link)
    mlines, err = removeMacroBlock(analyticsMungeTag, mlines)
    if err != nil {
        return mlines, err
    }

    // Remove floating analytics links not surrounded by the munge tags.
    for _, mline := range mlines {
        if mline.preformatted || mline.header || mline.beginTag || mline.endTag {
            out = append(out, mline)
            continue
        }
        if strings.HasPrefix(mline.data, analyticsLinePrefix) {
            continue
        }
        out = append(out, mline)
    }
    out = appendMacroBlock(out, analyticsMungeTag)
    out, err = updateMacroBlock(out, analyticsMungeTag, insertLines)
    if err != nil {
        return mlines, err
    }
    return out, nil
}
@@ -23,67 +23,71 @@ import (
)

func TestAnalytics(t *testing.T) {
    b := beginMungeTag("GENERATED_ANALYTICS")
    e := endMungeTag("GENERATED_ANALYTICS")
    var cases = []struct {
        in string
        out string
        in string
        expected string
    }{
        {
            "aoeu",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
            "aoeu" + "\n" + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
                e + "\n"},
        {
            "aoeu" + "\n" + "\n" + "\n" +
                "[]()",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
                e + "\n"},
        {
            "aoeu" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                e + "\n",
            "aoeu" + "\n" + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
                e + "\n"},
        {
            "aoeu" + "\n" + "\n" +
                "[]()" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                e + "\n",
            "aoeu" + "\n" + "\n" + "\n" + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
                e + "\n"},
        {
            "prefix" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") +
                e +
                "\n" + "suffix",
            "prefix" + "\n" + "suffix" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
            "prefix" + "\n" + "suffix" + "\n" + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
                e + "\n"},
        {
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n",
                e + "\n",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                b + "\n" +
                "[]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
                e + "\n"},
    }
    for _, c := range cases {
        out, err := checkAnalytics("path/to/file-name.md", []byte(c.in))
    for i, c := range cases {
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        out, err := updateAnalytics("path/to/file-name.md", in)
        assert.NoError(t, err)
        if string(out) != c.out {
            t.Errorf("Expected \n\n%v\n\n but got \n\n%v\n\n", c.out, string(out))
        if !expected.Equal(out) {
            t.Errorf("Case %d Expected \n\n%v\n\n but got \n\n%v\n\n", i, expected.String(), out.String())
        }
    }
}
@@ -17,15 +17,17 @@ limitations under the License.
package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "path"
    "regexp"
    "strings"
)

const exampleMungeTag = "EXAMPLE"
const exampleToken = "EXAMPLE"

const exampleLineStart = "<!-- BEGIN MUNGE: EXAMPLE"

var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", exampleToken, `(([^ ])*.(yaml|json))`)))

// syncExamples updates all examples in markdown file.
//
@@ -43,75 +45,70 @@ const exampleMungeTag = "EXAMPLE"
//
// [Download example](../../examples/guestbook/frontend-controller.yaml)
// <!-- END MUNGE: EXAMPLE -->
func syncExamples(filePath string, markdown []byte) ([]byte, error) {
    // find the example syncer begin tag
    header := beginMungeTag(fmt.Sprintf("%s %s", exampleMungeTag, `(([^ ])*.(yaml|json))`))
    exampleLinkRE := regexp.MustCompile(header)
    lines := splitLines(markdown)
    updatedMarkdown, err := updateExampleMacroBlock(filePath, lines, exampleLinkRE, endMungeTag(exampleMungeTag))
    if err != nil {
        return updatedMarkdown, err
func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) {
    var err error
    type exampleTag struct {
        token string
        linkText string
        fileType string
    }
    return updatedMarkdown, nil
    exampleTags := []exampleTag{}

    // collect all example Tags
    for _, mline := range mlines {
        if mline.preformatted || !mline.beginTag {
            continue
        }
        line := mline.data
        if !strings.HasPrefix(line, exampleLineStart) {
            continue
        }
        match := exampleMungeTagRE.FindStringSubmatch(line)
        if len(match) < 4 {
            err = fmt.Errorf("Found unparsable EXAMPLE munge line %v", line)
            return mlines, err
        }
        tag := exampleTag{
            token: exampleToken + " " + match[1],
            linkText: match[1],
            fileType: match[3],
        }
        exampleTags = append(exampleTags, tag)
    }
    // update all example Tags
    for _, tag := range exampleTags {
        example, err := exampleContent(filePath, tag.linkText, tag.fileType)
        if err != nil {
            return mlines, err
        }
        mlines, err = updateMacroBlock(mlines, tag.token, example)
        if err != nil {
            return mlines, err
        }
    }
    return mlines, nil
}

// exampleContent retrieves the content of the file at linkPath
func exampleContent(filePath, linkPath, fileType string) (content string, err error) {
    realRoot := path.Join(*rootDir, *repoRoot) + "/"
    path := path.Join(realRoot, path.Dir(filePath), linkPath)
    dat, err := ioutil.ReadFile(path)
func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) {
    repoRel, err := makeRepoRelative(linkPath, filePath)
    if err != nil {
        return content, err
        return nil, err
    }

    fileRel, err := makeFileRelative(linkPath, filePath)
    if err != nil {
        return nil, err
    }

    dat, err := ioutil.ReadFile(repoRel)
    if err != nil {
        return nil, err
    }

    // remove leading and trailing spaces and newlines
    trimmedFileContent := strings.TrimSpace(string(dat))
    content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, linkPath)
    return
}

// updateExampleMacroBlock sync the yaml/json example between begin tag and end tag
func updateExampleMacroBlock(filePath string, lines []string, beginMarkExp *regexp.Regexp, endMark string) ([]byte, error) {
    var buffer bytes.Buffer
    betweenBeginAndEnd := false
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        if beginMarkExp.Match([]byte(trimmedLine)) {
            if betweenBeginAndEnd {
                return nil, fmt.Errorf("found second begin mark while updating macro blocks")
            }
            betweenBeginAndEnd = true
            buffer.WriteString(line)
            buffer.WriteString("\n")
            match := beginMarkExp.FindStringSubmatch(line)
            if len(match) < 4 {
                return nil, fmt.Errorf("failed to parse the link in example header")
            }
            // match[0] is the entire expression; [1] is the link text and [3] is the file type (yaml or json).
            linkText := match[1]
            fileType := match[3]
            example, err := exampleContent(filePath, linkText, fileType)
            if err != nil {
                return nil, err
            }
            buffer.WriteString(example)
        } else if trimmedLine == endMark {
            if !betweenBeginAndEnd {
                return nil, fmt.Errorf("found end mark without being mark while updating macro blocks")
            }
            // Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
            buffer.WriteString("\n")
            buffer.WriteString(line)
            buffer.WriteString("\n")
            betweenBeginAndEnd = false
        } else {
            if !betweenBeginAndEnd {
                buffer.WriteString(line)
                buffer.WriteString("\n")
            }
        }
    }
    if betweenBeginAndEnd {
        return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
    }
    return buffer.Bytes(), nil
    content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel)
    out := getMungeLines(content)
    return out, nil
}
@@ -35,24 +35,27 @@ spec:
    - containerPort: 80
`
    var cases = []struct {
        in string
        out string
        in string
        expected string
    }{
        {"", ""},
        {
            "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
        },
        {
            "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
        },
    }
    repoRoot = ""
    for _, c := range cases {
        actual, err := syncExamples("mungedocs/filename.md", []byte(c.in))
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual, err := syncExamples("filename.md", in)
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected example \n'%v' but got \n'%v'", c.out, string(actual))
        if !expected.Equal(actual) {
            t.Errorf("Expected example \n'%q' but got \n'%q'", expected.String(), actual.String())
        }
    }
}
@@ -19,53 +19,56 @@ package main
import (
    "fmt"
    "regexp"
    "strings"
)

var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
var whitespaceRegex = regexp.MustCompile(`^\s*$`)

func fixHeaderLines(fileBytes []byte) []byte {
    lines := splitLines(fileBytes)
    out := []string{}
    for i := range lines {
        matches := headerRegex.FindStringSubmatch(lines[i])
        if matches == nil {
            out = append(out, lines[i])
            continue
        }
        if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) {
            out = append(out, "")
        }
        out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2]))
        if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) {
            out = append(out, "")
func fixHeaderLine(mlines mungeLines, newlines mungeLines, linenum int) mungeLines {
    var out mungeLines

    mline := mlines[linenum]
    line := mlines[linenum].data

    matches := headerRegex.FindStringSubmatch(line)
    if matches == nil {
        out = append(out, mline)
        return out
    }

    // There must be a blank line before the # (unless first line in file)
    if linenum != 0 {
        newlen := len(newlines)
        if newlines[newlen-1].data != "" {
            out = append(out, blankMungeLine)
        }
    }
    final := strings.Join(out, "\n")
    // Preserve the end of the file.
    if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' {
        final += "\n"

    // There must be a space AFTER the ##'s
    newline := fmt.Sprintf("%s %s", matches[1], matches[2])
    newmline := newMungeLine(newline)
    out = append(out, newmline)

    // The next line needs to be a blank line (unless last line in file)
    if len(mlines) > linenum+1 && mlines[linenum+1].data != "" {
        out = append(out, blankMungeLine)
    }
    return []byte(final)
    return out
}

// Header lines need whitespace around them and after the #s.
func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) {
    fbs := splitByPreformatted(fileBytes)
    fbs = append([]fileBlock{{false, []byte{}}}, fbs...)
    fbs = append(fbs, fileBlock{false, []byte{}})

    for i := range fbs {
        block := &fbs[i]
        if block.preformatted {
func updateHeaderLines(filePath string, mlines mungeLines) (mungeLines, error) {
    var out mungeLines
    for i, mline := range mlines {
        if mline.preformatted {
            out = append(out, mline)
            continue
        }
        block.data = fixHeaderLines(block.data)
        if !mline.header {
            out = append(out, mline)
            continue
        }
        newLines := fixHeaderLine(mlines, out, i)
        out = append(out, newLines...)
    }
    output := []byte{}
    for _, block := range fbs {
        output = append(output, block.data...)
    }
    return output, nil
    return out, nil
}

@@ -24,8 +24,8 @@ import (

func TestHeaderLines(t *testing.T) {
    var cases = []struct {
        in string
        out string
        in string
        expected string
    }{
        {"", ""},
        {
@@ -62,10 +62,12 @@ func TestHeaderLines(t *testing.T) {
        },
    }
    for i, c := range cases {
        actual, err := checkHeaderLines("filename.md", []byte(c.in))
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual, err := updateHeaderLines("filename.md", in)
        assert.NoError(t, err)
        if string(actual) != c.out {
            t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
        if !actual.Equal(expected) {
            t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
        }
    }
}
@@ -25,29 +25,25 @@ import (

// Looks for lines that have kubectl commands with -f flags and files that
// don't exist.
func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) {
    inside := false
    lines := splitLines(markdown)
    errors := []string{}
    for i := range lines {
        if strings.HasPrefix(lines[i], "```") {
            inside = !inside
func updateKubectlFileTargets(file string, mlines mungeLines) (mungeLines, error) {
    var errors []string
    for i, mline := range mlines {
        if !mline.preformatted {
            continue
        }
        if inside {
            if err := lookForKubectl(lines, i); err != nil {
                errors = append(errors, err.Error())
            }
        if err := lookForKubectl(mline.data, i); err != nil {
            errors = append(errors, err.Error())
        }
    }
    err := error(nil)
    if len(errors) != 0 {
        err = fmt.Errorf("%s", strings.Join(errors, "\n"))
    }
    return markdown, err
    return mlines, err
}

func lookForKubectl(lines []string, lineNum int) error {
    fields := strings.Fields(lines[lineNum])
func lookForKubectl(line string, lineNum int) error {
    fields := strings.Fields(line)
    for i := range fields {
        if fields[i] == "kubectl" {
            return gotKubectl(lineNum, fields, i)
@@ -56,26 +52,26 @@ func lookForKubectl(lines []string, lineNum int) error {
    return nil
}

func gotKubectl(line int, fields []string, fieldNum int) error {
func gotKubectl(lineNum int, fields []string, fieldNum int) error {
    for i := fieldNum + 1; i < len(fields); i++ {
        switch fields[i] {
        case "create", "update", "replace", "delete":
            return gotCommand(line, fields, i)
            return gotCommand(lineNum, fields, i)
        }
    }
    return nil
}

func gotCommand(line int, fields []string, fieldNum int) error {
func gotCommand(lineNum int, fields []string, fieldNum int) error {
    for i := fieldNum + 1; i < len(fields); i++ {
        if strings.HasPrefix(fields[i], "-f") {
            return gotDashF(line, fields, i)
            return gotDashF(lineNum, fields, i)
        }
    }
    return nil
}

func gotDashF(line int, fields []string, fieldNum int) error {
func gotDashF(lineNum int, fields []string, fieldNum int) error {
    target := ""
    if fields[fieldNum] == "-f" {
        if fieldNum+1 == len(fields) {
@@ -112,9 +108,9 @@ func gotDashF(line int, fields []string, fieldNum int) error {
    }

    // If we got here we expect the file to exist.
    _, err := os.Stat(path.Join(*rootDir, *repoRoot, target))
    _, err := os.Stat(path.Join(repoRoot, target))
    if os.IsNotExist(err) {
        return fmt.Errorf("%d: target file %q does not exist", line, target)
        return fmt.Errorf("%d: target file %q does not exist", lineNum, target)
    }
    return err
}
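As an aside on what this chain of helpers (lookForKubectl, gotKubectl, gotCommand, gotDashF) actually detects, here is a condensed, hypothetical re-implementation of the same field walk; `findDashFTarget` is not part of the PR and glosses over the real code's extra `-f` spellings.

```go
package main

import (
	"fmt"
	"strings"
)

// findDashFTarget walks the fields of one line, like the helpers above:
// first find "kubectl", then a verb, then a -f flag, and return its target.
func findDashFTarget(line string) (string, bool) {
	fields := strings.Fields(line)
	seenKubectl, seenCommand := false, false
	for i := 0; i < len(fields); i++ {
		f := fields[i]
		switch {
		case f == "kubectl":
			seenKubectl = true
		case seenKubectl && (f == "create" || f == "update" || f == "replace" || f == "delete"):
			seenCommand = true
		case seenCommand && f == "-f" && i+1 < len(fields):
			return fields[i+1], true // "-f target" split across fields
		case seenCommand && strings.HasPrefix(f, "-f"):
			// "-ftarget" or "-f=target" in a single field.
			return strings.TrimPrefix(strings.TrimPrefix(f, "-f"), "="), true
		}
	}
	return "", false
}

func main() {
	target, ok := findDashFTarget("$ kubectl create -f=examples/pod.yaml")
	fmt.Println(target, ok) // the munger would then os.Stat this path
}
```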
@@ -130,9 +130,9 @@ func TestKubectlDashF(t *testing.T) {
        },
    }
    for i, c := range cases {
        *rootDir = ""
        *repoRoot = ""
        _, err := checkKubectlFileTargets("filename.md", []byte(c.in))
        repoRoot = ""
        in := getMungeLines(c.in)
        _, err := updateKubectlFileTargets("filename.md", in)
        if err != nil && c.ok {
            t.Errorf("case[%d]: expected success, got %v", i, err)
        }
@@ -29,20 +29,20 @@ var (
    // Finds markdown links of the form [foo](bar "alt-text").
    linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
    // Splits the link target into link target and alt-text.
    altTextRE = regexp.MustCompile(`(.*)( ".*")`)
    altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
)

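A quick, runnable illustration of what these two regexes capture. The patterns are copied from the var block above; the sample line and the expected output are hand-worked, not taken from the PR.

```go
package main

import (
	"fmt"
	"regexp"
)

// The two patterns from the var block above.
var (
	linkRE    = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
	altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
)

func main() {
	m := linkRE.FindStringSubmatch(`see [docs](../README.md "the readme")`)
	fmt.Println(m[1]) // docs
	fmt.Println(m[2]) // ../README.md "the readme"

	// The second pass splits the raw target from the alt-text.
	parts := altTextRE.FindStringSubmatch(m[2])
	fmt.Println(parts[1]) // ../README.md
	fmt.Println(parts[2]) //  "the readme"
}
```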
// checkLinks assumes fileBytes has links in markdown syntax, and verifies that
// any relative links actually point to files that exist.
func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
    dir := path.Dir(filePath)
    errors := []string{}

    output := replaceNonPreformattedRegexp(fileBytes, linkRE, func(in []byte) (out []byte) {
        match := linkRE.FindSubmatch(in)
        // match[0] is the entire expression; [1] is the visible text and [2] is the link text.
        visibleText := string(match[1])
        linkText := string(match[2])
func processLink(in string, filePath string) (string, error) {
    var err error
    out := linkRE.ReplaceAllStringFunc(in, func(in string) string {
        match := linkRE.FindStringSubmatch(in)
        if match == nil {
            err = fmt.Errorf("Detected this line had a link, but unable to parse, %v", in)
            return ""
        }
        // match[0] is the entire expression;
        visibleText := match[1]
        linkText := match[2]
        altText := ""
        if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
            linkText = parts[1]
@@ -54,13 +54,10 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
        linkText = strings.Trim(linkText, "\n")
        linkText = strings.Trim(linkText, " ")

        u, err := url.Parse(linkText)
        if err != nil {
            errors = append(
                errors,
                fmt.Sprintf("link %q is unparsable: %v", linkText, err),
            )
            return in
        u, terr := url.Parse(linkText)
        if terr != nil {
            err = fmt.Errorf("link %q is unparsable: %v", linkText, terr)
            return ""
        }

        if u.Host != "" && u.Host != "github.com" {
@@ -72,10 +69,8 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
        if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
            newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
            if !targetExists {
                errors = append(
                    errors,
                    fmt.Sprintf("%q: target not found", linkText),
                )
                err = fmt.Errorf("%q: target not found", linkText)
                return ""
            }
            u.Path = newPath
            if strings.HasPrefix(u.Path, "/") {
@@ -89,11 +84,16 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
            // Make the visible text show the absolute path if it's
            // not nested in or beneath the current directory.
            if strings.HasPrefix(u.Path, "..") {
                suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path))
                dir := path.Dir(filePath)
                suggestedVisibleText, err = makeRepoRelative(path.Join(dir, u.Path), filePath)
                if err != nil {
                    return ""
                }
            } else {
                suggestedVisibleText = u.Path
            }
            if unescaped, err := url.QueryUnescape(u.String()); err != nil {
            var unescaped string
            if unescaped, err = url.QueryUnescape(u.String()); err != nil {
                // Remove %28 type stuff, be nice to humans.
                // And don't fight with the toc generator.
                linkText = unescaped
@@ -107,18 +107,37 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
            visibleText = suggestedVisibleText
        }

        return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText))
        return fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)
    })
    if out == "" {
        return in, err
    }
    return out, nil
}

// updateLinks assumes lines has links in markdown syntax, and verifies that
// any relative links actually point to files that exist.
func updateLinks(filePath string, mlines mungeLines) (mungeLines, error) {
    var out mungeLines
    errors := []string{}

    for _, mline := range mlines {
        if mline.preformatted || !mline.link {
            out = append(out, mline)
            continue
        }
        line, err := processLink(mline.data, filePath)
        if err != nil {
            errors = append(errors, err.Error())
        }
        ml := newMungeLine(line)
        out = append(out, ml)
    }
    err := error(nil)
    if len(errors) != 0 {
        err = fmt.Errorf("%s", strings.Join(errors, "\n"))
    }
    return output, err
}

func makeRepoRelative(filePath string) string {
    realRoot := path.Join(*rootDir, *repoRoot) + "/"
    return strings.TrimPrefix(filePath, realRoot)
    return out, err
}

// We have to append together before path.Clean will be able to tell that stuff
cmd/mungedocs/links_test.go (new file, +76)

@@ -0,0 +1,76 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

var _ = fmt.Printf

func TestBadLinks(t *testing.T) {
    var cases = []struct {
        in string
    }{
        {"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/NOTREADME.md)"},
        {"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/NOTREADME.md)"},
        {"[NOTREADME](../NOTREADME.md)"},
    }
    for _, c := range cases {
        in := getMungeLines(c.in)
        _, err := updateLinks("filename.md", in)
        assert.Error(t, err)
    }
}

func TestGoodLinks(t *testing.T) {
    var cases = []struct {
        in string
        expected string
    }{
        {"", ""},
        {"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/README.md)",
            "[README](README.md)"},
        {"[README](../README.md)",
            "[README](README.md)"},
        {"[README](https://lwn.net)",
            "[README](https://lwn.net)"},
        // _ to -
        {"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/devel/cli_roadmap.md)",
            "[README](../../docs/devel/cli-roadmap.md)"},
        // - to _
        {"[README](../../docs/devel/api-changes.md)",
            "[README](../../docs/devel/api_changes.md)"},

        // Does this even make sense? i dunno
        {"[README](/docs/README.md)",
            "[README](https://github.com/docs/README.md)"},
        {"[README](/GoogleCloudPlatform/kubernetes/tree/master/docs/README.md)",
            "[README](../../docs/README.md)"},
    }
    for i, c := range cases {
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual, err := updateLinks("filename.md", in)
        assert.NoError(t, err)
        if !actual.Equal(expected) {
            t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
        }
    }
}
@@ -17,7 +17,6 @@ limitations under the License.
package main

import (
    "bytes"
    "errors"
    "fmt"
    "io/ioutil"
@@ -30,28 +29,31 @@ import (
)

var (
    verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
    rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
    repoRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
    verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
    rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
    // "repo-root" seems like a dumb name, this is the relative path (from rootDir) to get to the repoRoot
    relRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
It's done this way so that generally you just have to set --root-dir.
Examples:
 * --root-dir=docs/ --repo-root=.. means the repository root is ./
 * --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/
 * --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`)
    skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList)
    repoRoot string

    ErrChangesNeeded = errors.New("mungedocs: changes required")

    // All of the munge operations to perform.
    // TODO: allow selection from command line. (e.g., just check links in the examples directory.)
    allMunges = []munge{
        {"remove-whitespace", updateWhitespace},
        {"table-of-contents", updateTOC},
        {"unversioned-warning", updateUnversionedWarning},
        {"check-links", checkLinks},
        {"blank-lines-surround-preformatted", checkPreformatted},
        {"header-lines", checkHeaderLines},
        {"analytics", checkAnalytics},
        {"kubectl-dash-f", checkKubectlFileTargets},
        {"md-links", updateLinks},
        {"blank-lines-surround-preformatted", updatePreformatted},
        {"header-lines", updateHeaderLines},
        {"analytics", updateAnalytics},
        {"kubectl-dash-f", updateKubectlFileTargets},
        {"sync-examples", syncExamples},
    }
    availableMungeList = func() string {
@@ -68,7 +70,7 @@ Examples:
// data into a new byte array and return that.
type munge struct {
    name string
    fn func(filePath string, before []byte) (after []byte, err error)
    fn func(filePath string, mlines mungeLines) (after mungeLines, err error)
}

type fileProcessor struct {
@@ -90,12 +92,14 @@ func (f fileProcessor) visit(path string) error {
        return err
    }

    mungeLines := getMungeLines(string(fileBytes))

    modificationsMade := false
    errFound := false
    filePrinted := false
    for _, munge := range f.munges {
        after, err := munge.fn(path, fileBytes)
        if err != nil || !bytes.Equal(after, fileBytes) {
        after, err := munge.fn(path, mungeLines)
        if err != nil || !after.Equal(mungeLines) {
            if !filePrinted {
                fmt.Printf("%s\n----\n", path)
                filePrinted = true
@@ -110,7 +114,7 @@ func (f fileProcessor) visit(path string) error {
            }
            fmt.Println("")
        }
        fileBytes = after
        mungeLines = after
    }

    // Write out new file with any changes.
@@ -119,7 +123,7 @@ func (f fileProcessor) visit(path string) error {
        // We're not allowed to make changes.
        return ErrChangesNeeded
    }
    ioutil.WriteFile(path, fileBytes, 0644)
    ioutil.WriteFile(path, mungeLines.Bytes(), 0644)
    }
    if errFound {
        return ErrChangesNeeded
@@ -165,6 +169,7 @@ func wantedMunges() (filtered []munge) {
}

func main() {
    var err error
    flag.Parse()

    if *rootDir == "" {
@@ -172,11 +177,9 @@ func main() {
        os.Exit(1)
    }

    // Split the root dir of "foo/docs" into "foo" and "docs". We
    // chdir into "foo" and walk "docs" so the walk is always at a
    // relative path.
    stem, leaf := path.Split(strings.TrimRight(*rootDir, "/"))
    if err := os.Chdir(stem); err != nil {
    repoRoot = path.Join(*rootDir, *relRoot)
    repoRoot, err = filepath.Abs(repoRoot)
    if err != nil {
        fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
        os.Exit(2)
    }
@@ -194,7 +197,7 @@ func main() {
    // changes needed, exit 1 if manual changes are needed.
    var changesNeeded bool

    err := filepath.Walk(leaf, newWalkFunc(&fp, &changesNeeded))
    err = filepath.Walk(*rootDir, newWalkFunc(&fp, &changesNeeded))
    if err != nil {
        fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
        os.Exit(2)
@@ -16,40 +16,26 @@ limitations under the License.

package main

import "bytes"

// Blocks of ``` need to have blank lines on both sides or they don't look
// right in HTML.
func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) {
    f := splitByPreformatted(fileBytes)
    f = append(fileBlocks{{false, []byte{}}}, f...)
    f = append(f, fileBlock{false, []byte{}})

    output := []byte(nil)
    for i := 1; i < len(f)-1; i++ {
        prev := &f[i-1]
        block := &f[i]
        next := &f[i+1]
        if !block.preformatted {
            continue
        }
        neededSuffix := []byte("\n\n")
        for !bytes.HasSuffix(prev.data, neededSuffix) {
            prev.data = append(prev.data, '\n')
        }
        for !bytes.HasSuffix(block.data, neededSuffix) {
            block.data = append(block.data, '\n')
            if bytes.HasPrefix(next.data, []byte("\n")) {
                // don't change the number of newlines unless needed.
                next.data = next.data[1:]
                if len(next.data) == 0 {
                    f = append(f[:i+1], f[i+2:]...)
                }
func updatePreformatted(filePath string, mlines mungeLines) (mungeLines, error) {
    var out mungeLines
    inpreformat := false
    for i, mline := range mlines {
        if !inpreformat && mline.preformatted {
            if i == 0 || out[len(out)-1].data != "" {
                out = append(out, blankMungeLine)
            }
            // start of a preformat block
            inpreformat = true
        }
        out = append(out, mline)
        if inpreformat && !mline.preformatted {
            if i >= len(mlines)-2 || mlines[i+1].data != "" {
                out = append(out, blankMungeLine)
            }
            inpreformat = false
        }
    }
    for _, block := range f {
        output = append(output, block.data...)
    }
    return output, nil
    return out, nil
}
cmd/mungedocs/preformatted_test.go (new file, +57)

@@ -0,0 +1,57 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestPreformatted(t *testing.T) {
    var cases = []struct {
        in string
        expected string
    }{
        {"", ""},
        {
            "```\nbob\n```",
            "\n```\nbob\n```\n\n",
        },
        {
            "```\nbob\n```\n```\nnotbob\n```\n",
            "\n```\nbob\n```\n\n```\nnotbob\n```\n\n",
        },
        {
            "```bob```\n",
            "```bob```\n",
        },
        {
            " ```\n bob\n ```",
            "\n ```\n bob\n ```\n\n",
        },
    }
    for i, c := range cases {
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual, err := updatePreformatted("filename.md", in)
        assert.NoError(t, err)
        if !actual.Equal(expected) {
            t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
        }
    }
}
@@ -17,8 +17,6 @@ limitations under the License.
package main

import (
    "bufio"
    "bytes"
    "fmt"
    "regexp"
    "strings"
@@ -26,6 +24,8 @@ import (

const tocMungeTag = "GENERATED_TOC"

var r = regexp.MustCompile("[^A-Za-z0-9-]")

// inserts/updates a table of contents in markdown file.
//
// First, builds a ToC.
@@ -33,15 +33,11 @@ const tocMungeTag = "GENERATED_TOC"
// the ToC, thereby updating any previously inserted ToC.
//
// TODO(erictune): put this in own package with tests
func updateTOC(filePath string, markdown []byte) ([]byte, error) {
    toc, err := buildTOC(markdown)
func updateTOC(filePath string, mlines mungeLines) (mungeLines, error) {
    toc := buildTOC(mlines)
    updatedMarkdown, err := updateMacroBlock(mlines, tocMungeTag, toc)
    if err != nil {
        return nil, err
    }
    lines := splitLines(markdown)
    updatedMarkdown, err := updateMacroBlock(lines, beginMungeTag(tocMungeTag), endMungeTag(tocMungeTag), string(toc))
    if err != nil {
        return nil, err
        return mlines, err
    }
    return updatedMarkdown, nil
}
@@ -52,24 +48,19 @@ func updateTOC(filePath string, markdown []byte) ([]byte, error) {
// and builds a table of contents from those. Assumes bookmarks for those will be
// like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces.
// builds the ToC.
func buildTOC(markdown []byte) ([]byte, error) {
    var buffer bytes.Buffer
    buffer.WriteString("\n")
    scanner := bufio.NewScanner(bytes.NewReader(markdown))
    inBlockQuotes := false
    for scanner.Scan() {
        line := scanner.Text()
        match, err := regexp.Match("^```", []byte(line))
        if err != nil {
            return nil, err
        }
        if match {
            inBlockQuotes = !inBlockQuotes

func buildTOC(mlines mungeLines) mungeLines {
    var out mungeLines

    for _, mline := range mlines {
        if mline.preformatted || !mline.header {
            continue
        }
        if inBlockQuotes {
            continue
        // Add a blank line after the munge start tag
        if len(out) == 0 {
            out = append(out, blankMungeLine)
        }
        line := mline.data
        noSharps := strings.TrimLeft(line, "#")
        numSharps := len(line) - len(noSharps)
        heading := strings.Trim(noSharps, " \n")
@@ -77,16 +68,15 @@ func buildTOC(markdown []byte) ([]byte, error) {
        indent := strings.Repeat(" ", numSharps-1)
        bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
        // remove symbols (except for -) in bookmarks
        r := regexp.MustCompile("[^A-Za-z0-9-]")
        bookmark = r.ReplaceAllString(bookmark, "")
        tocLine := fmt.Sprintf("%s- [%s](#%s)\n", indent, heading, bookmark)
        buffer.WriteString(tocLine)
        tocLine := fmt.Sprintf("%s- [%s](#%s)", indent, heading, bookmark)
        out = append(out, newMungeLine(tocLine))
    }

    }
    if err := scanner.Err(); err != nil {
        return []byte{}, err
    // Add a blank line before the munge end tag
    if len(out) != 0 {
        out = append(out, blankMungeLine)
    }

    return buffer.Bytes(), nil
    return out
}
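The bookmark rule the comments above describe (lowercase the heading, turn spaces into dashes, strip everything the `[^A-Za-z0-9-]` regex matches) can be exercised in isolation. `headingToBookmark` below is a hypothetical extraction of those steps; the sample heading comes from the test file that follows.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as toc.go's `r`: drop anything that is not
// alphanumeric or a dash.
var bookmarkRE = regexp.MustCompile("[^A-Za-z0-9-]")

// headingToBookmark mirrors the two steps inside buildTOC.
func headingToBookmark(heading string) string {
	b := strings.Replace(strings.ToLower(heading), " ", "-", -1)
	return bookmarkRE.ReplaceAllString(b, "")
}

func main() {
	h := "Ok, why doesn't this work? ...add 4 *more* `symbols`!"
	// Prints the anchor used in the TOC test case below:
	// ok-why-doesnt-this-work-add-4-more-symbols
	fmt.Println(headingToBookmark(h))
}
```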
@@ -24,37 +24,38 @@ import (

func Test_buildTOC(t *testing.T) {
    var cases = []struct {
        in string
        out string
        in string
        expected string
    }{
        {"", "\n"},
        {"Lorem ipsum\ndolor sit amet\n", "\n"},
        {"", ""},
        {"Lorem ipsum\ndolor sit amet\n", ""},
        {
            "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
            "\n- [Title](#title)\n - [Section Heading](#section-heading)\n",
            "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
        },
        {
            "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```",
            "\n- [Title](#title)\n - [Section Heading](#section-heading)\n",
            "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
        },
        {
            "# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n",
            "\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n",
            "\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n\n",
        },
    }
    for _, c := range cases {
        actual, err := buildTOC([]byte(c.in))
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual))
    for i, c := range cases {
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual := buildTOC(in)
        if !expected.Equal(actual) {
            t.Errorf("Case[%d] Expected TOC '%v' but got '%v'", i, expected.String(), actual.String())
        }
    }
}

func Test_updateTOC(t *testing.T) {
    var cases = []struct {
        in string
        out string
        in string
        expected string
    }{
        {"", ""},
        {
@@ -67,10 +68,12 @@ func Test_updateTOC(t *testing.T) {
        },
    }
    for _, c := range cases {
        actual, err := updateTOC("filename.md", []byte(c.in))
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual, err := updateTOC("filename.md", in)
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual))
        if !expected.Equal(actual) {
            t.Errorf("Expected TOC '%v' but got '%v'", expected.String(), actual.String())
        }
    }
}
@@ -20,10 +20,7 @@ import "fmt"

const unversionedWarningTag = "UNVERSIONED_WARNING"

var beginUnversionedWarning = beginMungeTag(unversionedWarningTag)
var endUnversionedWarning = endMungeTag(unversionedWarningTag)

const unversionedWarningFmt = `
const unversionedWarningPre = `
<!-- BEGIN STRIP_FOR_RELEASE -->

<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
@@ -44,7 +41,11 @@ refer to the docs that go with that version.

<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/%s).
`

const unversionedWarningFmt = `[here](http://releases.k8s.io/release-1.0/%s).`

const unversionedWarningPost = `

Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
@@ -52,21 +53,31 @@ Documentation for other releases can be found at
--

<!-- END STRIP_FOR_RELEASE -->

`

func makeUnversionedWarning(fileName string) string {
    return fmt.Sprintf(unversionedWarningFmt, fileName)
func makeUnversionedWarning(fileName string) mungeLines {
    insert := unversionedWarningPre + fmt.Sprintf(unversionedWarningFmt, fileName) + unversionedWarningPost
    return getMungeLines(insert)
}

// inserts/updates a warning for unversioned docs
func updateUnversionedWarning(file string, markdown []byte) ([]byte, error) {
    lines := splitLines(markdown)
    if hasLine(lines, "<!-- TAG IS_VERSIONED -->") {
func updateUnversionedWarning(file string, mlines mungeLines) (mungeLines, error) {
    file, err := makeRepoRelative(file, file)
    if err != nil {
        return mlines, err
    }
    if hasLine(mlines, "<!-- TAG IS_VERSIONED -->") {
        // No warnings on release branches
        return markdown, nil
        return mlines, nil
    }
    if !hasMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning) {
        lines = append([]string{beginUnversionedWarning, endUnversionedWarning}, lines...)
    if !hasMacroBlock(mlines, unversionedWarningTag) {
        mlines = prependMacroBlock(unversionedWarningTag, mlines)
    }
    return updateMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning, makeUnversionedWarning(file))

    mlines, err = updateMacroBlock(mlines, unversionedWarningTag, makeUnversionedWarning(file))
    if err != nil {
        return mlines, err
    }
    return mlines, nil
}
@@ -23,30 +23,34 @@ import (
)

func TestUnversionedWarning(t *testing.T) {
    warningBlock := beginUnversionedWarning + "\n" + makeUnversionedWarning("filename.md") + "\n" + endUnversionedWarning + "\n"
    beginMark := beginMungeTag(unversionedWarningTag)
    endMark := endMungeTag(unversionedWarningTag)

    warningString := makeUnversionedWarning("filename.md").String()
    warningBlock := beginMark + "\n" + warningString + endMark + "\n"
    var cases = []struct {
        in string
        out string
        in string
        expected string
    }{
        {"", warningBlock},
        {
            "Foo\nBar\n",
            warningBlock + "Foo\nBar\n",
            warningBlock + "\nFoo\nBar\n",
        },
        {
            "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
            "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
        },
        {
            beginUnversionedWarning + "\n" + endUnversionedWarning + "\n",
            beginMark + "\n" + endMark + "\n",
            warningBlock,
        },
        {
            beginUnversionedWarning + "\n" + "something\n" + endUnversionedWarning + "\n",
            beginMark + "\n" + "something\n" + endMark + "\n",
            warningBlock,
        },
        {
            "Foo\n" + beginUnversionedWarning + "\n" + endUnversionedWarning + "\nBar\n",
            "Foo\n" + beginMark + "\n" + endMark + "\nBar\n",
            "Foo\n" + warningBlock + "Bar\n",
        },
        {
@@ -55,10 +59,12 @@ func TestUnversionedWarning(t *testing.T) {
        },
    }
    for i, c := range cases {
        actual, err := updateUnversionedWarning("filename.md", []byte(c.in))
        in := getMungeLines(c.in)
        expected := getMungeLines(c.expected)
        actual, err := updateUnversionedWarning("filename.md", in)
        assert.NoError(t, err)
        if string(actual) != c.out {
            t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
        if !expected.Equal(actual) {
            t.Errorf("case[%d]: expected %v got %v", i, expected.String(), actual.String())
        }
    }
}
@@ -17,83 +17,140 @@ limitations under the License.
package main

import (
    "bytes"
    "fmt"
    "path"
    "path/filepath"
    "regexp"
    "strings"
    "unicode"
)

// Splits a document up into a slice of lines.
func splitLines(document []byte) []string {
    lines := strings.Split(string(document), "\n")
    // Skip trailing empty string from Split-ing
    if len(lines) > 0 && lines[len(lines)-1] == "" {
        lines = lines[:len(lines)-1]
    }
    return lines
}

// Replaces the text between matching "beginMark" and "endMark" within the
// document represented by "lines" with "insertThis".
//
// Delimiters should occupy own line.
// Returns copy of document with modifications.
func updateMacroBlock(lines []string, beginMark, endMark, insertThis string) ([]byte, error) {
    var buffer bytes.Buffer
func updateMacroBlock(mlines mungeLines, token string, insertThis mungeLines) (mungeLines, error) {
    beginMark := beginMungeTag(token)
    endMark := endMungeTag(token)
    var out mungeLines
    betweenBeginAndEnd := false
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        if trimmedLine == beginMark {
    for _, mline := range mlines {
        if mline.preformatted && !betweenBeginAndEnd {
            out = append(out, mline)
            continue
        }
        line := mline.data
        if mline.beginTag && line == beginMark {
            if betweenBeginAndEnd {
                return nil, fmt.Errorf("found second begin mark while updating macro blocks")
            }
            betweenBeginAndEnd = true
            buffer.WriteString(line)
            buffer.WriteString("\n")
        } else if trimmedLine == endMark {
            out = append(out, mline)
        } else if mline.endTag && line == endMark {
            if !betweenBeginAndEnd {
                return nil, fmt.Errorf("found end mark without being mark while updating macro blocks")
                return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
            }
            buffer.WriteString(insertThis)
            // Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
            buffer.WriteString("\n")
            buffer.WriteString(line)
            buffer.WriteString("\n")
            betweenBeginAndEnd = false
            out = append(out, insertThis...)
            out = append(out, mline)
        } else {
            if !betweenBeginAndEnd {
                buffer.WriteString(line)
                buffer.WriteString("\n")
                out = append(out, mline)
            }
        }
    }
    if betweenBeginAndEnd {
        return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
    }
    return buffer.Bytes(), nil
    return out, nil
}

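In document terms, the new `updateMacroBlock(mlines, token, insertThis)` keeps the begin/end tags and swaps everything between them for `insertThis`. Here is a minimal, string-based sketch of that replace loop; the real version works on mungeLines, consults the beginTag/endTag metadata, and reports the error cases shown above.

```go
package main

import "fmt"

// Copies of the tag helpers defined in this file.
func beginMungeTag(desc string) string { return fmt.Sprintf("<!-- BEGIN MUNGE: %s -->", desc) }
func endMungeTag(desc string) string   { return fmt.Sprintf("<!-- END MUNGE: %s -->", desc) }

func main() {
	// A document containing a stale macro block for token "GENERATED_TOC".
	doc := []string{
		"# Title",
		beginMungeTag("GENERATED_TOC"),
		"- [Old Entry](#old-entry)", // everything between the tags is replaced
		endMungeTag("GENERATED_TOC"),
	}
	insert := []string{"- [Title](#title)"}

	// Tags are kept; the interior is swapped for `insert`.
	var out []string
	inside := false
	for _, line := range doc {
		switch {
		case line == beginMungeTag("GENERATED_TOC"):
			inside = true
			out = append(out, line)
		case line == endMungeTag("GENERATED_TOC"):
			inside = false
			out = append(out, insert...)
			out = append(out, line)
		case !inside:
			out = append(out, line)
		}
	}
	for _, l := range out {
		fmt.Println(l)
	}
}
```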
// Tests that a document, represented as a slice of lines, has a line. Ignores
|
||||
// leading and trailing space.
|
||||
func hasLine(lines []string, needle string) bool {
|
||||
for _, line := range lines {
|
||||
trimmedLine := strings.Trim(line, " \n")
|
||||
if trimmedLine == needle {
|
||||
func hasLine(lines mungeLines, needle string) bool {
|
||||
for _, mline := range lines {
|
||||
haystack := strings.TrimSpace(mline.data)
|
||||
if haystack == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func removeMacroBlock(token string, mlines mungeLines) (mungeLines, error) {
|
||||
beginMark := beginMungeTag(token)
|
||||
endMark := endMungeTag(token)
|
||||
var out mungeLines
|
||||
betweenBeginAndEnd := false
|
||||
for _, mline := range mlines {
|
||||
if mline.preformatted {
|
||||
out = append(out, mline)
|
||||
continue
|
||||
}
|
||||
line := mline.data
|
||||
if mline.beginTag && line == beginMark {
|
||||
if betweenBeginAndEnd {
|
||||
return nil, fmt.Errorf("found second begin mark while updating macro blocks")
|
||||
}
|
||||
betweenBeginAndEnd = true
|
||||
} else if mline.endTag && line == endMark {
|
||||
if !betweenBeginAndEnd {
|
||||
return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
|
||||
}
|
||||
betweenBeginAndEnd = false
|
||||
} else {
|
||||
if !betweenBeginAndEnd {
|
||||
out = append(out, mline)
|
||||
}
|
||||
}
|
||||
}
|
||||
if betweenBeginAndEnd {
|
||||
return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Add a macro block to the beginning of a set of lines
|
||||
func prependMacroBlock(token string, mlines mungeLines) mungeLines {
|
||||
beginLine := newMungeLine(beginMungeTag(token))
|
||||
endLine := newMungeLine(endMungeTag(token))
|
||||
out := mungeLines{beginLine, endLine}
|
||||
if len(mlines) > 0 && mlines[0].data != "" {
|
||||
out = append(out, blankMungeLine)
|
||||
}
|
||||
return append(out, mlines...)
|
||||
}
|
||||
|
||||
// Add a macro block to the end of a set of lines
|
||||
func appendMacroBlock(mlines mungeLines, token string) mungeLines {
|
||||
beginLine := newMungeLine(beginMungeTag(token))
|
||||
endLine := newMungeLine(endMungeTag(token))
|
||||
out := mlines
|
||||
if len(mlines) > 0 && mlines[len(mlines)-1].data != "" {
|
||||
out = append(out, blankMungeLine)
|
||||
}
|
||||
return append(out, beginLine, endLine)
|
||||
}
|
||||
|
||||
// Tests that a document, represented as a slice of lines, has a macro block.
|
||||
func hasMacroBlock(lines []string, begin string, end string) bool {
|
||||
func hasMacroBlock(lines mungeLines, token string) bool {
|
||||
beginMark := beginMungeTag(token)
|
||||
endMark := endMungeTag(token)
|
||||
|
||||
foundBegin := false
|
||||
for _, line := range lines {
|
||||
trimmedLine := strings.Trim(line, " \n")
|
||||
for _, mline := range lines {
|
||||
if mline.preformatted {
|
||||
continue
|
||||
}
|
||||
if !mline.beginTag && !mline.endTag {
|
||||
continue
|
||||
}
|
||||
line := mline.data
|
||||
switch {
|
||||
case !foundBegin && trimmedLine == begin:
|
||||
case !foundBegin && line == beginMark:
|
||||
foundBegin = true
|
||||
case foundBegin && trimmedLine == end:
|
||||
case foundBegin && line == endMark:
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -112,72 +169,123 @@ func endMungeTag(desc string) string {
|
||||
return fmt.Sprintf("<!-- END MUNGE: %s -->", desc)
|
||||
}
|
||||
|
||||
// Calls 'replace' for all sections of the document not in ``` / ``` blocks. So
|
||||
// that you don't have false positives inside those blocks.
|
||||
func replaceNonPreformatted(input []byte, replace func([]byte) []byte) []byte {
|
||||
f := splitByPreformatted(input)
|
||||
output := []byte(nil)
|
||||
for _, block := range f {
|
||||
if block.preformatted {
|
||||
output = append(output, block.data...)
|
||||
} else {
|
||||
output = append(output, replace(block.data)...)
|
||||
type mungeLine struct {
|
||||
data string
|
||||
preformatted bool
|
||||
header bool
|
||||
link bool
|
||||
beginTag bool
|
||||
endTag bool
|
||||
}
|
||||
|
||||
type mungeLines []mungeLine
|
||||
|
||||
func (m1 mungeLines) Equal(m2 mungeLines) bool {
|
||||
if len(m1) != len(m2) {
|
||||
return false
|
||||
}
|
||||
for i := range m1 {
|
||||
if m1[i].data != m2[i].data {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return output
|
||||
return true
|
||||
}

type fileBlock struct {
	preformatted bool
	data         []byte
func (mlines mungeLines) String() string {
	slice := []string{}
	for _, mline := range mlines {
		slice = append(slice, mline.data)
	}
	s := strings.Join(slice, "\n")
	// We need to tack on an extra newline at the end of the file
	return s + "\n"
}

type fileBlocks []fileBlock
func (mlines mungeLines) Bytes() []byte {
	return []byte(mlines.String())
}

var (
	// Finds all preformatted block start/stops.
	preformatRE    = regexp.MustCompile("^\\s*```")
	notPreformatRE = regexp.MustCompile("^\\s*```.*```")
	// Is this line a header?
	mlHeaderRE = regexp.MustCompile(`^#`)
	// Is there a link on this line?
	mlLinkRE   = regexp.MustCompile(`\[[^]]*\]\([^)]*\)`)
	beginTagRE = regexp.MustCompile(`<!-- BEGIN MUNGE:`)
	endTagRE   = regexp.MustCompile(`<!-- END MUNGE:`)

	blankMungeLine = newMungeLine("")
)
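
A quick way to sanity-check these classifiers is to run them against sample lines; a minimal sketch (the sample strings are made up, and the variables are re-declared so the snippet stands alone):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same patterns as the var block above.
var (
	headerRE = regexp.MustCompile(`^#`)
	linkRE   = regexp.MustCompile(`\[[^]]*\]\([^)]*\)`)
)

func main() {
	fmt.Println(headerRE.MatchString("# A Header"))             // true
	fmt.Println(headerRE.MatchString("not a header"))           // false
	fmt.Println(linkRE.MatchString("see [docs](../docs/x.md)")) // true
	fmt.Println(linkRE.MatchString("no markdown link here"))    // false
}
```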

func splitByPreformatted(input []byte) fileBlocks {
	f := fileBlocks{}
// Does not set 'preformatted'
func newMungeLine(line string) mungeLine {
	return mungeLine{
		data:     line,
		header:   mlHeaderRE.MatchString(line),
		link:     mlLinkRE.MatchString(line),
		beginTag: beginTagRE.MatchString(line),
		endTag:   endTagRE.MatchString(line),
	}
}

	cur := []byte(nil)
func trimRightSpace(in string) string {
	return strings.TrimRightFunc(in, unicode.IsSpace)
}

// Splits a document up into a slice of lines.
func splitLines(document string) []string {
	lines := strings.Split(document, "\n")
	// Skip trailing empty string from Split-ing
	if len(lines) > 0 && lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	return lines
}
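
The trailing-empty-string trim matters because strings.Split on a newline-terminated document yields one extra element; a small sketch of the behavior the helper compensates for:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A newline-terminated document produces a trailing "" from Split,
	// which splitLines above drops.
	fmt.Printf("%q\n", strings.Split("a\nb\n", "\n")) // ["a" "b" ""]
}
```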

func getMungeLines(in string) mungeLines {
	var out mungeLines
	preformatted := false
	// SplitAfter keeps the newline, so you don't have to worry about
	// omitting it on the last line or anything. Also, the documentation
	// claims it's unicode safe.
	for _, line := range bytes.SplitAfter(input, []byte("\n")) {

	lines := splitLines(in)
	// We indicate if any given line is inside a preformatted block or
	// outside a preformatted block
	for _, line := range lines {
		if !preformatted {
			if preformatRE.Match(line) && !notPreformatRE.Match(line) {
				if len(cur) > 0 {
					f = append(f, fileBlock{false, cur})
				}
				cur = []byte{}
			if preformatRE.MatchString(line) && !notPreformatRE.MatchString(line) {
				preformatted = true
			}
			cur = append(cur, line...)
		} else {
			cur = append(cur, line...)
			if preformatRE.Match(line) {
				if len(cur) > 0 {
					f = append(f, fileBlock{true, cur})
				}
				cur = []byte{}
			if preformatRE.MatchString(line) {
				preformatted = false
			}
		}
		ml := newMungeLine(line)
		ml.preformatted = preformatted
		out = append(out, ml)
	}
	if len(cur) > 0 {
		f = append(f, fileBlock{preformatted, cur})
	}
	return f
	return out
}
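
The preformatted bookkeeping is the subtle part of getMungeLines: the opening fence line itself is marked preformatted, and the closing fence is not. A standalone sketch of just that toggle (same regexes as above; the sample input is invented):

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	preformatRE    = regexp.MustCompile("^\\s*```")
	notPreformatRE = regexp.MustCompile("^\\s*```.*```")
)

func main() {
	lines := []string{"text", "```", "code", "```", "after"}
	preformatted := false
	for _, line := range lines {
		if !preformatted {
			// An opening fence (but not a one-line ```x``` span) starts a block.
			if preformatRE.MatchString(line) && !notPreformatRE.MatchString(line) {
				preformatted = true
			}
		} else if preformatRE.MatchString(line) {
			// A closing fence ends the block; the fence line itself is
			// reported as not preformatted, matching the diff above.
			preformatted = false
		}
		fmt.Printf("%-8q preformatted=%v\n", line, preformatted)
	}
}
```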

// As above, but further uses exp to parse the non-preformatted sections.
func replaceNonPreformattedRegexp(input []byte, exp *regexp.Regexp, replace func([]byte) []byte) []byte {
	return replaceNonPreformatted(input, func(in []byte) []byte {
		return exp.ReplaceAllFunc(in, replace)
	})
// filePath is the file we are looking for
// processingFile is the file where we found the link. So if we are processing
// /path/to/repoRoot/docs/admin/README.md and are looking for
// ../../file.json we can find that location.
// In many cases filePath and processingFile may be the same
func makeRepoRelative(filePath string, processingFile string) (string, error) {
	if filePath, err := filepath.Rel(repoRoot, filePath); err == nil {
		return filePath, nil
	}
	cwd := path.Dir(processingFile)
	return filepath.Rel(repoRoot, path.Join(cwd, filePath))
}

func makeFileRelative(filePath string, processingFile string) (string, error) {
	cwd := path.Dir(processingFile)
	if filePath, err := filepath.Rel(cwd, filePath); err == nil {
		return filePath, nil
	}
	return filepath.Rel(cwd, path.Join(cwd, filePath))
}
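
For intuition about the fallback branch of makeRepoRelative, a minimal sketch (paths are hypothetical) of how a relative link found inside a processed file is resolved against that file's directory and then re-expressed relative to the repo root:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	repoRoot := "/repo"                            // hypothetical checkout root
	processingFile := "/repo/docs/admin/README.md" // file being munged
	link := "../../file.json"                      // link found in that file

	// Resolve the link against the directory of the file that contains it,
	// then express it relative to the repo root.
	cwd := path.Dir(processingFile)
	rel, err := filepath.Rel(repoRoot, path.Join(cwd, link))
	fmt.Println(rel, err) // file.json <nil>
}
```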

@ -17,13 +17,17 @@ limitations under the License.
package main

import (
	"reflect"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_updateMacroBlock(t *testing.T) {
	token := "TOKEN"
	BEGIN := beginMungeTag(token)
	END := endMungeTag(token)

	var cases = []struct {
		in  string
		out string
@ -31,149 +35,135 @@ func Test_updateMacroBlock(t *testing.T) {
		{"", ""},
		{"Lorem ipsum\ndolor sit amet\n",
			"Lorem ipsum\ndolor sit amet\n"},
		{"Lorem ipsum \n BEGIN\ndolor\nEND\nsit amet\n",
			"Lorem ipsum \n BEGIN\nfoo\n\nEND\nsit amet\n"},
		{"Lorem ipsum \n" + BEGIN + "\ndolor\n" + END + "\nsit amet\n",
			"Lorem ipsum \n" + BEGIN + "\nfoo\n" + END + "\nsit amet\n"},
	}
	for _, c := range cases {
		actual, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo\n")
		in := getMungeLines(c.in)
		expected := getMungeLines(c.out)
		actual, err := updateMacroBlock(in, token, getMungeLines("foo"))
		assert.NoError(t, err)
		if c.out != string(actual) {
			t.Errorf("Expected '%v' but got '%v'", c.out, string(actual))
		if !expected.Equal(actual) {
			t.Errorf("Expected '%v' but got '%v'", expected.String(), actual.String())
		}
	}
}

func Test_updateMacroBlock_errors(t *testing.T) {
	token := "TOKEN"
	b := beginMungeTag(token)
	e := endMungeTag(token)

	var cases = []struct {
		in string
	}{
		{"BEGIN\n"},
		{"blah\nBEGIN\nblah"},
		{"END\n"},
		{"blah\nEND\nblah\n"},
		{"END\nBEGIN"},
		{"BEGIN\nEND\nEND"},
		{"BEGIN\nBEGIN\nEND"},
		{"BEGIN\nBEGIN\nEND\nEND"},
		{b + "\n"},
		{"blah\n" + b + "\nblah"},
		{e + "\n"},
		{"blah\n" + e + "\nblah\n"},
		{e + "\n" + b},
		{b + "\n" + e + "\n" + e},
		{b + "\n" + b + "\n" + e},
		{b + "\n" + b + "\n" + e + "\n" + e},
	}
	for _, c := range cases {
		_, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo")
		in := getMungeLines(c.in)
		_, err := updateMacroBlock(in, token, getMungeLines("foo"))
		assert.Error(t, err)
	}
}

func TestHasLine(t *testing.T) {
	cases := []struct {
		lines    []string
		haystack string
		needle   string
		expected bool
	}{
		{[]string{"abc", "def", "ghi"}, "abc", true},
		{[]string{" abc", "def", "ghi"}, "abc", true},
		{[]string{"abc ", "def", "ghi"}, "abc", true},
		{[]string{"\n abc", "def", "ghi"}, "abc", true},
		{[]string{"abc \n", "def", "ghi"}, "abc", true},
		{[]string{"abc", "def", "ghi"}, "def", true},
		{[]string{"abc", "def", "ghi"}, "ghi", true},
		{[]string{"abc", "def", "ghi"}, "xyz", false},
		{"abc\ndef\nghi", "abc", true},
		{" abc\ndef\nghi", "abc", true},
		{"abc \ndef\nghi", "abc", true},
		{"\n abc\ndef\nghi", "abc", true},
		{"abc \n\ndef\nghi", "abc", true},
		{"abc\ndef\nghi", "def", true},
		{"abc\ndef\nghi", "ghi", true},
		{"abc\ndef\nghi", "xyz", false},
	}

	for i, c := range cases {
		if hasLine(c.lines, c.needle) != c.expected {
		in := getMungeLines(c.haystack)
		if hasLine(in, c.needle) != c.expected {
			t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected)
		}
	}
}

func TestHasMacroBlock(t *testing.T) {
	token := "<<<"
	b := beginMungeTag(token)
	e := endMungeTag(token)
	cases := []struct {
		lines    []string
		begin    string
		end      string
		expected bool
	}{
		{[]string{"<<<", ">>>"}, "<<<", ">>>", true},
		{[]string{"<<<", "abc", ">>>"}, "<<<", ">>>", true},
		{[]string{"<<<", "<<<", "abc", ">>>"}, "<<<", ">>>", true},
		{[]string{"<<<", "abc", ">>>", ">>>"}, "<<<", ">>>", true},
		{[]string{"<<<", ">>>", "<<<", ">>>"}, "<<<", ">>>", true},
		{[]string{"<<<"}, "<<<", ">>>", false},
		{[]string{">>>"}, "<<<", ">>>", false},
		{[]string{"<<<", "abc"}, "<<<", ">>>", false},
		{[]string{"abc", ">>>"}, "<<<", ">>>", false},
		{[]string{b, e}, true},
		{[]string{b, "abc", e}, true},
		{[]string{b, b, "abc", e}, true},
		{[]string{b, "abc", e, e}, true},
		{[]string{b, e, b, e}, true},
		{[]string{b}, false},
		{[]string{e}, false},
		{[]string{b, "abc"}, false},
		{[]string{"abc", e}, false},
	}

	for i, c := range cases {
		if hasMacroBlock(c.lines, c.begin, c.end) != c.expected {
			t.Errorf("case[%d]: %q,%q, expected %t, got %t", i, c.begin, c.end, c.expected, !c.expected)
		in := getMungeLines(strings.Join(c.lines, "\n"))
		if hasMacroBlock(in, token) != c.expected {
			t.Errorf("case[%d]: expected %t, got %t", i, c.expected, !c.expected)
		}
	}
}

func TestReplaceNonPreformatted(t *testing.T) {
func TestAppendMacroBlock(t *testing.T) {
	token := "<<<"
	b := beginMungeTag(token)
	e := endMungeTag(token)
	cases := []struct {
		in  string
		out string
		in       []string
		expected []string
	}{
		{"aoeu", ""},
		{"aoeu\n```\naoeu\n```\naoeu", "```\naoeu\n```\n"},
		{"ao\neu\n```\naoeu\n\n\n", "```\naoeu\n\n\n"},
		{"aoeu ```aoeu``` aoeu", ""},
		{[]string{}, []string{b, e}},
		{[]string{"bob"}, []string{"bob", "", b, e}},
		{[]string{b, e}, []string{b, e, "", b, e}},
	}

	for i, c := range cases {
		out := string(replaceNonPreformatted([]byte(c.in), func([]byte) []byte { return nil }))
		if out != c.out {
			t.Errorf("%v: got %q, wanted %q", i, out, c.out)
		in := getMungeLines(strings.Join(c.in, "\n"))
		expected := getMungeLines(strings.Join(c.expected, "\n"))
		out := appendMacroBlock(in, token)
		if !out.Equal(expected) {
			t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
		}
	}
}

func TestReplaceNonPreformattedNoChange(t *testing.T) {
func TestPrependMacroBlock(t *testing.T) {
	token := "<<<"
	b := beginMungeTag(token)
	e := endMungeTag(token)
	cases := []struct {
		in string
		in       []string
		expected []string
	}{
		{"aoeu"},
		{"aoeu\n```\naoeu\n```\naoeu"},
		{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu"},
		{"ao\neu\n```\naoeu\n\n\n"},
		{"aoeu ```aoeu``` aoeu"},
		{"aoeu\n```\naoeu\n```"},
		{"aoeu\n```\naoeu\n```\n"},
		{"aoeu\n```\naoeu\n```\n\n"},
		{[]string{}, []string{b, e}},
		{[]string{"bob"}, []string{b, e, "", "bob"}},
		{[]string{b, e}, []string{b, e, "", b, e}},
	}

	for i, c := range cases {
		out := string(replaceNonPreformatted([]byte(c.in), func(in []byte) []byte { return in }))
		if out != c.in {
			t.Errorf("%v: got %q, wanted %q", i, out, c.in)
		}
	}
}

func TestReplaceNonPreformattedCallOrder(t *testing.T) {
	cases := []struct {
		in     string
		expect []string
	}{
		{"aoeu", []string{"aoeu"}},
		{"aoeu\n```\naoeu\n```\naoeu", []string{"aoeu\n", "aoeu"}},
		{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu", []string{"aoeu\n\n", "\naoeu"}},
		{"ao\neu\n```\naoeu\n\n\n", []string{"ao\neu\n"}},
		{"aoeu ```aoeu``` aoeu", []string{"aoeu ```aoeu``` aoeu"}},
		{"aoeu\n```\naoeu\n```", []string{"aoeu\n"}},
		{"aoeu\n```\naoeu\n```\n", []string{"aoeu\n"}},
		{"aoeu\n```\naoeu\n```\n\n", []string{"aoeu\n", "\n"}},
	}

	for i, c := range cases {
		got := []string{}
		replaceNonPreformatted([]byte(c.in), func(in []byte) []byte {
			got = append(got, string(in))
			return in
		})
		if e, a := c.expect, got; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: got %q, wanted %q", i, a, e)
		in := getMungeLines(strings.Join(c.in, "\n"))
		expected := getMungeLines(strings.Join(c.expected, "\n"))
		out := prependMacroBlock(token, in)
		if !out.Equal(expected) {
			t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
		}
	}
}

31
cmd/mungedocs/whitespace.go
Normal file
@ -0,0 +1,31 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

// Remove all trailing whitespace
func updateWhitespace(file string, mlines mungeLines) (mungeLines, error) {
	var out mungeLines
	for _, mline := range mlines {
		if mline.preformatted {
			out = append(out, mline)
			continue
		}
		newline := trimRightSpace(mline.data)
		out = append(out, newMungeLine(newline))
	}
	return out, nil
}
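
As a self-contained illustration of the trimming rule (a sketch that re-declares trimRightSpace rather than importing anything from the munger):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// Same helper the munger uses for non-preformatted lines.
func trimRightSpace(in string) string {
	return strings.TrimRightFunc(in, unicode.IsSpace)
}

func main() {
	fmt.Printf("%q\n", trimRightSpace("bob \t"))  // "bob"
	fmt.Printf("%q\n", trimRightSpace(" \t \t ")) // ""
	// Lines inside ``` blocks are passed through untouched by updateWhitespace.
}
```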

45
cmd/mungedocs/whitespace_test.go
Normal file
@ -0,0 +1,45 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_updateWhiteSpace(t *testing.T) {
	var cases = []struct {
		in       string
		expected string
	}{
		{"", ""},
		{"\n", "\n"},
		{" \t \t \n", "\n"},
		{"bob \t", "bob"},
		{"```\n \n```\n", "```\n \n```\n"},
	}
	for i, c := range cases {
		in := getMungeLines(c.in)
		expected := getMungeLines(c.expected)
		actual, err := updateWhitespace("filename.md", in)
		assert.NoError(t, err)
		if !expected.Equal(actual) {
			t.Errorf("Case[%d] Expected Whitespace '%v' but got '%v'", i, string(expected.Bytes()), string(actual.Bytes()))
		}
	}
}

@ -35,7 +35,7 @@ Documentation for other releases can be found at

In Kubernetes, authorization happens as a separate step from authentication.
See the [authentication documentation](authentication.md) for an
overview of authentication.

Authorization applies to all HTTP accesses on the main (secure) apiserver port.
@ -60,8 +60,8 @@ The following implementations are available, and are selected by flag:
A request has 4 attributes that can be considered for authorization:
  - user (the user-string which a user was authenticated as).
  - whether the request is readonly (GETs are readonly)
  - what resource is being accessed
    - applies only to the API endpoints, such as
      `/api/v1/namespaces/default/pods`. For miscellaneous endpoints, like `/version`, the
      resource is the empty string.
  - the namespace of the object being accessed, or the empty string if the
@ -95,7 +95,7 @@ interface.
A request has attributes which correspond to the properties of a policy object.

When a request is received, the attributes are determined. Unknown attributes
are set to the zero value of its type (e.g. empty string, 0, false).

An unset property will match any value of the corresponding
attribute. An unset attribute will match any value of the corresponding property.
@ -36,7 +36,7 @@ Documentation for other releases can be found at
|
||||
This doc is about cluster troubleshooting; we assume you have already ruled out your application as the root cause of the
|
||||
problem you are experiencing. See
|
||||
the [application troubleshooting guide](../user-guide/application-troubleshooting.md) for tips on application debugging.
|
||||
You may also visit [troubleshooting document](../troubleshooting.md) for more information.
|
||||
You may also visit [troubleshooting document](../troubleshooting.md) for more information.
|
||||
|
||||
## Listing your cluster
|
||||
|
||||
@ -73,7 +73,7 @@ This is an incomplete list of things that could go wrong, and how to adjust your
|
||||
Root causes:
|
||||
- VM(s) shutdown
|
||||
- Network partition within cluster, or between cluster and users
|
||||
- Crashes in Kubernetes software
|
||||
- Crashes in Kubernetes software
|
||||
- Data loss or unavailability of persistent storage (e.g. GCE PD or AWS EBS volume)
|
||||
- Operator error, e.g. misconfigured Kubernetes software or application software
|
||||
|
||||
|
@ -35,7 +35,7 @@ Documentation for other releases can be found at
|
||||
|
||||
[etcd](https://coreos.com/etcd/docs/2.0.12/) is a highly-available key value
|
||||
store which Kubernetes uses for persistent storage of all of its REST API
|
||||
objects.
|
||||
objects.
|
||||
|
||||
## Configuration: high-level goals
|
||||
|
||||
|
@ -102,7 +102,7 @@ to make sure that each automatically restarts when it fails. To achieve this, w
|
||||
the `kubelet` that we run on each of the worker nodes. This is convenient, since we can use containers to distribute our binaries, we can
|
||||
establish resource limits, and introspect the resource usage of each daemon. Of course, we also need something to monitor the kubelet
|
||||
itself (insert who watches the watcher jokes here). For Debian systems, we choose monit, but there are a number of alternate
|
||||
choices. For example, on systemd-based systems (e.g. RHEL, CentOS), you can run 'systemctl enable kubelet'.
|
||||
choices. For example, on systemd-based systems (e.g. RHEL, CentOS), you can run 'systemctl enable kubelet'.
|
||||
|
||||
If you are extending from a standard Kubernetes installation, the `kubelet` binary should already be present on your system. You can run
|
||||
`which kubelet` to determine if the binary is in fact installed. If it is not installed,
|
||||
|
@ -90,7 +90,7 @@ project](salt.md).

## Multi-tenant support

* **Resource Quota** ([resource-quota.md](resource-quota.md))

## Security


@ -73,13 +73,13 @@ load and growth.

To pick the number of clusters, first, decide which regions you need to be in to have adequate latency to all your end users, for services that will run
on Kubernetes (if you use a Content Distribution Network, the latency requirements for the CDN-hosted content need not
be considered). Legal issues might influence this as well. For example, a company with a global customer base might decide to have clusters in US, EU, AP, and SA regions.
Call the number of regions to be in `R`.

Second, decide how many clusters should be able to be unavailable at the same time, while still being available. Call
the number that can be unavailable `U`. If you are not sure, then 1 is a fine choice.

If it is allowable for load-balancing to direct traffic to any region in the event of a cluster failure, then
you need `R + U` clusters. If it is not (e.g. you want to ensure low latency for all users in the event of a
cluster failure), then you need to have `R * U` clusters (`U` in each of `R` regions). In any case, try to put each cluster in a different zone.


@ -52,7 +52,7 @@ Each user community has its own:

A cluster operator may create a Namespace for each unique user community.

The Namespace provides a unique scope for:

1. named resources (to avoid basic naming collisions)
2. delegated management authority to trusted users
@ -99,7 +99,7 @@ Use the file [`namespace-dev.json`](namespace-dev.json) which describes a develo
```

[Download example](namespace-dev.json)
<!-- END MUNGE: EXAMPLE -->
<!-- END MUNGE: EXAMPLE namespace-dev.json -->

Create the development namespace using kubectl.


@ -234,7 +234,7 @@ capacity when adding a node.
The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It
checks that the sum of the limits of containers on the node is no greater than the node capacity. It
includes all containers started by kubelet, but not containers started directly by docker, nor
processes not in containers.

If you want to explicitly reserve resources for non-Pod processes, you can create a placeholder
pod. Use the following template:

@ -160,14 +160,14 @@ Sometimes more complex policies may be desired, such as:

Such policies could be implemented using ResourceQuota as a building-block, by
writing a 'controller' which watches the quota usage and adjusts the quota
hard limits of each namespace according to other signals.

Note that resource quota divides up aggregate cluster resources, but it creates no
restrictions around nodes: pods from several namespaces may run on the same node.

## Example

See a [detailed example for how to use resource quota](../user-guide/resourcequota/).

## Read More

@ -56,7 +56,7 @@ for a number of reasons:
  - Auditing considerations for humans and service accounts may differ.
  - A config bundle for a complex system may include definition of various service
    accounts for components of that system. Because service accounts can be created
    ad-hoc and have namespaced names, such config is portable.

## Service account automation


@ -55,7 +55,7 @@ What constitutes a compatible change and how to change the API are detailed by t

## API versioning

To make it easier to eliminate fields or restructure resource representations, Kubernetes supports multiple API versions, each at a different API path prefix, such as `/api/v1beta3`. These are simply different interfaces to read and/or modify the same underlying resources. In general, all API resources are accessible via all API versions, though there may be some cases in the future where that is not true.

We chose to version at the API level rather than at the resource or field level to ensure that the API presents a clear, consistent view of system resources and behavior, and to enable controlling access to end-of-lifed and/or experimental APIs.


@ -33,7 +33,7 @@ Documentation for other releases can be found at

# Kubernetes Design Overview

Kubernetes is a system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications.

Kubernetes establishes robust declarative primitives for maintaining the desired state requested by the user. We see these primitives as the main value added by Kubernetes. Self-healing mechanisms, such as auto-restarting, re-scheduling, and replicating containers require active controllers, not just imperative orchestration.
@ -104,7 +104,7 @@ type ResourceQuotaList struct {

## AdmissionControl plugin: ResourceQuota

The **ResourceQuota** plug-in introspects all incoming admission requests.

It makes decisions by evaluating the incoming object against all defined **ResourceQuota.Status.Hard** resource limits in the request
namespace. If acceptance of the resource would cause the total usage of a named resource to exceed its hard limit, the request is denied.
@ -125,7 +125,7 @@ Any resource that is not part of core Kubernetes must follow the resource naming
This means the resource must have a fully-qualified name (i.e. mycompany.org/shinynewresource)

If the incoming request does not cause the total usage to exceed any of the enumerated hard resource limits, the plug-in will post a
**ResourceQuotaUsage** document to the server to atomically update the observed usage based on the previously read
**ResourceQuota.ResourceVersion**. This keeps incremental usage atomically consistent, but does introduce a bottleneck (intentionally)
into the system.

@ -184,7 +184,7 @@ resourcequotas 1 1
services 3 5
```

## More information

See [resource quota document](../admin/resource-quota.md) and the [example of Resource Quota](../user-guide/resourcequota/) for more information.


@ -47,7 +47,7 @@ Each node runs Docker, of course. Docker takes care of the details of downloadi

### Kubelet

The **Kubelet** manages [pods](../user-guide/pods.md) and their containers, their images, their volumes, etc.

### Kube-Proxy

@ -49,7 +49,7 @@ Event compression should be best effort (not guaranteed). Meaning, in the worst
## Design

Instead of a single Timestamp, each event object [contains](http://releases.k8s.io/HEAD/pkg/api/types.go#L1111) the following fields:
  * `FirstTimestamp util.Time`
    * The date/time of the first occurrence of the event.
  * `LastTimestamp util.Time`
    * The date/time of the most recent occurrence of the event.

@ -87,7 +87,7 @@ available to subsequent expansions.

### Use Case: Variable expansion in command

Users frequently need to pass the values of environment variables to a container's command.
Currently, Kubernetes does not perform any expansion of variables. The workaround is to invoke a
shell in the container's command and have the shell perform the substitution, or to write a wrapper
script that sets up the environment and runs the command. This has a number of drawbacks:
@ -130,7 +130,7 @@ The exact syntax for variable expansion has a large impact on how users perceive
feature. We considered implementing a very restrictive subset of the shell `${var}` syntax. This
syntax is an attractive option on some level, because many people are familiar with it. However,
this syntax also has a large number of lesser known features such as the ability to provide
default values for unset variables, perform inline substitution, etc.

In the interest of preventing conflation of the expansion feature in Kubernetes with the shell
feature, we chose a different syntax similar to the one in Makefiles, `$(var)`. We also chose not
@ -239,7 +239,7 @@ The necessary changes to implement this functionality are:
   `ObjectReference` and an `EventRecorder`
2. Introduce `third_party/golang/expansion` package that provides:
   1. An `Expand(string, func(string) string) string` function
   2. A `MappingFuncFor(ObjectEventRecorder, ...map[string]string) string` function
3. Make the kubelet expand environment correctly
4. Make the kubelet expand command correctly

@ -311,7 +311,7 @@ func Expand(input string, mapping func(string) string) string {

#### Kubelet changes

The Kubelet should be made to correctly expand variable references in a container's environment,
command, and args. Changes will need to be made to:

1. The `makeEnvironmentVariables` function in the kubelet; this is used by
@ -52,7 +52,7 @@ Each user community has its own:

A cluster operator may create a Namespace for each unique user community.

The Namespace provides a unique scope for:

1. named resources (to avoid basic naming collisions)
2. delegated management authority to trusted users
@ -142,7 +142,7 @@ type NamespaceSpec struct {

A *FinalizerName* is a qualified name.

The API Server enforces that a *Namespace* can only be deleted from storage if and only if
its *Namespace.Spec.Finalizers* is empty.

A *finalize* operation is the only mechanism to modify the *Namespace.Spec.Finalizers* field post creation.
@ -189,12 +189,12 @@ are known to the cluster.
The *namespace controller* enumerates each known resource type in that namespace and deletes it one by one.

Admission control blocks creation of new resources in that namespace in order to prevent a race-condition
where the controller could believe all of a given resource type had been deleted from the namespace,
when in fact some other rogue client agent had created new objects. Using admission control in this
scenario allows each of the registry implementations for the individual objects to not need to take into account Namespace life-cycle.

Once all objects known to the *namespace controller* have been deleted, the *namespace controller*
executes a *finalize* operation on the namespace that removes the *kubernetes* value from
the *Namespace.Spec.Finalizers* list.

If the *namespace controller* sees a *Namespace* whose *ObjectMeta.DeletionTimestamp* is set, and
@ -245,13 +245,13 @@ In etcd, we want to continue to still support efficient WATCH across namespaces.

Resources that persist content in etcd will have storage paths as follows:

/{k8s_storage_prefix}/{resourceType}/{resource.Namespace}/{resource.Name}

This enables consumers to WATCH /registry/{resourceType} for changes across namespaces of a particular {resourceType}.

### Kubelet

The kubelet will register pods it sources from a file or http source with a namespace associated with the
*cluster-id*

### Example: OpenShift Origin managing a Kubernetes Namespace
@ -362,7 +362,7 @@ This results in the following state:

At this point, the Kubernetes *namespace controller* in its sync loop will see that the namespace
has a deletion timestamp and that its list of finalizers is empty. As a result, it knows all
content associated with that namespace has been purged. It performs a final DELETE action
to remove that Namespace from the storage.

At this point, all content associated with that Namespace, and the Namespace itself are gone.
@ -41,11 +41,11 @@ Two new API kinds:

A `PersistentVolume` (PV) is a storage resource provisioned by an administrator. It is analogous to a node. See [Persistent Volume Guide](../user-guide/persistent-volumes/) for how to use it.

A `PersistentVolumeClaim` (PVC) is a user's request for a persistent volume to use in a pod. It is analogous to a pod.

One new system component:

`PersistentVolumeClaimBinder` is a singleton running in master that watches all PersistentVolumeClaims in the system and binds them to the closest matching available PersistentVolume. The volume manager watches the API for newly created volumes to manage.

One new volume:

@ -69,7 +69,7 @@ Cluster administrators use the API to manage *PersistentVolumes*. A custom stor

PVs are system objects and, thus, have no namespace.

Many means of dynamic provisioning will eventually be implemented for various storage types.


##### PersistentVolume API
@ -116,7 +116,7 @@ TBD

#### Events

The implementation of persistent storage will not require events to communicate to the user the state of their claim. The CLI for bound claims contains a reference to the backing persistent volume. This is always present in the API and CLI, making an event to communicate the same unnecessary.

Events that communicate the state of a mounted volume are left to the volume plugins.

@ -232,9 +232,9 @@ When a claim holder is finished with their data, they can delete their claim.
$ kubectl delete pvc myclaim-1
```

The ```PersistentVolumeClaimBinder``` will reconcile this by removing the claim reference from the PV and change the PV's status to 'Released'.

Admins can script the recycling of released volumes. Future dynamic provisioners will understand how a volume should be recycled.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@ -33,7 +33,7 @@ Documentation for other releases can be found at

# Design Principles

Principles to follow when extending Kubernetes.

## API

@ -44,14 +44,14 @@ See also the [API conventions](../devel/api-conventions.md).
* The control plane should be transparent -- there are no hidden internal APIs.
* The cost of API operations should be proportional to the number of objects intentionally operated upon. Therefore, common filtered lookups must be indexed. Beware of patterns of multiple API calls that would incur quadratic behavior.
* Object status must be 100% reconstructable by observation. Any history kept must be just an optimization and not required for correct operation.
* Cluster-wide invariants are difficult to enforce correctly. Try not to add them. If you must have them, don't enforce them atomically in master components, that is contention-prone and doesn't provide a recovery path in the case of a bug allowing the invariant to be violated. Instead, provide a series of checks to reduce the probability of a violation, and make every component involved able to recover from an invariant violation.
* Low-level APIs should be designed for control by higher-level systems. Higher-level APIs should be intent-oriented (think SLOs) rather than implementation-oriented (think control knobs).

## Control logic

* Functionality must be *level-based*, meaning the system must operate correctly given the desired state and the current/observed state, regardless of how many intermediate state updates may have been missed. Edge-triggered behavior must be just an optimization.
* Assume an open world: continually verify assumptions and gracefully adapt to external events and/or actors. Example: we allow users to kill pods under control of a replication controller; it just replaces them.
* Do not define comprehensive state machines for objects with behaviors associated with state transitions and/or "assumed" states that cannot be ascertained by observation.
* Don't assume a component's decisions will not be overridden or rejected, nor for the component to always understand why. For example, etcd may reject writes. Kubelet may reject pods. The scheduler may not be able to schedule pods. Retry, but back off and/or make alternative decisions.
* Components should be self-healing. For example, if you must keep some state (e.g., cache) the content needs to be periodically refreshed, so that if an item does get erroneously stored or a deletion event is missed etc, it will be soon fixed, ideally on timescales that are shorter than what will attract attention from humans.
* Component behavior should degrade gracefully. Prioritize actions so that the most important activities can continue to function even when overloaded and/or in states of partial failure.
@ -61,7 +61,7 @@ See also the [API conventions](../devel/api-conventions.md).
* Only the apiserver should communicate with etcd/store, and not other components (scheduler, kubelet, etc.).
* Compromising a single node shouldn't compromise the cluster.
* Components should continue to do what they were last told in the absence of new instructions (e.g., due to network partition or component outage).
* All components should keep all relevant state in memory all the time. The apiserver should write through to etcd/store, other components should write through to the apiserver, and they should watch for updates made by other clients.
* Watch is preferred over polling.

## Extensibility

@ -51,7 +51,7 @@ The resource model aims to be:

A Kubernetes _resource_ is something that can be requested by, allocated to, or consumed by a pod or container. Examples include memory (RAM), CPU, disk-time, and network bandwidth.

Once resources on a node have been allocated to one pod, they should not be allocated to another until that pod is removed or exits. This means that Kubernetes schedulers should ensure that the sum of the resources allocated (requested and granted) to its pods never exceeds the usable capacity of the node. Testing whether a pod will fit on a node is called _feasibility checking_.

Note that the resource model currently prohibits over-committing resources; we will want to relax that restriction later.

@ -70,7 +70,7 @@ For future reference, note that some resources, such as CPU and network bandwidt

### Resource quantities

Initially, all Kubernetes resource types are _quantitative_, and have an associated _unit_ for quantities of the associated resource (e.g., bytes for memory, bytes per second for bandwidth, instances for software licences). The units will always be a resource type's natural base units (e.g., bytes, not MB), to avoid confusion between binary and decimal multipliers and the underlying unit multiplier (e.g., is memory measured in MiB, MB, or GB?).

Resource quantities can be added and subtracted: for example, a node has a fixed quantity of each resource type that can be allocated to pods/containers; once such an allocation has been made, the allocated resources cannot be made available to other pods/containers without over-committing the resources.

@ -110,7 +110,7 @@ resourceCapacitySpec: [
```

Where:
* _total_: the total allocatable resources of a node. Initially, the resources at a given scope will bound the resources of the sum of inner scopes.

#### Notes

@ -194,7 +194,7 @@ The following are planned future extensions to the resource model, included here

Because resource usage and related metrics change continuously, need to be tracked over time (i.e., historically), can be characterized in a variety of ways, and are fairly voluminous, we will not include usage in core API objects, such as [Pods](../user-guide/pods.md) and Nodes, but will provide separate APIs for accessing and managing that data. See the Appendix for possible representations of usage data, but the representation we'll use is TBD.

Singleton values for observed and predicted future usage will rapidly prove inadequate, so we will support the following structure for extended usage information:

```yaml
resourceStatus: [
@ -223,7 +223,7 @@ where a `<CPU-info>` or `<memory-info>` structure looks like this:
```

All parts of this structure are optional, although we strongly encourage including quantities for 50, 90, 95, 99, 99.5, and 99.9 percentiles. _[In practice, it will be important to include additional info such as the length of the time window over which the averages are calculated, the confidence level, and information-quality metrics such as the number of dropped or discarded data points.]_
and predicted

## Future resource types

@ -34,7 +34,7 @@ Documentation for other releases can be found at
## Abstract

A proposal for the distribution of [secrets](../user-guide/secrets.md) (passwords, keys, etc) to the Kubelet and to
containers inside Kubernetes using a custom [volume](../user-guide/volumes.md#secrets) type. See the [secrets example](../user-guide/secrets/) for more information.

## Motivation

@ -117,7 +117,7 @@ which consumes this type of secret, the Kubelet may take a number of actions:

1. Expose the secret in a `.kubernetes_auth` file in a well-known location in the container's
   file system
2. Configure that node's `kube-proxy` to decorate HTTP requests from that pod to the
   `kubernetes-master` service with the auth token, e.g. by adding a header to the request
   (see the [LOAS Daemon](https://github.com/GoogleCloudPlatform/kubernetes/issues/2209) proposal)

@ -146,7 +146,7 @@ We should consider what the best way to allow this is; there are a few different
    export MY_SECRET_ENV=MY_SECRET_VALUE

   The user could `source` the file at `/etc/secrets/my-secret` prior to executing the command for
   the image either inline in the command or in an init script,

2. Give secrets an attribute that allows users to express the intent that the platform should
   generate the above syntax in the file used to present a secret. The user could consume these
@ -48,55 +48,55 @@ The problem of securing containers in Kubernetes has come up [before](https://gi

### Container isolation

In order to improve container isolation from host and other containers running on the host, containers should only be
granted the access they need to perform their work. To this end it should be possible to take advantage of Docker
features such as the ability to [add or remove capabilities](https://docs.docker.com/reference/run/#runtime-privilege-linux-capabilities-and-lxc-configuration) and [assign MCS labels](https://docs.docker.com/reference/run/#security-configuration)
to the container process.

Support for user namespaces has recently been [merged](https://github.com/docker/libcontainer/pull/304) into Docker's libcontainer project and should soon surface in Docker itself. It will make it possible to assign a range of unprivileged uids and gids from the host to each container, improving the isolation between host and container and between containers.

### External integration with shared storage

In order to support external integration with shared storage, processes running in a Kubernetes cluster
should be able to be uniquely identified by their Unix UID, such that a chain of ownership can be established.
Processes in pods will need to have consistent UID/GID/SELinux category labels in order to access shared disks.

## Constraints and Assumptions

* It is out of the scope of this document to prescribe a specific set
  of constraints to isolate containers from their host. Different use cases need different
  settings.
* The concept of a security context should not be tied to a particular security mechanism or platform
  (i.e. SELinux, AppArmor)
* Applying a different security context to a scope (namespace or pod) requires a solution such as the one proposed for
  [service accounts](service_accounts.md).

## Use Cases

In order of increasing complexity, following are example use cases that would
be addressed with security contexts:

1. Kubernetes is used to run a single cloud application. In order to protect
   nodes from containers:
   * All containers run as a single non-root user
   * Privileged containers are disabled
   * All containers run with a particular MCS label
   * Kernel capabilities like CHOWN and MKNOD are removed from containers

2. Just like case #1, except that I have more than one application running on
   the Kubernetes cluster.
   * Each application is run in its own namespace to avoid name collisions
   * For each application a different uid and MCS label is used

3. Kubernetes is used as the base for a PAAS with
   multiple projects, each project represented by a namespace.
   * Each namespace is associated with a range of uids/gids on the node that
     are mapped to uids/gids on containers using linux user namespaces.
   * Certain pods in each namespace have special privileges to perform system
     actions such as talking back to the server for deployment, run docker
     builds, etc.
   * External NFS storage is assigned to each namespace and permissions set
     using the range of uids/gids assigned to that namespace.

## Proposed Design

@ -109,12 +109,12 @@ to mutate Docker API calls in order to apply the security context.

It is recommended that this design be implemented in two phases:

1. Implement the security context provider extension point in the Kubelet
   so that a default security context can be applied on container run and creation.
2. Implement a security context structure that is part of a service account. The
   default context provider can then be used to apply a security context based
   on the service account associated with the pod.


### Security Context Provider

The Kubelet will have an interface that points to a `SecurityContextProvider`. The `SecurityContextProvider` is invoked before creating and running a given container:
@ -137,7 +137,7 @@ type SecurityContextProvider interface {
}
```

If the value of the SecurityContextProvider field on the Kubelet is nil, the kubelet will create and run the container as it does today.

### Security Context

@ -33,9 +33,9 @@ Documentation for other releases can be found at
|
||||
|
||||
## Simple rolling update
|
||||
|
||||
This is a lightweight design document for simple [rolling update](../user-guide/kubectl/kubectl_rolling-update.md) in `kubectl`.
|
||||
This is a lightweight design document for simple [rolling update](../user-guide/kubectl/kubectl_rolling-update.md) in `kubectl`.
|
||||
|
||||
Complete execution flow can be found [here](#execution-details). See the [example of rolling update](../user-guide/update-demo/) for more information.
|
||||
Complete execution flow can be found [here](#execution-details). See the [example of rolling update](../user-guide/update-demo/) for more information.
|
||||
|
||||
### Lightweight rollout
|
||||
|
||||
|
@ -173,11 +173,11 @@ Objects that contain both spec and status should not contain additional top-leve
##### Typical status properties

* **phase**: The phase is a simple, high-level summary of the phase of the lifecycle of an object. The phase should progress monotonically. Typical phase values are `Pending` (not yet fully physically realized), `Running` or `Active` (fully realized and active, but not necessarily operating correctly), and `Terminated` (no longer active), but may vary slightly for different types of objects. New phase values should not be added to existing objects in the future. Like other status fields, it must be possible to ascertain the lifecycle phase by observation. Additional details regarding the current phase may be contained in other fields.
* **conditions**: Conditions represent orthogonal observations of an object's current state. Objects may report multiple conditions, and new types of conditions may be added in the future. Condition status values may be `True`, `False`, or `Unknown`. Unlike the phase, conditions are not expected to be monotonic -- their values may change back and forth. A typical condition type is `Ready`, which indicates the object was believed to be fully operational at the time it was last probed. Conditions may carry additional information, such as the last probe time or last transition time.

TODO(@vishh): Reason and Message.

Phases and conditions are observations and not, themselves, state machines, nor do we define comprehensive state machines for objects with behaviors associated with state transitions. The system is level-based and should assume an Open World. Additionally, new observations and details about these observations may be added over time.
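As an illustration of these conventions, a status object might be shaped like the hedged sketch below; the `Widget` names are invented and imply no real API object.

```go
// WidgetStatus sketches the conventions above: a monotonic phase plus a
// set of conditions that may flip back and forth.
type WidgetStatus struct {
	// Phase is a simple, high-level summary, e.g. Pending, Running, Terminated.
	Phase string `json:"phase,omitempty"`
	// Conditions are orthogonal observations of current state.
	Conditions []WidgetCondition `json:"conditions,omitempty"`
}

// WidgetCondition carries one observation plus optional detail.
type WidgetCondition struct {
	Type   string `json:"type"`   // e.g. Ready
	Status string `json:"status"` // True, False, or Unknown
	// Optional extra information, per the conventions above.
	LastProbeTime      string `json:"lastProbeTime,omitempty"`
	LastTransitionTime string `json:"lastTransitionTime,omitempty"`
}
```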

In order to preserve extensibility, in the future, we intend to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from observations.
@ -376,7 +376,7 @@ Late-initializers should only make the following types of modifications:
- Adding keys to maps
- Adding values to arrays which have mergeable semantics (`patchStrategy:"merge"` attribute in
the type definition).
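For illustration, the hedged sketch below shows how a list field can be declared mergeable through the struct tag just mentioned; the type and field names are invented.

```go
// ExampleSpec sketches a mergeable list: late-initializers may append to
// Items because the tag requests merge rather than replace semantics.
type ExampleSpec struct {
	Items []Item `json:"items" patchStrategy:"merge"`
}

type Item struct {
	Name string `json:"name"`
}
```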

These conventions:
1. allow a user (with sufficient privilege) to override any system-default behaviors by setting
the fields that would otherwise have been defaulted.
@ -309,7 +309,7 @@ a panic from the `serialization_test`. If so, look at the diff it produces (or
the backtrace in case of a panic) and figure out what you forgot. Encode that
into the fuzzer's custom fuzz functions. Hint: if you added defaults for a field,
that field will need to have a custom fuzz function that ensures that the field is
fuzzed to a non-empty value.

The fuzzer can be found in `pkg/api/testing/fuzzer.go`.
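The sketch below shows the kind of custom fuzz function the hint describes, written against the github.com/google/gofuzz API; `MyType` and its defaulted `Mode` field are invented stand-ins.

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// MyType stands in for an API type whose Mode field gained a default.
type MyType struct {
	Mode string // defaulted to "auto" during conversion
}

func main() {
	f := fuzz.New().Funcs(
		func(t *MyType, c fuzz.Continue) {
			c.FuzzNoCustom(t) // fuzz everything else normally
			if t.Mode == "" {
				// keep the defaulted field non-empty so the
				// encode/decode round-trip stays stable
				t.Mode = "auto"
			}
		},
	)
	var obj MyType
	f.Fuzz(&obj)
	fmt.Println(obj.Mode)
}
```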
@ -61,7 +61,7 @@ Maintainers will do merges of appropriately reviewed-and-approved changes during
There may be discussion and even approvals granted outside of the above hours, but merges will generally be deferred.

If a PR is considered complex or controversial, the merge of that PR should be delayed to give all interested parties in all timezones the opportunity to provide feedback. Concretely, this means that such PRs should be held for 24
hours before merging. Of course "complex" and "controversial" are left to the judgment of the people involved, but we trust that part of being a committer is the judgment required to evaluate such things honestly, and not be
motivated by your desire (or your cube-mate's desire) to get their code merged. Also see "Holds" below, any reviewer can issue a "hold" to indicate that the PR is in fact complicated or complex and deserves further review.
@ -44,7 +44,7 @@ The purpose of filtering the nodes is to filter out the nodes that do not meet c
- `PodFitsPorts`: Check if any HostPort required by the Pod is already occupied on the node.
- `PodFitsHost`: Filter out all nodes except the one specified in the PodSpec's NodeName field.
- `PodSelectorMatches`: Check if the labels of the node match the labels specified in the Pod's `nodeSelector` field ([Here](../user-guide/node-selection/) is an example of how to use `nodeSelector` field).
- `CheckNodeLabelPresence`: Check if all the specified labels exist on a node or not, regardless of the value.

The details of the above predicates can be found in [plugin/pkg/scheduler/algorithm/predicates/predicates.go](http://releases.k8s.io/HEAD/plugin/pkg/scheduler/algorithm/predicates/predicates.go). All predicates mentioned above can be used in combination to perform a sophisticated filtering policy. Kubernetes uses some, but not all, of these predicates by default. You can see which ones are used by default in [plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go](http://releases.k8s.io/HEAD/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go).
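For orientation, a hedged sketch of the shape of a fit predicate follows; the real signatures live in predicates.go and may differ.

```go
// Pod is a pared-down stand-in for the real API type.
type Pod struct{ Spec PodSpec }
type PodSpec struct{ NodeName string }

// FitPredicate reports whether a pod can run on a node.
type FitPredicate func(pod *Pod, existingPods []*Pod, node string) (bool, error)

// podFitsHost mirrors PodFitsHost: if the PodSpec names a node, only that
// node fits; otherwise every node passes this predicate.
func podFitsHost(pod *Pod, existingPods []*Pod, node string) (bool, error) {
	if pod.Spec.NodeName == "" {
		return true, nil
	}
	return pod.Spec.NodeName == node, nil
}
```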
@ -53,7 +53,7 @@ The details of the above predicates can be found in [plugin/pkg/scheduler/algori
The filtered nodes are considered suitable to host the Pod, and often more than one node remains. Kubernetes prioritizes the remaining nodes to find the "best" one for the Pod. The prioritization is performed by a set of priority functions. For each remaining node, a priority function gives a score which scales from 0-10, with 10 representing "most preferred" and 0 "least preferred". Each priority function is weighted by a positive number and the final score of each node is calculated by adding up all the weighted scores. For example, suppose there are two priority functions, `priorityFunc1` and `priorityFunc2` with weighting factors `weight1` and `weight2` respectively, the final score of some NodeA is:

    finalScoreNodeA = (weight1 * priorityFunc1) + (weight2 * priorityFunc2)

After the scores of all nodes are calculated, the node with the highest score is chosen as the host of the Pod. If more than one node has the equal highest score, a random one among them is chosen.
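A hedged sketch of this weighted-sum selection, with invented names (the real scheduler code differs):

```go
package main

import (
	"fmt"
	"math/rand"
)

// PriorityFunc scores a node from 0 (least preferred) to 10 (most preferred).
type PriorityFunc func(node string) int

type weightedPriority struct {
	weight   int
	priority PriorityFunc
}

// selectHost computes finalScore = sum(weight_i * priorityFunc_i(node)) for
// every node and returns the highest scorer, picking randomly among ties.
// It assumes at least one candidate node remains after filtering.
func selectHost(nodes []string, priorities []weightedPriority) string {
	var best []string
	bestScore := -1
	for _, node := range nodes {
		score := 0
		for _, wp := range priorities {
			score += wp.weight * wp.priority(node)
		}
		switch {
		case score > bestScore:
			best, bestScore = []string{node}, score
		case score == bestScore:
			best = append(best, node)
		}
	}
	return best[rand.Intn(len(best))]
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	priorities := []weightedPriority{
		{weight: 1, priority: func(n string) int { return 10 - len(n)%11 }},
		{weight: 2, priority: func(n string) int { return 5 }},
	}
	fmt.Println(selectHost(nodes, priorities))
}
```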

Currently, Kubernetes scheduler provides some practical priority functions, including:
@ -70,7 +70,7 @@ These guidelines say *what* to do. See the Rationale section for *why*.
- Setup a cluster and run the [conformance test](development.md#conformance-testing) against it, and report the
results in your PR.
- Versioned distros should typically not modify or add code in `cluster/`. That is just scripts for developer
distros.
- When a new major or minor release of Kubernetes comes out, we may also release a new
conformance test, and require a new conformance test run to earn a conformance checkmark.
@ -82,20 +82,20 @@ Just file an issue or chat us on IRC and one of the committers will link to it f
These guidelines say *what* to do. See the Rationale section for *why*.
- the main reason to add a new development distro is to support a new IaaS provider (VM and
network management). This means implementing a new `pkg/cloudprovider/$IAAS_NAME`.
- Development distros should use Saltstack for Configuration Management.
- development distros need to support automated cluster creation, deletion, upgrading, etc.
This means writing scripts in `cluster/$IAAS_NAME`.
- all commits to the tip of this repo need to not break any of the development distros
- the author of the change is responsible for making changes necessary on all the cloud-providers if the
change affects any of them, and reverting the change if it breaks any of the CIs.
- a development distro needs to have an organization which owns it. This organization needs to:
  - Set up and maintain Continuous Integration that runs e2e frequently (multiple times per day) against the
Distro at head, and which notifies all devs of breakage.
  - Be reasonably available for questions and assist with
refactoring and feature additions that affect code for their IaaS.

## Rationale

- We want people to create Kubernetes clusters with whatever IaaS, Node OS,
configuration management tools, and so on, which they are familiar with. The
@ -114,19 +114,19 @@ These guidelines say *what* to do. See the Rationale section for *why*.
learning curve to understand our automated testing scripts. And it is considerable effort
to fully automate setup and teardown of a cluster, which is needed for CI. And, not everyone
has the time and money to run CI. We do not want to
discourage people from writing and sharing guides because of this.
- Versioned distro authors are free to run their own CI and let us know if there is breakage, but we
will not include them as commit hooks -- there cannot be so many commit checks that it is impossible
to pass them all.
- We prefer a single Configuration Management tool for development distros. If there were more
than one, the core developers would have to learn multiple tools and update config in multiple
places. **Saltstack** happens to be the one we picked when we started the project. We
welcome versioned distros that use any tool; there are already examples of
CoreOS Fleet, Ansible, and others.
- You can still run code from head or your own branch
if you use another Configuration Management tool -- you just have to do some manual steps
during testing and deployment.

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@ -39,7 +39,7 @@ crafting your own customized cluster. We'll guide you in picking a solution tha
## Picking the Right Solution

If you just want to "kick the tires" on Kubernetes, we recommend the [local Docker-based](docker.md) solution.

The local Docker-based solution is one of several [Local cluster](#local-machine-solutions) solutions
that are quick to set up, but are limited to running on one machine.
@ -50,9 +50,9 @@ solution is the easiest to create and maintain.
[Turn-key cloud solutions](#turn-key-cloud-solutions) require only a few commands to create
and cover a wider range of cloud providers.

[Custom solutions](#custom-solutions) require more effort to set up, and
they vary from step-by-step instructions to general advice for setting up
a Kubernetes cluster from scratch.

### Local-machine Solutions
@ -117,8 +117,8 @@ These solutions are combinations of cloud provider and OS not covered by the abo
- [Offline](coreos/bare_metal_offline.md) (no internet required. Uses CoreOS and Flannel)
- [fedora/fedora_ansible_config.md](fedora/fedora_ansible_config.md)
- [Fedora single node](fedora/fedora_manual_config.md)
- [Fedora multi node](fedora/flannel_multi_node_cluster.md)
- [Centos](centos/centos_manual_config.md)
- [Ubuntu](ubuntu.md)
- [Docker Multi Node](docker-multinode.md)
@ -215,7 +215,7 @@ kubectl get pods
Record the **Host** of the pod, which should be the private IP address.

Gather the public IP address for the worker node.

```bash
aws ec2 describe-instances --filters 'Name=private-ip-address,Values=<host>'
@ -60,7 +60,7 @@ centos-minion = 192.168.121.65
```

**Prepare the hosts:**

* Create virt7-testing repo on all hosts - centos-{master,minion} with following information.

```
@ -175,7 +175,7 @@ KUBELET_HOSTNAME="--hostname_override=centos-minion"
# Add your own!
KUBELET_ARGS=""
```

* Start the appropriate services on node (centos-minion).
@ -68,8 +68,8 @@ Or create a `~/.cloudstack.ini` file:
[cloudstack]
endpoint = <your cloudstack api endpoint>
key = <your api access key>
secret = <your api secret key>
method = post

We need to use the http POST method to pass the _large_ userdata to the coreOS instances.
@ -104,7 +104,7 @@ Check the tasks and templates in `roles/k8s` if you want to modify anything.
Once the playbook has finished, it will print out the IP of the Kubernetes master:

TASK: [k8s | debug msg='k8s master IP is {{ k8s_master.default_ip }}'] ********

SSH to it using the key that was created and using the _core_ user and you can list the machines in your cluster:
@ -59,13 +59,13 @@ Deploy a CoreOS running Kubernetes environment. This particular guild is made to
## High Level Design

1. Manage the tftp directory
  * /tftpboot/(coreos)(centos)(RHEL)
  * /tftpboot/pxelinux.0/(MAC) -> linked to Linux image config file
2. Update per install the link for pxelinux
3. Update the DHCP config to reflect the host needing deployment
4. Setup nodes to deploy CoreOS creating an etcd cluster.
5. Have no access to the public [etcd discovery tool](https://discovery.etcd.io/).
6. Install the CoreOS slaves to become Kubernetes nodes.

## This Guide's variables
@ -115,7 +115,7 @@ To setup CentOS PXELINUX environment there is a complete [guide here](http://doc
timeout 15
ONTIMEOUT local
display boot.msg

MENU TITLE Main Menu

LABEL local
@ -126,7 +126,7 @@ Now you should have a working PXELINUX setup to image CoreOS nodes. You can veri
## Adding CoreOS to PXE

This section describes how to set up the CoreOS images to live alongside a pre-existing PXELINUX environment.

1. Find or create the TFTP root directory that everything will be based off of.
  * For this document we will assume `/tftpboot/` is our root directory.
@ -170,9 +170,9 @@ This section describes how to setup the CoreOS images to live alongside a pre-ex
APPEND initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http://<xxx.xxx.xxx.xxx>/pxe-cloud-config-slave.yml
MENU END

This configuration file will now boot from local drive but have the option to PXE image CoreOS.

## DHCP configuration

This section covers configuring the DHCP server to hand out our new images. In this case we are assuming that there are other servers that will boot alongside other images.
@ -186,7 +186,7 @@ This section covers configuring the DHCP server to hand out our new images. In t
next-server 10.20.30.242;
option broadcast-address 10.20.30.255;
filename "<other default image>";

...
# http://www.syslinux.org/wiki/index.php/PXELINUX
host core_os_master {
@ -194,7 +194,7 @@ This section covers configuring the DHCP server to hand out our new images. In t
option routers 10.20.30.1;
fixed-address 10.20.30.40;
option domain-name-servers 10.20.30.242;
filename "/pxelinux.0";
}
host core_os_slave {
hardware ethernet d0:00:67:13:0d:01;
@ -217,7 +217,7 @@ We will be specifying the node configuration later in the guide.
## Kubernetes

To deploy our configuration we need to create an `etcd` master. To do so we want to pxe CoreOS with a specific cloud-config.yml. There are two options we have here.
1. Template the cloud config file and programmatically create new static configs for different cluster setups.
2. Have a service discovery protocol running in our stack to do auto discovery.
@ -427,7 +427,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl
--logtostderr=true
Restart=always
RestartSec=10
- name: kube-controller-manager.service
command: start
content: |
[Unit]
@ -535,7 +535,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl
command: start
content: |
[Unit]
After=network-online.target
Wants=network-online.target
Description=flannel is an etcd backed overlay network for containers
[Service]
@ -44,7 +44,7 @@ Use the [master.yaml](cloud-configs/master.yaml) and [node.yaml](cloud-configs/n
* Provision the master node
* Capture the master node private IP address
* Edit node.yaml
* Provision one or more worker nodes

### AWS
@ -79,7 +79,7 @@ curl <insert-ip-from-above-here>
Note that you will need to run this curl command on your boot2docker VM if you are running on OS X.

### Scaling

Now try to scale up the nginx you created before:
@ -80,7 +80,7 @@ docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1
### Test it out

At this point you should have a running Kubernetes cluster. You can test this by downloading the kubectl
binary
([OS X](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/darwin/amd64/kubectl))
([linux](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl))
@ -60,9 +60,9 @@ fed-node = 192.168.121.65
```

**Prepare the hosts:**

* Install Kubernetes on all hosts - fed-{master,node}. This will also pull in docker. Also install etcd on fed-master. This guide has been tested with kubernetes-0.18 and beyond.
* The [--enablerepo=update-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive.
* If you want the very latest Kubernetes release [you can download and yum install the RPM directly from Fedora Koji](http://koji.fedoraproject.org/koji/packageinfo?packageID=19202) instead of using the yum install command below.

```sh
@ -262,10 +262,10 @@ works with [Amazon Web Service](https://jujucharms.com/docs/stable/config-aws),
[Vmware vSphere](https://jujucharms.com/docs/stable/config-vmware).

If you do not see your favorite cloud provider listed many clouds can be
configured for [manual provisioning](https://jujucharms.com/docs/stable/config-manual).

The Kubernetes bundle has been tested on GCE and AWS and found to work with
version 1.0.0.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@ -74,7 +74,7 @@ spec:
```

[Download example](../../examples/blog-logging/counter-pod.yaml)
<!-- END MUNGE: EXAMPLE -->
<!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->

This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod in the default
namespace.
@ -92,7 +92,7 @@ NAME READY STATUS RESTARTS AG
counter 1/1 Running 0 5m
```

This step may take a few minutes to download the ubuntu:14.04 image during which the pod status will be shown as `Pending`.

One of the nodes is now running the counter pod:
@ -192,7 +192,7 @@ spec:
```

[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
<!-- END MUNGE: EXAMPLE -->
<!-- END MUNGE: EXAMPLE ../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml -->

This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.
@ -92,7 +92,7 @@ steps that existing cluster setup scripts are making.
## Designing and Preparing

### Learning

1. You should be familiar with using Kubernetes already. We suggest you set
up a temporary cluster by following one of the other Getting Started Guides.
@ -108,7 +108,7 @@ an interface for managing TCP Load Balancers, Nodes (Instances) and Networking R
The interface is defined in `pkg/cloudprovider/cloud.go`. It is possible to
create a custom cluster without implementing a cloud provider (for example if using
bare-metal), and not all parts of the interface need to be implemented, depending
on how flags are set on various components.

### Nodes
@ -220,13 +220,13 @@ all the necessary binaries.
#### Selecting Images

You will run docker, kubelet, and kube-proxy outside of a container, the same way you would run any system daemon, so
you just need the bare binaries. For etcd, kube-apiserver, kube-controller-manager, and kube-scheduler,
we recommend that you run these as containers, so you need an image to be built.

You have several choices for Kubernetes images:
- Use images hosted on Google Container Registry (GCR):
  - e.g. `gcr.io/google_containers/kube-apiserver:$TAG`, where `TAG` is the latest
release tag, which can be found on the [latest releases page](https://github.com/GoogleCloudPlatform/kubernetes/releases/latest).
  - Ensure $TAG is the same tag as the release tag you are using for kubelet and kube-proxy.
- Build your own images.
  - Useful if you are using a private registry.
@ -294,7 +294,7 @@ You will end up with the following files (we will use these variables later on)
#### Preparing Credentials

The admin user (and any users) need:
- a token or a password to identify them.
- tokens are just long alphanumeric strings, e.g. 32 chars. See
  - `TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)`
@ -318,7 +318,7 @@ The kubeconfig file for the administrator can be created as follows:
- `kubectl config set-context $CONTEXT_NAME --cluster=$CLUSTER_NAME --user=$USER`
- `kubectl config use-context $CONTEXT_NAME`

Next, make a kubeconfig file for the kubelets and kube-proxy. There are a couple of options for how
many distinct files to make:
1. Use the same credential as the admin
  - This is simplest to set up.
@ -355,7 +355,7 @@ guide assume that there are kubeconfigs in `/var/lib/kube-proxy/kubeconfig` and
## Configuring and Installing Base Software on Nodes

This section discusses how to configure machines to be Kubernetes nodes.

You should run three daemons on every node:
- docker or rkt
@ -395,7 +395,7 @@ so that kube-proxy can manage iptables instead of docker.
  - if you are using an overlay network, consult those instructions.
- `--mtu=`
  - may be required when using Flannel, because of the extra packet size due to udp encapsulation
- `--insecure-registry $CLUSTER_SUBNET`
  - to connect to a private registry, if you set one up, without using SSL.

You may want to increase the number of open files for docker:
@ -412,7 +412,7 @@ installation, by following examples given in the Docker documentation.
The minimum version required is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6).

[systemd](http://www.freedesktop.org/wiki/Software/systemd/) is required on your node to run rkt. The
minimum version required to match rkt v0.5.6 is
[systemd 215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html).

[rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking.md) is also required
@ -444,7 +444,7 @@ Arguments to consider:
All nodes should run kube-proxy. (Running kube-proxy on a "master" node is not
strictly required, but being consistent is easier.) Obtain a binary as described for
kubelet.

Arguments to consider:
- If following the HTTPS security approach:
@ -456,7 +456,7 @@ Arguments to consider:
### Networking

Each node needs to be allocated its own CIDR range for pod networking.
Call this `NODE_X_POD_CIDR`.
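A hedged sketch of carving per-node pod CIDRs out of a cluster-wide IPv4 /16, mirroring the node-X gets 192.168.X.1/24 pattern used elsewhere in these guides; the helper is invented and assumes fewer than 256 nodes.

```go
package main

import (
	"fmt"
	"net"
)

// nodePodCIDR hands node N the Nth /24 inside an IPv4 /16 cluster range.
func nodePodCIDR(clusterCIDR string, nodeIndex int) (string, error) {
	_, ipnet, err := net.ParseCIDR(clusterCIDR)
	if err != nil {
		return "", err
	}
	ip := ipnet.IP.To4()
	if ip == nil {
		return "", fmt.Errorf("expected an IPv4 CIDR, got %s", clusterCIDR)
	}
	return fmt.Sprintf("%d.%d.%d.0/24", ip[0], ip[1], nodeIndex), nil
}

func main() {
	cidr, _ := nodePodCIDR("192.168.0.0/16", 3) // NODE_3_POD_CIDR
	fmt.Println(cidr)                           // 192.168.3.0/24
}
```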

A bridge called `cbr0` needs to be created on each node. The bridge is explained
further in the [networking documentation](../admin/networking.md). The bridge itself
@ -498,7 +498,7 @@ NOTE: This is environment specific. Some environments will not need
any masquerading at all. Others, such as GCE, will not allow pod IPs to send
traffic to the internet, but have no problem with them inside your GCE Project.

### Other

- Enable auto-upgrades for your OS package manager, if desired.
- Configure log rotation for all node components (e.g. using [logrotate](http://linux.die.net/man/8/logrotate)).
@ -529,7 +529,7 @@ You will need to run one or more instances of etcd.
- Recommended approach: run one etcd instance, with its log written to a directory backed
by durable storage (RAID, GCE PD)
- Alternative: run 3 or 5 etcd instances.
  - Log can be written to non-durable storage because storage is replicated.
- run a single apiserver which connects to one of the etcd nodes.
See [cluster-troubleshooting](../admin/cluster-troubleshooting.md) for more discussion on factors affecting cluster
availability.
@ -49,7 +49,7 @@ On the Master:
On each Node:
- `kube-proxy`
- `kube-kubelet`
- `calico-node`

## Prerequisites
@ -191,7 +191,7 @@ node-X | 192.168.X.1/24
#### Start docker on cbr0

The Docker daemon must be started and told to use the already configured cbr0 instead of using the usual docker0, as well as disabling ip-masquerading and modification of the ip-tables.

1.) Edit the ubuntu-15.04 docker.service for systemd at: `/lib/systemd/system/docker.service`
@ -49,7 +49,7 @@ This document describes how to deploy Kubernetes on ubuntu nodes, including 1 Ku
## Prerequisites

*1 The nodes have installed docker version 1.2+ and bridge-utils to manipulate linux bridge*

*2 All machines can communicate with each other, no need to connect Internet (should use private docker registry in this case)*
@ -57,7 +57,7 @@ This document describes how to deploy Kubernetes on ubuntu nodes, including 1 Ku
*4 Dependencies of this guide: etcd-2.0.12, flannel-0.4.0, k8s-1.0.1, but it may work with higher versions*

*5 All the remote servers can be ssh logged in without a password by using key authentication*

### Starting a Cluster
@ -80,7 +80,7 @@ Please make sure that there are `kube-apiserver`, `kube-controller-manager`, `ku
An example cluster is listed as below:

| IP Address|Role |
|---------|------|
|10.10.103.223| node |
|10.10.103.162| node |
@ -112,13 +112,13 @@ The `SERVICE_CLUSTER_IP_RANGE` variable defines the Kubernetes service IP range.
    172.16.0.0 - 172.31.255.255 (172.16/12 prefix)

    192.168.0.0 - 192.168.255.255 (192.168/16 prefix)

The `FLANNEL_NET` variable defines the IP range used for the flannel overlay network, and should not conflict with the above `SERVICE_CLUSTER_IP_RANGE`.
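To sanity-check the two ranges programmatically, a hedged sketch using the standard library; because CIDR blocks are aligned, it suffices to test whether either network contains the other's base address. The example values are placeholders.

```go
package main

import (
	"fmt"
	"net"
)

// overlaps reports whether two CIDR blocks intersect. Aligned CIDR blocks
// overlap only if one contains the other's network address.
func overlaps(a, b *net.IPNet) bool {
	return a.Contains(b.IP) || b.Contains(a.IP)
}

func main() {
	_, serviceRange, _ := net.ParseCIDR("192.168.3.0/24") // SERVICE_CLUSTER_IP_RANGE
	_, flannelNet, _ := net.ParseCIDR("172.16.0.0/16")    // FLANNEL_NET
	fmt.Println(overlaps(serviceRange, flannelNet))       // false: safe to use
}
```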

After all the above variables have been set correctly, we can use the following command in the cluster/ directory to bring up the whole cluster.

`$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh`

The scripts automatically scp binaries and config files to all the machines and start the k8s service on them. The only thing you need to do is to type the sudo password when prompted. The current machine name is shown below, so you will not type in the wrong password.
@ -135,9 +135,9 @@ If all things goes right, you will see the below message from console
**All done !**

You can also use the `kubectl` command to see if the newly created k8s is working correctly. The `kubectl` binary is under the `cluster/ubuntu/binaries` directory. You can move it into your PATH. Then you can use the below command smoothly.

For example, use `$ kubectl get nodes` to see if all your nodes are in ready status. It may take some time for the nodes to become ready, as below.

```console
NAME LABELS STATUS
@ -192,19 +192,19 @@ We are working on these features which we'd like to let everybody know:
#### Trouble Shooting

Generally, what this approach did is quite simple:

1. Download and copy binaries and configuration files to proper directories on every node

2. Configure `etcd` using IPs based on input from user

3. Create and start flannel network

So, if you see a problem, **check etcd configuration first**

Please try:

1. Check `/var/log/upstart/etcd.log` for suspicious etcd log

2. Check `/etc/default/etcd`, as we do not have much input validation, a right config should be like:
@ -212,11 +212,11 @@ Please try:
ETCD_OPTS="-name infra1 -initial-advertise-peer-urls <http://ip_of_this_node:2380> -listen-peer-urls <http://ip_of_this_node:2380> -initial-cluster-token etcd-cluster-1 -initial-cluster infra1=<http://ip_of_this_node:2380>,infra2=<http://ip_of_another_node:2380>,infra3=<http://ip_of_another_node:2380> -initial-cluster-state new"
```

3. You can use the below command
`$ KUBERNETES_PROVIDER=ubuntu ./kube-down.sh` to bring down the cluster and run
`$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh` again to start again.

4. You can also customize your own settings in `/etc/default/{component_name}` after configuration succeeds.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@ -34,7 +34,7 @@ Documentation for other releases can be found at
## Abstract

Auto-scaling is a data-driven feature that allows users to increase or decrease capacity as needed by controlling the
number of pods deployed within the system automatically.

## Motivation
@ -49,7 +49,7 @@ done automatically based on statistical analysis and thresholds.
* Scale verb - [1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629)
* Config conflicts - [Config](https://github.com/GoogleCloudPlatform/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes)
* Rolling updates - [1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353)
* Multiple scalable types - [1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624)

## Constraints and Assumptions
@ -77,7 +77,7 @@ balanced or situated behind a proxy - the data from those proxies and load balan
server traffic for applications. This is the primary, but not sole, source of data for making decisions.

Within Kubernetes a [kube proxy](../user-guide/services.md#ips-and-vips)
running on each node directs service requests to the underlying implementation.

While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage
traffic to backends. OpenShift, for instance, adds a "route" resource for defining external to internal traffic flow.
@ -87,7 +87,7 @@ data source for the number of backends.
### Scaling based on predictive analysis

Scaling may also occur based on predictions of system state like anticipated load, historical data, etc. Hand in hand
with scaling based on traffic, predictive analysis may be used to determine anticipated system load and scale the application automatically.

### Scaling based on arbitrary data
@ -113,7 +113,7 @@ use a client/cache implementation to receive watch data from the data aggregator
scaling the application. Auto-scalers are created and defined like other resources via REST endpoints and belong to the
namespace just as a `ReplicationController` or `Service`.

Since an auto-scaler is a durable object it is best represented as a resource.

```go
//The auto scaler interface
@ -241,7 +241,7 @@ be specified as "when requests per second fall below 25 for 30 seconds scale the
### Data Aggregator

This section has intentionally been left empty. I will defer to folks who have more experience gathering and analyzing
time series statistics.

Data aggregation is opaque to the auto-scaler resource. The auto-scaler is configured to use `AutoScaleThresholds`
that know how to work with the underlying data in order to know if an application must be scaled up or down. Data aggregation
@ -257,7 +257,7 @@ potentially piggyback on this registry.
If multiple scalable targets satisfy the `TargetSelector` criteria the auto-scaler should be configurable as to which
target(s) are scaled. To begin with, if multiple targets are found the auto-scaler will scale the largest target up
or down as appropriate. In the future this may be more configurable.

### Interactions with a deployment
@ -266,12 +266,12 @@ there will be multiple replication controllers, with one scaling up and another
auto-scaler must be aware of the entire set of capacity that backs a service so it does not fight with the deployer. `AutoScalerSpec.MonitorSelector`
is what provides this ability. By using a selector that spans the entire service the auto-scaler can monitor capacity
of multiple replication controllers and check that capacity against the `AutoScalerSpec.MaxAutoScaleCount` and
`AutoScalerSpec.MinAutoScaleCount` while still only targeting a specific set of `ReplicationController`s with `TargetSelector`.
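Collecting the fields named above into one place, a hedged sketch of the spec; only the field names come from this proposal, the types are assumptions.

```go
// AutoScalerSpec sketches the selectors and bounds described above.
type AutoScalerSpec struct {
	// MonitorSelector spans the whole service, so capacity from every
	// replication controller behind it is counted.
	MonitorSelector map[string]string `json:"monitorSelector"`
	// TargetSelector picks the replication controller(s) actually scaled.
	TargetSelector map[string]string `json:"targetSelector"`
	// Scaling keeps the monitored capacity within these bounds.
	MinAutoScaleCount int `json:"minAutoScaleCount"`
	MaxAutoScaleCount int `json:"maxAutoScaleCount"`
}
```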

In the course of a deployment it is up to the deployment orchestration to decide how to manage the labels
on the replication controllers if it needs to ensure that only specific replication controllers are targeted by
the auto-scaler. By default, the auto-scaler will scale the largest replication controller that meets the target label
selector criteria.

During deployment orchestration the auto-scaler may be making decisions to scale its target up or down. In order to prevent
the scaler from fighting with a deployment process that is scaling one replication controller up and scaling another one
@ -31,17 +31,17 @@ Documentation for other releases can be found at
<!-- END MUNGE: UNVERSIONED_WARNING -->

# Kubernetes Cluster Federation

## (a.k.a. "Ubernetes")

## Requirements Analysis and Product Proposal

## _by Quinton Hoole ([quinton@google.com](mailto:quinton@google.com))_

_Initial revision: 2015-03-05_
_Last updated: 2015-03-09_
This doc: [tinyurl.com/ubernetesv2](http://tinyurl.com/ubernetesv2)
Slides: [tinyurl.com/ubernetes-slides](http://tinyurl.com/ubernetes-slides)

## Introduction
@ -89,11 +89,11 @@ loosely speaking, a cluster can be thought of as running in a single
data center, or cloud provider availability zone, a more precise
definition is that each cluster provides:

1. a single Kubernetes API entry point,
1. a consistent, cluster-wide resource naming scheme
1. a scheduling/container placement domain
1. a service network routing domain
1. (in future) an authentication and authorization model.
1. ....

The above in turn imply the need for a relatively performant, reliable
@ -220,7 +220,7 @@ the multi-cloud provider implementation should just work for a single
cloud provider). Propose high-level design catering for both, with
initial implementation targeting single cloud provider only.

**Clarifying questions:**
**How does global external service discovery work?** In the steady
state, which external clients connect to which clusters? GeoDNS or
similar? What is the tolerable failover latency if a cluster goes
@ -266,8 +266,8 @@ Doing nothing (i.e. forcing users to choose between 1 and 2 on their
own) is probably an OK starting point. Kubernetes autoscaling can get
us to 3 at some later date.

Up to this point, this use case ("Unavailability Zones") seems materially different from all the others above. It does not require dynamic cross-cluster service migration (we assume that the service is already running in more than one cluster when the failure occurs). Nor does it necessarily involve cross-cluster service discovery or location affinity. As a result, I propose that we address this use case somewhat independently of the others (although I strongly suspect that it will become substantially easier once we've solved the others).

All of the above (regarding "Unavailability Zones") refers primarily
to already-running user-facing services, and minimizing the impact on
end users of those services becoming unavailable in a given cluster.
@ -322,7 +322,7 @@ location affinity:
(other than the source of YouTube videos, which is assumed to be
equally remote from all clusters in this example). Each pod can be
scheduled independently, in any cluster, and moved at any time.
1. **"Preferentially Coupled"**: Somewhere between Coupled and Decoupled. These applications prefer to have all of their pods located in the same cluster (e.g. for failure correlation, network latency or bandwidth cost reasons), but can tolerate being partitioned for "short" periods of time (for example while migrating the application from one cluster to another). Most small to medium sized LAMP stacks with not-very-strict latency goals probably fall into this category (provided that they use sane service discovery and reconnect-on-fail, which they need to do anyway to run effectively, even in a single Kubernetes cluster).

And then there's what I'll call _absolute_ location affinity. Some
applications are required to run in bounded geographical or network
@ -341,7 +341,7 @@ of our users are in Western Europe, U.S. West Coast" etc).
## Cross-cluster service discovery

I propose having pods use standard discovery methods used by external clients of Kubernetes applications (i.e. DNS). DNS might resolve to a public endpoint in the local or a remote cluster. Other than Strictly Coupled applications, software should be largely oblivious of which of the two occurs.
_Aside:_ How do we avoid "tromboning" through an external VIP when DNS
resolves to a public IP on the local cluster? Strictly speaking this
would be an optimization, and probably only matters to high bandwidth,
@ -384,15 +384,15 @@ such events include:
1. A change of scheduling policy ("we no longer use cloud provider X").
1. A change of resource pricing ("cloud provider Y dropped their prices - lets migrate there").

Strictly Decoupled applications can be trivially moved, in part or in whole, one pod at a time, to one or more clusters.
For Preferentially Decoupled applications, the federation system must first locate a single cluster with sufficient capacity to accommodate the entire application, then reserve that capacity, and incrementally move the application, one (or more) resources at a time, over to the new cluster, within some bounded time period (and possibly within a predefined "maintenance" window).
Strictly Coupled applications (with the exception of those deemed
completely immovable) require the federation system to:

1. start up an entire replica application in the destination cluster
1. copy persistent data to the new application instance
1. switch traffic across
1. tear down the original application instance

It is proposed that support for automated migration of Strictly Coupled applications be
deferred to a later date.
@ -422,11 +422,11 @@ TBD: All very hand-wavey still, but some initial thoughts to get the conversatio
## Ubernetes API

This looks a lot like the existing Kubernetes API but is explicitly multi-cluster.

+ Clusters become first class objects, which can be registered, listed, described, deregistered etc via the API.
+ Compute resources can be explicitly requested in specific clusters, or automatically scheduled to the "best" cluster by Ubernetes (by a pluggable Policy Engine).
+ There is a federated equivalent of a replication controller type, which is multicluster-aware, and delegates to cluster-specific replication controllers as required (e.g. a federated RC for n replicas might simply spawn multiple replication controllers in different clusters to do the hard work).
+ These federated replication controllers (and in fact all the
services comprising the Ubernetes Control Plane) have to run
somewhere. For high availability Ubernetes deployments, these
|
@ -33,21 +33,21 @@ Documentation for other releases can be found at
|
||||
|
||||
# Security
|
||||
|
||||
If you believe you have discovered a vulnerability or a have a security incident to report, please follow the steps below. This applies to Kubernetes releases v1.0 or later.
|
||||
If you believe you have discovered a vulnerability or a have a security incident to report, please follow the steps below. This applies to Kubernetes releases v1.0 or later.
|
||||
|
||||
To watch for security and major API announcements, please join our [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group.
|
||||
To watch for security and major API announcements, please join our [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group.
|
||||
|
||||
## Reporting a security issue
|
||||
|
||||
To report an issue, please:
|
||||
- Submit a bug report [here](http://goo.gl/vulnz).
|
||||
- Select “I want to report a technical security bug in a Google product (SQLi, XSS, etc.).”
|
||||
- Select “Other” as the Application Type.
|
||||
- Select “Other” as the Application Type.
|
||||
- Under reproduction steps, please additionally include
|
||||
- the words "Kubernetes Security issue"
|
||||
- Description of the issue
|
||||
- Kubernetes release (e.g. output of `kubectl version` command, which includes server version.)
|
||||
- Environment setup (e.g. which "Getting Started Guide" you followed, if any; what node operating system used; what service or software creates your virtual machines, if any)
|
||||
- Environment setup (e.g. which "Getting Started Guide" you followed, if any; what node operating system used; what service or software creates your virtual machines, if any)
|
||||
|
||||
An online submission will have the fastest response; however, if you prefer email, please send mail to security@google.com. If you feel the need, please use the [PGP public key](https://services.google.com/corporate/publickey.txt) to encrypt communications.
|
||||
|
||||
|
@ -150,7 +150,7 @@ There are [client libraries](../devel/client-libraries.md) for accessing the API
from several languages. The Kubernetes project-supported
[Go](http://releases.k8s.io/HEAD/pkg/client/)
client library can use the same [kubeconfig file](kubeconfig-file.md)
as the kubectl CLI does to locate and authenticate to the apiserver.

See documentation for other libraries for how they authenticate.
@ -241,7 +241,7 @@ at `https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticse
#### Manually constructing apiserver proxy URLs

As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL:
`http://`*`kubernetes_master_address`*`/`*`service_path`*`/`*`service_name`*`/`*`service_endpoint-suffix-parameter`*
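A hedged sketch of assembling that pattern programmatically; the arguments below are the placeholders from the pattern itself.

```go
package main

import "fmt"

// proxyURL assembles the pattern shown above from its four parts.
func proxyURL(master, servicePath, serviceName, suffix string) string {
	return fmt.Sprintf("http://%s/%s/%s/%s", master, servicePath, serviceName, suffix)
}

func main() {
	fmt.Println(proxyURL(
		"kubernetes_master_address",
		"service_path",
		"service_name",
		"service_endpoint-suffix-parameter",
	))
}
```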

<!--- TODO: update this part of doc because it doesn't seem to be valid. What
about namespaces? 'proxy' verb? -->
@ -297,7 +297,7 @@ There are several different proxies you may encounter when using Kubernetes:
  - can be used to reach a Node, Pod, or Service
  - does load balancing when used to reach a Service
1. The [kube proxy](services.md#ips-and-vips):
  - runs on each node
  - proxies UDP and TCP
  - does not understand HTTP
  - provides load balancing
|
@ -87,7 +87,7 @@ there are insufficient resources of one type or another that prevent scheduling.
|
||||
your pod. Reasons include:
|
||||
|
||||
* **You don't have enough resources**: You may have exhausted the supply of CPU or Memory in your cluster, in this case
|
||||
you need to delete Pods, adjust resource requests, or add new nodes to your cluster. See [Compute Resources document](compute-resources.md#my-pods-are-pending-with-event-message-failedscheduling) for more information.
|
||||
you need to delete Pods, adjust resource requests, or add new nodes to your cluster. See [Compute Resources document](compute-resources.md#my-pods-are-pending-with-event-message-failedscheduling) for more information.
|
||||
|
||||
* **You are using `hostPort`**: When you bind a Pod to a `hostPort` there are a limited number of places that pod can be
|
||||
scheduled. In most cases, `hostPort` is unnecessary, try using a Service object to expose your Pod. If you do require
|
||||
@ -100,7 +100,7 @@ If a Pod is stuck in the `Waiting` state, then it has been scheduled to a worker
Again, the information from `kubectl describe ...` should be informative. The most common cause of `Waiting` pods is a failure to pull the image. There are three things to check:
* Make sure that you have the name of the image correct
* Have you pushed the image to the repository?
* Run a manual `docker pull <image>` on your machine to see if the image can be pulled.

#### My pod is crashing or otherwise unhealthy
@@ -139,7 +139,7 @@ feature request on GitHub describing your use case and why these tools are insufficient.

### Debugging Replication Controllers

Replication controllers are fairly straightforward. They can either create Pods or they can't. If they can't
create pods, then please refer to the [instructions above](#debugging-pods) to debug your pods.

You can also use `kubectl describe rc ${CONTROLLER_NAME}` to introspect events related to the replication
controller.
@@ -199,11 +199,11 @@ check:
* Can you connect to your pods directly? Get the IP address for the Pod, and try to connect directly to that IP
* Is your application serving on the port that you configured? Kubernetes doesn't do port remapping, so if your application serves on 8080, the `containerPort` field needs to be 8080.

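For example, a minimal sketch of a matching pod fragment (hypothetical names) for an app listening on 8080:

```yaml
spec:
  containers:
  - name: my-app            # hypothetical container name
    image: my-app:latest    # hypothetical image
    ports:
    - containerPort: 8080   # must match the port the application actually serves on
```
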
#### More information

If none of the above solves your problem, follow the instructions in [Debugging Service document](debugging-services.md) to make sure that your `Service` is running, has `Endpoints`, and your `Pods` are actually serving; that you have DNS working and iptables rules installed, and that kube-proxy is not misbehaving.

You may also visit [troubleshooting document](../troubleshooting.md) for more information.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

@@ -133,7 +133,7 @@ When using Docker:
**TODO: document behavior for rkt**

If a container exceeds its memory limit, it may be terminated. If it is restartable, it will be
restarted by kubelet, as will any other type of runtime failure.

A container may or may not be allowed to exceed its CPU limit for extended periods of time.
However, it will not be killed for excessive CPU usage.
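As a sketch, a container spec with explicit limits might look like this (hypothetical names and values):

```yaml
spec:
  containers:
  - name: my-app            # hypothetical container name
    image: my-app:latest    # hypothetical image
    resources:
      limits:
        cpu: 500m           # may be throttled, but not killed, for exceeding this
        memory: 128Mi       # may be terminated (and possibly restarted) if exceeded
```
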
@@ -178,7 +178,7 @@ The [resource quota](../admin/resource-quota.md) feature can be configured
to limit the total amount of resources that can be consumed. If used in conjunction
with namespaces, it can prevent one team from hogging all the resources.

### My container is terminated

Your container may be terminated because it's resource-starved. To check if a container is being killed because it is hitting a resource limit, call `kubectl describe pod`
on the pod you are interested in:

@@ -35,7 +35,7 @@ Documentation for other releases can be found at

This document is meant to highlight and consolidate in one place configuration best practices that are introduced throughout the user-guide and getting-started documentation and examples. This is a living document, so if you think of something that is not on this list but might be useful to others, please don't hesitate to file an issue or submit a PR.

1. When writing configuration, use the latest stable API version (currently v1).
1. Configuration should be stored in version control before being pushed to the cluster. This allows configuration to be quickly rolled back if needed and will aid with cluster re-creation and restoration if the worst were to happen.
1. Use YAML rather than JSON. They can be used interchangeably in almost all scenarios, but YAML tends to be more user-friendly for config.
1. Group related objects together in a single file. This is often better than separate files.
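As a sketch of the first and last points (hypothetical names throughout), two related v1 objects can live in one file, separated by `---`:

```yaml
apiVersion: v1              # latest stable API version
kind: Service
metadata:
  name: my-app              # hypothetical name
spec:
  selector:
    app: my-app
  ports:
  - port: 80
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: my-app
spec:
  replicas: 2
  selector:
    app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
      - name: my-app
        image: nginx        # hypothetical image
```
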
@@ -73,7 +73,7 @@ spec: # specification of the pod’s contents

The value of `metadata.name`, `hello-world`, will be the name of the pod resource created, and must be unique within the cluster, whereas `containers[0].name` is just a nickname for the container within that pod. `image` is the name of the Docker image, which Kubernetes expects to be able to pull from a registry, the [Docker Hub](https://registry.hub.docker.com/) by default.

`restartPolicy: Never` indicates that we just want to run the container once and then terminate the pod.

The [`command`](containers.md#containers-and-commands) overrides the Docker container’s `Entrypoint`. Command arguments (corresponding to Docker’s `Cmd`) may be specified using `args`, as follows:

@@ -142,7 +142,7 @@ However, a shell isn’t necessary just to expand environment variables. Kubernetes

## Viewing pod status

You can see the pod you created (actually all of your cluster's pods) using the `get` command.

If you’re quick, it will look as follows:

@@ -199,7 +199,7 @@ $ kubectl delete pods/hello-world
pods/hello-world
```

Terminated pods aren’t currently deleted automatically, so that you can observe their final status; be sure to clean up your dead pods.

On the other hand, containers and their logs are eventually deleted automatically in order to free up disk space on the nodes.

@@ -52,10 +52,10 @@ Documentation for other releases can be found at

## Overview

This document describes the environment for Kubelet-managed containers on a Kubernetes node (kNode). In contrast to the Kubernetes cluster API, which provides an API for creating and managing containers, the Kubernetes container environment provides the container access to information about what else is going on in the cluster.

This cluster information makes it possible to build applications that are *cluster aware*.
Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers. Container hooks are somewhat analogous to operating system signals in a traditional process model. However, these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster. Containers that participate in this cluster lifecycle become *cluster native*.

Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](images.md) and one or more [volumes](volumes.md).

@@ -89,7 +89,7 @@ Services have dedicated IP addresses, and are also surfaced to the container via DNS

*NB: Container hooks are under active development; we anticipate adding additional hooks as the Kubernetes container management system evolves.*

Container hooks provide information to the container about events in its management lifecycle. For example, immediately after a container is started, it receives a *PostStart* hook. These hooks are broadcast *into* the container with information about the life-cycle of the container. They are different from the events provided by Docker and other systems which are *output* from the container. Output events provide a log of what has already happened. Input hooks provide real-time notification about things that are happening, but no historical log.

### Hook Details

@@ -48,7 +48,7 @@ we can use:
Docker images have metadata associated with them that is used to store information about the image.
The image author may use this to define defaults for the command and arguments to run a container
when the user does not supply values. Docker calls the fields for commands and arguments
`Entrypoint` and `Cmd` respectively. The full details for this feature are too complicated to
describe here, mostly due to the fact that the Docker API allows users to specify both of these
fields as either a string array or a string, and there are subtle differences in how those cases are
handled. We encourage the curious to check out [docker's documentation]() for this feature.
@@ -69,10 +69,10 @@ Here are examples for these rules in table format

| Image `Entrypoint` | Image `Cmd` | Container `Command` | Container `Args` | Command Run      |
|--------------------|-------------|---------------------|------------------|------------------|
| `[/ep-1]`          | `[foo bar]` | <not set>           | <not set>        | `[ep-1 foo bar]` |
| `[/ep-1]`          | `[foo bar]` | `[/ep-2]`           | <not set>        | `[ep-2]`         |
| `[/ep-1]`          | `[foo bar]` | <not set>           | `[zoo boo]`      | `[ep-1 zoo boo]` |
| `[/ep-1]`          | `[foo bar]` | `[/ep-2]`           | `[zoo boo]`      | `[ep-2 zoo boo]` |

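In pod-spec terms, the last row of the table corresponds to something like this hypothetical fragment:

```yaml
spec:
  containers:
  - name: example           # hypothetical container name
    image: example-image    # assumes an image whose Entrypoint is /ep-1 and Cmd is [foo bar]
    command: ["/ep-2"]      # overrides the image's Entrypoint
    args: ["zoo", "boo"]    # overrides the image's Cmd; the container runs: /ep-2 zoo boo
```
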
## Capabilities
@@ -552,7 +552,7 @@ Contact us on

## More information

Visit [troubleshooting document](../troubleshooting.md) for more information.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

@@ -105,7 +105,7 @@ spec:
```

[Download example](downward-api/dapi-pod.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE downward-api/dapi-pod.yaml -->

Some more thorough examples:
* [environment variables](environment-guide/)

@@ -53,7 +53,7 @@ NAME READY REASON RESTARTS AGE
redis-master-ft9ex 1/1 Running 0 12s
```

then we can check the environment variables of the pod,

```console
$ kubectl exec redis-master-ft9ex env

@@ -68,7 +68,7 @@ Credentials can be provided in several ways:
    - Per-cluster
    - automatically configured on Google Compute Engine or Google Container Engine
    - all pods can read the project's private registry
  - Configuring Nodes to Authenticate to a Private Registry
    - all pods can read any configured private registries
    - requires node configuration by cluster administrator
  - Pre-pulling Images
@@ -77,7 +77,7 @@ Credentials can be provided in several ways:
  - Specifying ImagePullSecrets on a Pod
    - only pods which provide their own keys can access the private registry

Each option is described in more detail below.


### Using Google Container Registry

@@ -101,7 +101,7 @@ with credentials for Google Container Registry. You cannot use this approach.

**Note:** this approach is suitable if you can control node configuration. It
will not work reliably on GCE, or any other cloud provider that does automatic
node replacement.

Docker stores keys for private registries in the `$HOME/.dockercfg` file. If you put this
in the `$HOME` of `root` on a kubelet, then Docker will use it.

@@ -109,7 +109,7 @@ Here are the recommended steps to configure your nodes to use a private registry. In this
example, run these on your desktop/laptop:
1. run `docker login [server]` for each set of credentials you want to use.
1. view `$HOME/.dockercfg` in an editor to ensure it contains just the credentials you want to use.
1. get a list of your nodes
   - for example: `nodes=$(kubectl get nodes -o template --template='{{range.items}}{{.metadata.name}} {{end}}')`
1. copy your local `.dockercfg` to the home directory of root on each node.
   - for example: `for n in $nodes; do scp ~/.dockercfg root@$n:/root/.dockercfg; done`
@@ -218,7 +218,7 @@ secrets/myregistrykey
$
```

If you get the error message `error: no objects passed to create`, it may mean the base64-encoded string is invalid.
If you get an error message like `Secret "myregistrykey" is invalid: data[.dockercfg]: invalid value ...`, it means
the data was successfully base64-decoded, but could not be parsed as a dockercfg file.

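A minimal sketch of producing that base64 value (assuming GNU `base64`; the output becomes the `.dockercfg` field in the secret's data):

```console
# -w 0 disables line wrapping so the value stays on a single line
$ cat ~/.dockercfg | base64 -w 0
```
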
@@ -138,7 +138,7 @@ Lastly, you see a log of recent events related to your Pod. The system compresses

## Example: debugging Pending Pods

A common scenario that you can detect using events is when you’ve created a Pod that won’t fit on any node. For example, the Pod might request more resources than are free on any node, or it might specify a label selector that doesn’t match any nodes. Let’s say we created the previous Replication Controller with 5 replicas (instead of 2) and requesting 600 millicores instead of 500, on a four-node cluster where each (virtual) machine has 1 CPU. In that case one of the Pods will not be able to schedule. (Note that because of the cluster addon pods such as fluentd, skydns, etc., that run on each node, if we requested 1000 millicores then none of the Pods would be able to schedule.)

```console
$ kubectl get pods

@@ -59,7 +59,7 @@ spec:
```

[Download example](../../examples/blog-logging/counter-pod.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->

we can run the pod:

@@ -157,7 +157,7 @@ my-nginx-svc app=nginx app=nginx 10.0.152.174 80/TCP

## Using labels effectively

The examples we’ve used so far apply at most a single label to any resource. There are many scenarios where multiple labels should be used to distinguish sets from one another.

For instance, different applications would use different values for the `app` label, but a multi-tier application, such as the [guestbook example](../../examples/guestbook/), would additionally need to distinguish each tier. The frontend could carry the following labels:

@@ -279,7 +279,7 @@ my-nginx-o0ef1 1/1 Running 0 1h

You’ll eventually need to update your deployed application, typically by specifying a new image or image tag, as in the canary deployment scenario above. `kubectl` supports several update operations, each of which is applicable to different scenarios.

To update a service without an outage, `kubectl` supports what is called [“rolling update”](kubectl/kubectl_rolling-update.md), which updates one pod at a time, rather than taking down the entire service at the same time. See the [rolling update design document](../design/simple-rolling-update.md) and the [example of rolling update](update-demo/) for more information.

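A sketch of the command shape (hypothetical controller name and target image tag):

```console
$ kubectl rolling-update my-nginx --image=nginx:1.9.1
```
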
Let’s say you were running version 1.7.9 of nginx:

@@ -59,7 +59,7 @@ The Kubelet acts as a bridge between the Kubernetes master and the nodes. It manages

### InfluxDB and Grafana

A Grafana setup with InfluxDB is a very popular combination for monitoring in the open source world. InfluxDB exposes an easy-to-use API to write and fetch time series data. Heapster is set up to use this storage backend by default on most Kubernetes clusters. A detailed setup guide can be found [here](https://github.com/GoogleCloudPlatform/heapster/blob/master/docs/influxdb.md). InfluxDB and Grafana run in Pods. The pod exposes itself as a Kubernetes service, which is how Heapster discovers it.

The Grafana container serves Grafana’s UI, which provides an easy-to-configure dashboard interface. The default dashboard for Kubernetes contains an example dashboard that monitors resource usage of the cluster and the pods inside of it. This dashboard can easily be customized and expanded. Take a look at the storage schema for InfluxDB [here](https://github.com/GoogleCloudPlatform/heapster/blob/master/docs/storage-schema.md#metrics).

@@ -88,7 +88,7 @@ Here is a snapshot of a Google Cloud Monitoring dashboard showing cluster-wide

Now that you’ve learned a bit about Heapster, feel free to try it out on your own clusters! The [Heapster repository](https://github.com/GoogleCloudPlatform/heapster) is available on GitHub. It contains detailed instructions to set up Heapster and its storage backends. Heapster runs by default on most Kubernetes clusters, so you may already have it! Feedback is always welcome. Please let us know if you run into any issues. Heapster and Kubernetes developers hang out in the [#google-containers](http://webchat.freenode.net/?channels=google-containers) IRC channel on freenode.net. You can also reach us on the [google-containers Google Groups mailing list](https://groups.google.com/forum/#!forum/google-containers).

***
*Authors: Vishnu Kannan and Victor Marmol, Google Software Engineers.*
*This article was originally posted on the [Kubernetes blog](http://blog.kubernetes.io/2015/05/resource-usage-monitoring-kubernetes.html).*

@@ -35,7 +35,7 @@ Documentation for other releases can be found at

Kubernetes is an open-source system for managing containerized applications across multiple hosts in a cluster. Kubernetes is intended to make deploying containerized/microservice-based applications easy but powerful.

Kubernetes provides mechanisms for application deployment, scheduling, updating, maintenance, and scaling. A key feature of Kubernetes is that it actively manages the containers to ensure that the state of the cluster continually matches the user's intentions. An operations user should be able to launch a micro-service, letting the scheduler find the right placement. We also want to improve the tools and experience for how users can roll out applications through patterns like canary deployments.

Kubernetes supports [Docker](http://www.docker.io) and [Rocket](https://coreos.com/blog/rocket/) containers, and other container image formats and container runtimes will be supported in the future.

@@ -45,7 +45,7 @@ In Kubernetes, all containers run inside [pods](pods.md). A pod can host a single

Users can create and manage pods themselves, but Kubernetes drastically simplifies system management by allowing users to delegate two common pod-related activities: deploying multiple pod replicas based on the same pod configuration, and creating replacement pods when a pod or its machine fails. The Kubernetes API object that manages these behaviors is called a [replication controller](replication-controller.md). It defines a pod in terms of a template that the system then instantiates as some number of pods (specified by the user). The replicated set of pods might constitute an entire application, a micro-service, or one layer in a multi-tier application. Once the pods are created, the system continually monitors their health and that of the machines they are running on; if a pod fails due to a software problem or machine failure, the replication controller automatically creates a new pod on a healthy machine, to maintain the set of pods at the desired replication level. Multiple pods from the same or different applications can share the same machine. Note that a replication controller is needed even in the case of a single non-replicated pod if the user wants it to be re-created when it or its machine fails.

Frequently it is useful to refer to a set of pods, for example to limit the set of pods on which a mutating operation should be performed, or that should be queried for status. As a general mechanism, users can attach to most Kubernetes API objects arbitrary key-value pairs called [labels](labels.md), and then use a set of label selectors (key-value queries over labels) to constrain the target of API operations. Each resource also has a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object, called [annotations](annotations.md).

Kubernetes supports a unique [networking model](../admin/networking.md). Kubernetes encourages a flat address space and does not dynamically allocate ports, instead allowing users to select whichever ports are convenient for them. To achieve this, it allocates an IP address for each pod.

@@ -65,7 +65,7 @@ Managing storage is a distinct problem from managing compute. The `PersistentVolume`

A `PersistentVolume` (PV) is a piece of networked storage in the cluster that has been provisioned by an administrator. It is a resource in the cluster just like a node is a cluster resource. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual pod that uses the PV. This API object captures the details of the implementation of the storage, be that NFS, iSCSI, or a cloud-provider-specific storage system.

A `PersistentVolumeClaim` (PVC) is a request for storage by a user. It is similar to a pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g., can be mounted once read/write or many times read-only).

Please see the [detailed walkthrough with working examples](persistent-volumes/).

@@ -75,7 +75,7 @@ Please see the [detailed walkthrough with working examples](persistent-volumes/)

PVs are resources in the cluster. PVCs are requests for those resources and also act as claim checks to the resource. The interaction between PVs and PVCs follows this lifecycle:

### Provisioning

A cluster administrator creates some number of PVs. They carry the details of the real storage that is available for use by cluster users. They exist in the Kubernetes API and are available for consumption.

### Binding
@@ -113,7 +113,7 @@ A `PersistentVolume's` reclaim policy tells the cluster what to do with the volume

## Persistent Volumes

Each PV contains a spec and status, which is the specification and status of the volume.

```yaml
@@ -38,7 +38,7 @@ nginx serving content from your persistent volume.

This guide assumes knowledge of Kubernetes fundamentals and that you have a cluster up and running.

See [Persistent Storage design document](../../design/persistent-storage.md) for more information.

## Provisioning

@@ -51,7 +51,7 @@ for ease of development and testing. You'll create a local `HostPath` for this
> IMPORTANT! For `HostPath` to work, you will need to run a single-node cluster. Kubernetes does not
support local storage on the host at this time. There is no guarantee your pod ends up on the correct node where the `HostPath` resides.


```console
# This will be nginx's webroot
@@ -70,7 +70,7 @@ pv0001 type=local 10737418240 RWO Available
## Requesting storage

Users of Kubernetes request persistent storage for their pods. They don't know how the underlying cluster is provisioned.
They just know they can rely on their claim to storage and can manage its lifecycle independently from the many pods that may use it.

Claims must be created in the same namespace as the pods that use them.

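A claim might look like this sketch (hypothetical name and size):

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim             # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce         # mounted read/write by a single node
  resources:
    requests:
      storage: 3Gi          # hypothetical size
```
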
@@ -114,7 +114,7 @@ kubernetes component=apiserver,provider=kubernetes <none>

## Next steps

You should be able to query your service endpoint and see what content nginx is serving. A "forbidden" error might mean you
need to disable SELinux (`setenforce 0`).

```console
@@ -93,22 +93,22 @@ That approach would provide co-location, but would not provide most of the benefits

## Durability of pods (or lack thereof)

Pods aren't intended to be treated as durable [pets](https://blog.engineyard.com/2014/pets-vs-cattle). They won't survive scheduling failures, node failures, or other evictions, such as due to lack of resources, or in the case of node maintenance.

In general, users shouldn't need to create pods directly. They should almost always use controllers (e.g., [replication controller](replication-controller.md)), even for singletons. Controllers provide self-healing with a cluster scope, as well as replication and rollout management.

The use of collective APIs as the primary user-facing primitive is relatively common among cluster scheduling systems, including [Borg](https://research.google.com/pubs/pub43438.html), [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html), [Aurora](http://aurora.apache.org/documentation/latest/configuration-reference/#job-schema), and [Tupperware](http://www.slideshare.net/Docker/aravindnarayanan-facebook140613153626phpapp02-37588997).

Pod is exposed as a primitive in order to facilitate:

* scheduler and controller pluggability
* support for pod-level operations without the need to "proxy" them via controller APIs
* decoupling of pod lifetime from controller lifetime, such as for bootstrapping
* decoupling of controllers and services — the endpoint controller just watches pods
* clean composition of Kubelet-level functionality with cluster-level functionality — Kubelet is effectively the "pod controller"
* high-availability applications, which will expect pods to be replaced in advance of their termination and certainly in advance of deletion, such as in the case of planned evictions, image prefetching, or live pod migration [#3949](https://github.com/GoogleCloudPlatform/kubernetes/issues/3949)

The current best practice for pets is to create a replication controller with `replicas` equal to `1` and a corresponding service. If you find this cumbersome, please comment on [issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260).

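A sketch of that practice (hypothetical names and image):

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: my-pet              # hypothetical name
spec:
  replicas: 1               # a single, self-healing replica
  selector:
    app: my-pet
  template:
    metadata:
      labels:
        app: my-pet
    spec:
      containers:
      - name: my-pet
        image: redis        # hypothetical image
```
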
## API Object

@@ -33,7 +33,7 @@ Documentation for other releases can be found at

# Kubernetes User Guide: Managing Applications: Prerequisites

To deploy and manage applications on Kubernetes, you’ll use the Kubernetes command-line tool, [kubectl](kubectl/kubectl.md). It lets you inspect your cluster resources, create, delete, and update components, and much more. You will use it to look at your new cluster and bring up example apps.

## Installing kubectl

@@ -90,7 +90,7 @@ In addition to the local disk storage provided by `emptyDir`, Kubernetes supports

## Distributing credentials

Many applications need credentials, such as passwords, OAuth tokens, and TLS keys, to authenticate with other applications, databases, and services. Storing these credentials in container images or environment variables is less than ideal, since the credentials can then be copied by anyone with access to the image, pod/container specification, host file system, or host Docker daemon.

Kubernetes provides a mechanism, called [*secrets*](secrets.md), that facilitates delivery of sensitive credentials to applications. A `Secret` is a simple resource containing a map of data. For instance, a simple secret with a username and password might look as follows:

@@ -245,7 +245,7 @@ More examples can be found in our [blog article](http://blog.kubernetes.io/2015/

## Resource management

Kubernetes’s scheduler will place applications only where they have adequate CPU and memory, but it can only do so if it knows what [resources they require](compute-resources.md). The consequence of specifying too little CPU is that the containers could be starved of CPU if too many other containers were scheduled onto the same node. Similarly, containers could die unpredictably due to running out of memory if no memory were requested, which can be especially likely for large-memory applications.

If no resource requirements are specified, a nominal amount of resources is assumed. (This default is applied via a [LimitRange](limitrange/) for the default [Namespace](namespaces.md). It can be viewed with `kubectl describe limitrange limits`.) You may explicitly specify the amount of resources required as follows:

@@ -318,7 +318,7 @@ For more details (e.g., how to specify command-based probes), see the [example in

Of course, nodes and applications may fail at any time, but many applications benefit from clean shutdown, such as to complete in-flight requests, when the termination of the application is deliberate. To support such cases, Kubernetes supports two kinds of notifications:

* Kubernetes will send SIGTERM to applications, which can be handled in order to effect graceful termination. SIGKILL is sent 10 seconds later if the application does not terminate sooner.
* Kubernetes supports the (optional) specification of a [*pre-stop lifecycle hook*](container-environment.md#container-hooks), which will execute prior to sending SIGTERM.

The specification of a pre-stop hook is similar to that of probes, but without the timing-related parameters. For example:

@@ -36,7 +36,7 @@ Documentation for other releases can be found at

Objects of type `secret` are intended to hold sensitive information, such as
passwords, OAuth tokens, and ssh keys. Putting this information in a `secret`
is safer and more flexible than putting it verbatim in a `pod` definition or in
a Docker image. See [Secrets design document](../design/secrets.md) for more information.

**Table of Contents**
<!-- BEGIN MUNGE: GENERATED_TOC -->

@@ -33,7 +33,7 @@ Documentation for other releases can be found at

# Secrets example

Following this example, you will create a [secret](../secrets.md) and a [pod](../pods.md) that consumes that secret in a [volume](../volumes.md). See [Secrets design document](../../design/secrets.md) for more information.

## Step Zero: Prerequisites

@@ -83,7 +83,7 @@ $ kubectl create -f docs/user-guide/secrets/secret-pod.yaml
```

This pod runs a binary that displays the content of one of the pieces of secret data in the secret
volume:

```console
$ kubectl logs secret-test-pod

@@ -35,7 +35,7 @@ Documentation for other releases can be found at

A service account provides an identity for processes that run in a Pod.

*This is a user introduction to Service Accounts. See also the
[Cluster Admin Guide to Service Accounts](../admin/service-accounts-admin.md).*

*Note: This document describes how service accounts behave in a cluster set up
|
||||
|
||||
The service account has to exist at the time the pod is created, or it will be rejected.
|
||||
|
||||
You cannot update the service account of an already created pod.
|
||||
You cannot update the service account of an already created pod.
|
||||
|
||||
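A sketch of pointing a pod at a service account (the v1 field is `serviceAccountName`; older examples use the deprecated `serviceAccount` alias; names here are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-pod              # hypothetical name
spec:
  serviceAccountName: build-robot   # must already exist when the pod is created
  containers:
  - name: main
    image: nginx            # hypothetical image
```
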
You can clean up the service account from this example like this:

@@ -65,7 +65,7 @@ spec:
```

[Download example](pod.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE pod.yaml -->

You can see your cluster's pods:

@@ -117,7 +117,7 @@ spec:
```

[Download example](replication.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE replication.yaml -->

To delete the replication controller (and the pods it created):

@@ -33,7 +33,7 @@ Documentation for other releases can be found at

# Kubernetes User Interface

Kubernetes has a web-based user interface that displays the current cluster state graphically.

## Accessing the UI

@@ -50,34 +50,34 @@ Normally, this should be taken care of automatically by the [`kube-addons.sh`](h

## Using the UI

The Kubernetes UI can be used to introspect your current cluster, such as checking how resources are used, or looking at error messages. You cannot, however, use the UI to modify your cluster.

### Node Resource Usage

After accessing the Kubernetes UI, you'll see a homepage dynamically listing out all nodes in your current cluster, with related information including internal IP addresses, CPU usage, memory usage, and file system usage.

### Dashboard Views

Click on the "Views" button in the top-right of the page to see other views available, which include: Explore, Pods, Nodes, Replication Controllers, Services, and Events.

#### Explore View

The "Explore" view allows you to see the pods, replication controllers, and services in the current cluster easily.

The "Group by" dropdown list allows you to group these resources by a number of factors, such as type, name, host, etc.

You can also create filters by clicking on the down triangle of any listed resource instances and choosing which filters you want to add.

To see more details of each resource instance, simply click on it.

### Other Views

Other views (Pods, Nodes, Replication Controllers, Services, and Events) simply list information about each type of resource. You can also click on any instance for more details.

## More Information

For more information, see the [Kubernetes UI development document](http://releases.k8s.io/HEAD/www/README.md) in the www directory.

@@ -49,7 +49,7 @@ limitations under the License.

# Rolling update example

This example demonstrates the usage of Kubernetes to perform a [rolling update](../kubectl/kubectl_rolling-update.md) on a running group of [pods](../../../docs/user-guide/pods.md). See [here](../managing-deployments.md#updating-your-application-without-a-service-outage) to understand why you need a rolling update. Also check [rolling update design document](../../design/simple-rolling-update.md) for more information.

### Step Zero: Prerequisites

@@ -166,7 +166,7 @@ spec:
```

[Download example](pod-redis.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE pod-redis.yaml -->

Notes:
- The volume mount name is a reference to a specific empty dir volume.
@@ -87,7 +87,7 @@ spec:
```

[Download example](pod-nginx-with-label.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE pod-nginx-with-label.yaml -->

Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)):

@@ -143,7 +143,7 @@ spec:
```

[Download example](replication-controller.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE replication-controller.yaml -->

#### Replication Controller Management

|
||||
```
|
||||
|
||||
[Download example](service.yaml)
|
||||
<!-- END MUNGE: EXAMPLE -->
|
||||
<!-- END MUNGE: EXAMPLE service.yaml -->
|
||||
|
||||
#### Service Management
|
||||
|
||||
@ -312,7 +312,7 @@ spec:
|
||||
```
|
||||
|
||||
[Download example](pod-with-http-healthcheck.yaml)
|
||||
<!-- END MUNGE: EXAMPLE -->
|
||||
<!-- END MUNGE: EXAMPLE pod-with-http-healthcheck.yaml -->
|
||||
|
||||
For more information about health checking, see [Container Probes](../pod-states.md#container-probes).
|
||||
|
||||
|
@@ -36,7 +36,7 @@ Documentation for other releases can be found at

*This document is aimed at users who have worked through some of the examples,
and who want to learn more about using kubectl to manage resources such
as pods and services. Users who want to access the REST API directly,
and developers who want to extend the Kubernetes API should
refer to the [api conventions](../devel/api-conventions.md) and
the [api document](../api.md).*

@@ -68,7 +68,7 @@ $ wc -l /tmp/original.yaml /tmp/current.yaml
60 total
```

The resource we posted had only 9 lines, but the one we got back had 51 lines.
If you `diff -u /tmp/original.yaml /tmp/current.yaml`, you can see the fields added to the pod.
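A sketch of how the two files might have been produced (hypothetical pod name):

```console
$ kubectl create -f /tmp/original.yaml                 # post the 9-line spec
$ kubectl get pod my-pod -o yaml > /tmp/current.yaml   # read back the enriched object
$ diff -u /tmp/original.yaml /tmp/current.yaml
```
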
The system adds fields in several ways:
- Some fields are added synchronously with creation of the resource and some are set asynchronously.

@@ -31,11 +31,11 @@ Documentation for other releases can be found at

<!-- END MUNGE: UNVERSIONED_WARNING -->

# What is Kubernetes?

Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts.

With Kubernetes, you are able to quickly and efficiently respond to customer demand:

- Scale your applications on the fly.
- Seamlessly roll out new features.
@@ -54,27 +54,27 @@ The Kubernetes project was started by Google in 2014. Kubernetes builds upon a [

<hr>

Looking for reasons why you should be using [containers](http://aucouranton.com/2014/06/13/linux-containers-parallels-lxc-openvz-docker-and-more/)?

Here are some key points:

* **Application-centric management**:
    Raises the level of abstraction from running an OS on virtual hardware to running an application on an OS using logical resources. This provides the simplicity of PaaS with the flexibility of IaaS and enables you to run much more than just [12-factor apps](http://12factor.net/).
* **Dev and Ops separation of concerns**:
    Provides separation of build and deployment, therefore decoupling applications from infrastructure.
* **Agile application creation and deployment**:
    Increased ease and efficiency of container image creation compared to VM image use.
* **Continuous development, integration, and deployment**:
    Provides for reliable and frequent container image build and deployment with quick and easy rollbacks (due to image immutability).
* **Loosely coupled, distributed, elastic, liberated [micro-services](http://martinfowler.com/articles/microservices.html)**:
    Applications are broken into smaller, independent pieces and can be deployed and managed dynamically -- not a fat monolithic stack running on one big single-purpose machine.
* **Environmental consistency across development, testing, and production**:
    Runs the same on a laptop as it does in the cloud.
* **Cloud and OS distribution portability**:
    Runs on Ubuntu, RHEL, on-prem, or Google Container Engine, which makes sense for all environments: build, test, and production.
* **Resource isolation**:
    Predictable application performance.
* **Resource utilization**:
    High efficiency and density.

@@ -101,7 +101,7 @@ spec:
```

[Download example](cassandra-controller.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE cassandra-controller.yaml -->

There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built-in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).

||||
@ -132,7 +132,7 @@ spec:
|
||||
```
|
||||
|
||||
[Download example](cassandra-service.yaml)
|
||||
<!-- END MUNGE: EXAMPLE -->
|
||||
<!-- END MUNGE: EXAMPLE cassandra-service.yaml -->
|
||||
|
||||
The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
|
||||
|
||||
@@ -242,7 +242,7 @@ spec:
```

[Download example](cassandra-controller.yaml)
-<!-- END MUNGE: EXAMPLE -->
+<!-- END MUNGE: EXAMPLE cassandra-controller.yaml -->

Most of this replication controller definition is identical to the Cassandra pod definition above; it simply gives the replication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute, which contains the controller's selector query, and the ```replicas``` attribute, which specifies the desired number of replicas, in this case 1.

Some files were not shown because too many files have changed in this diff.