Merge pull request #11839 from eparis/munger-rewrite

Major rewrite for docs munger
This commit is contained in:
Mike Danese 2015-07-31 10:21:25 -07:00
commit fd7a48f379
113 changed files with 1231 additions and 880 deletions

22
cmd/mungedocs/README.md Normal file
View File

@ -0,0 +1,22 @@
# Documentation Mungers
Basically this is like lint/gofmt for md docs.
It does the following:
- iterate over all files in the given doc root.
- for each file split it into a slice (mungeLines) of lines (mungeLine)
- a mungeline has metadata about each line typically determined by a 'fast' regex.
  - metadata contains things like 'is inside a preformatted block'
- contains a markdown header
- has a link to another file
- etc..
- if you have a really slow regex with a lot of backtracking you might want to write a fast one to limit how often you run the slow one.
- each munger is then called in turn
- they are given the mungeLines
- they create an entirely new set of mungeLines with their modifications
- the new set is returned
- the new set is then fed into the next munger.
- in the end the final mungeLines are either written back to the file, or only checked against it (--verify)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cmd/mungedocs/README.md?pixel)]()

View File

@ -17,43 +17,42 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"fmt" "fmt"
"os" "strings"
"regexp"
) )
var ( const analyticsMungeTag = "GENERATED_ANALYTICS"
beginMungeExp = regexp.QuoteMeta(beginMungeTag("GENERATED_ANALYTICS")) const analyticsLinePrefix = "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/"
endMungeExp = regexp.QuoteMeta(endMungeTag("GENERATED_ANALYTICS"))
analyticsExp = regexp.QuoteMeta("[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/") +
"[^?]*" +
regexp.QuoteMeta("?pixel)]()")
// Matches the analytics blurb, with or without the munge headers. func updateAnalytics(fileName string, mlines mungeLines) (mungeLines, error) {
analyticsRE = regexp.MustCompile(`[\n]*` + analyticsExp + `[\n]?` + var out mungeLines
`|` + `[\n]*` + beginMungeExp + `[^<]*` + endMungeExp) fileName, err := makeRepoRelative(fileName, fileName)
) if err != nil {
return mlines, err
// This adds the analytics link to every .md file.
func checkAnalytics(fileName string, fileBytes []byte) (output []byte, err error) {
fileName = makeRepoRelative(fileName)
desired := fmt.Sprintf(`
`+beginMungeTag("GENERATED_ANALYTICS")+`
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/%s?pixel)]()
`+endMungeTag("GENERATED_ANALYTICS")+`
`, fileName)
if !analyticsRE.MatchString(desired) {
fmt.Printf("%q does not match %q", analyticsRE.String(), desired)
os.Exit(1)
} }
//output = replaceNonPreformattedRegexp(fileBytes, analyticsRE, func(in []byte) []byte {
output = analyticsRE.ReplaceAllFunc(fileBytes, func(in []byte) []byte { link := fmt.Sprintf(analyticsLinePrefix+"%s?pixel)]()", fileName)
return []byte{} insertLines := getMungeLines(link)
}) mlines, err = removeMacroBlock(analyticsMungeTag, mlines)
output = bytes.TrimRight(output, "\n") if err != nil {
output = append(output, []byte(desired)...) return mlines, err
return output, nil }
// Remove floating analytics links not surrounded by the munge tags.
for _, mline := range mlines {
if mline.preformatted || mline.header || mline.beginTag || mline.endTag {
out = append(out, mline)
continue
}
if strings.HasPrefix(mline.data, analyticsLinePrefix) {
continue
}
out = append(out, mline)
}
out = appendMacroBlock(out, analyticsMungeTag)
out, err = updateMacroBlock(out, analyticsMungeTag, insertLines)
if err != nil {
return mlines, err
}
return out, nil
} }

View File

@ -23,67 +23,71 @@ import (
) )
func TestAnalytics(t *testing.T) { func TestAnalytics(t *testing.T) {
b := beginMungeTag("GENERATED_ANALYTICS")
e := endMungeTag("GENERATED_ANALYTICS")
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{ {
"aoeu", "aoeu",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()", "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "aoeu" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n", e + "\n",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "\n" + "aoeu" + "\n" + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "\n" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n", e + "\n",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"prefix" + "\n" + "prefix" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + e +
"\n" + "suffix", "\n" + "suffix",
"prefix" + "\n" + "suffix" + "\n" + "\n" + "\n" + "prefix" + "\n" + "suffix" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n", e + "\n",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
} }
for _, c := range cases { for i, c := range cases {
out, err := checkAnalytics("path/to/file-name.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
out, err := updateAnalytics("path/to/file-name.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if string(out) != c.out { if !expected.Equal(out) {
t.Errorf("Expected \n\n%v\n\n but got \n\n%v\n\n", c.out, string(out)) t.Errorf("Case %d Expected \n\n%v\n\n but got \n\n%v\n\n", i, expected.String(), out.String())
} }
} }
} }

View File

@ -17,15 +17,17 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"path"
"regexp" "regexp"
"strings" "strings"
) )
const exampleMungeTag = "EXAMPLE" const exampleToken = "EXAMPLE"
const exampleLineStart = "<!-- BEGIN MUNGE: EXAMPLE"
var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", exampleToken, `(([^ ])*.(yaml|json))`)))
// syncExamples updates all examples in markdown file. // syncExamples updates all examples in markdown file.
// //
@ -43,75 +45,70 @@ const exampleMungeTag = "EXAMPLE"
// //
// [Download example](../../examples/guestbook/frontend-controller.yaml) // [Download example](../../examples/guestbook/frontend-controller.yaml)
// <!-- END MUNGE: EXAMPLE --> // <!-- END MUNGE: EXAMPLE -->
func syncExamples(filePath string, markdown []byte) ([]byte, error) { func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) {
// find the example syncer begin tag var err error
header := beginMungeTag(fmt.Sprintf("%s %s", exampleMungeTag, `(([^ ])*.(yaml|json))`)) type exampleTag struct {
exampleLinkRE := regexp.MustCompile(header) token string
lines := splitLines(markdown) linkText string
updatedMarkdown, err := updateExampleMacroBlock(filePath, lines, exampleLinkRE, endMungeTag(exampleMungeTag)) fileType string
if err != nil {
return updatedMarkdown, err
} }
return updatedMarkdown, nil exampleTags := []exampleTag{}
// collect all example Tags
for _, mline := range mlines {
if mline.preformatted || !mline.beginTag {
continue
}
line := mline.data
if !strings.HasPrefix(line, exampleLineStart) {
continue
}
match := exampleMungeTagRE.FindStringSubmatch(line)
if len(match) < 4 {
err = fmt.Errorf("Found unparsable EXAMPLE munge line %v", line)
return mlines, err
}
tag := exampleTag{
token: exampleToken + " " + match[1],
linkText: match[1],
fileType: match[3],
}
exampleTags = append(exampleTags, tag)
}
// update all example Tags
for _, tag := range exampleTags {
example, err := exampleContent(filePath, tag.linkText, tag.fileType)
if err != nil {
return mlines, err
}
mlines, err = updateMacroBlock(mlines, tag.token, example)
if err != nil {
return mlines, err
}
}
return mlines, nil
} }
// exampleContent retrieves the content of the file at linkPath // exampleContent retrieves the content of the file at linkPath
func exampleContent(filePath, linkPath, fileType string) (content string, err error) { func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) {
realRoot := path.Join(*rootDir, *repoRoot) + "/" repoRel, err := makeRepoRelative(linkPath, filePath)
path := path.Join(realRoot, path.Dir(filePath), linkPath)
dat, err := ioutil.ReadFile(path)
if err != nil {
return content, err
}
// remove leading and trailing spaces and newlines
trimmedFileContent := strings.TrimSpace(string(dat))
content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, linkPath)
return
}
// updateExampleMacroBlock sync the yaml/json example between begin tag and end tag
func updateExampleMacroBlock(filePath string, lines []string, beginMarkExp *regexp.Regexp, endMark string) ([]byte, error) {
var buffer bytes.Buffer
betweenBeginAndEnd := false
for _, line := range lines {
trimmedLine := strings.Trim(line, " \n")
if beginMarkExp.Match([]byte(trimmedLine)) {
if betweenBeginAndEnd {
return nil, fmt.Errorf("found second begin mark while updating macro blocks")
}
betweenBeginAndEnd = true
buffer.WriteString(line)
buffer.WriteString("\n")
match := beginMarkExp.FindStringSubmatch(line)
if len(match) < 4 {
return nil, fmt.Errorf("failed to parse the link in example header")
}
// match[0] is the entire expression; [1] is the link text and [3] is the file type (yaml or json).
linkText := match[1]
fileType := match[3]
example, err := exampleContent(filePath, linkText, fileType)
if err != nil { if err != nil {
return nil, err return nil, err
} }
buffer.WriteString(example)
} else if trimmedLine == endMark { fileRel, err := makeFileRelative(linkPath, filePath)
if !betweenBeginAndEnd { if err != nil {
return nil, fmt.Errorf("found end mark without being mark while updating macro blocks") return nil, err
} }
// Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
buffer.WriteString("\n") dat, err := ioutil.ReadFile(repoRel)
buffer.WriteString(line) if err != nil {
buffer.WriteString("\n") return nil, err
betweenBeginAndEnd = false
} else {
if !betweenBeginAndEnd {
buffer.WriteString(line)
buffer.WriteString("\n")
} }
}
} // remove leading and trailing spaces and newlines
if betweenBeginAndEnd { trimmedFileContent := strings.TrimSpace(string(dat))
return nil, fmt.Errorf("never found closing end mark while updating macro blocks") content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel)
} out := getMungeLines(content)
return buffer.Bytes(), nil return out, nil
} }

View File

@ -36,23 +36,26 @@ spec:
` `
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", ""}, {"", ""},
{ {
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
}, },
{ {
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
}, },
} }
repoRoot = ""
for _, c := range cases { for _, c := range cases {
actual, err := syncExamples("mungedocs/filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := syncExamples("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if c.out != string(actual) { if !expected.Equal(actual) {
t.Errorf("Expected example \n'%v' but got \n'%v'", c.out, string(actual)) t.Errorf("Expected example \n'%q' but got \n'%q'", expected.String(), actual.String())
} }
} }
} }

View File

@ -19,53 +19,56 @@ package main
import ( import (
"fmt" "fmt"
"regexp" "regexp"
"strings"
) )
var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`) var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
var whitespaceRegex = regexp.MustCompile(`^\s*$`)
func fixHeaderLines(fileBytes []byte) []byte { func fixHeaderLine(mlines mungeLines, newlines mungeLines, linenum int) mungeLines {
lines := splitLines(fileBytes) var out mungeLines
out := []string{}
for i := range lines { mline := mlines[linenum]
matches := headerRegex.FindStringSubmatch(lines[i]) line := mlines[linenum].data
matches := headerRegex.FindStringSubmatch(line)
if matches == nil { if matches == nil {
out = append(out, lines[i]) out = append(out, mline)
continue return out
} }
if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) {
out = append(out, "") // There must be a blank line before the # (unless first line in file)
} if linenum != 0 {
out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2])) newlen := len(newlines)
if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) { if newlines[newlen-1].data != "" {
out = append(out, "") out = append(out, blankMungeLine)
} }
} }
final := strings.Join(out, "\n")
// Preserve the end of the file. // There must be a space AFTER the ##'s
if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' { newline := fmt.Sprintf("%s %s", matches[1], matches[2])
final += "\n" newmline := newMungeLine(newline)
out = append(out, newmline)
// The next line needs to be a blank line (unless last line in file)
if len(mlines) > linenum+1 && mlines[linenum+1].data != "" {
out = append(out, blankMungeLine)
} }
return []byte(final) return out
} }
// Header lines need whitespace around them and after the #s. // Header lines need whitespace around them and after the #s.
func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) { func updateHeaderLines(filePath string, mlines mungeLines) (mungeLines, error) {
fbs := splitByPreformatted(fileBytes) var out mungeLines
fbs = append([]fileBlock{{false, []byte{}}}, fbs...) for i, mline := range mlines {
fbs = append(fbs, fileBlock{false, []byte{}}) if mline.preformatted {
out = append(out, mline)
for i := range fbs {
block := &fbs[i]
if block.preformatted {
continue continue
} }
block.data = fixHeaderLines(block.data) if !mline.header {
out = append(out, mline)
continue
} }
output := []byte{} newLines := fixHeaderLine(mlines, out, i)
for _, block := range fbs { out = append(out, newLines...)
output = append(output, block.data...)
} }
return output, nil return out, nil
} }

View File

@ -25,7 +25,7 @@ import (
func TestHeaderLines(t *testing.T) { func TestHeaderLines(t *testing.T) {
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", ""}, {"", ""},
{ {
@ -62,10 +62,12 @@ func TestHeaderLines(t *testing.T) {
}, },
} }
for i, c := range cases { for i, c := range cases {
actual, err := checkHeaderLines("filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateHeaderLines("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if string(actual) != c.out { if !actual.Equal(expected) {
t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual)) t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
} }
} }
} }

View File

@ -25,29 +25,25 @@ import (
// Looks for lines that have kubectl commands with -f flags and files that // Looks for lines that have kubectl commands with -f flags and files that
// don't exist. // don't exist.
func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) { func updateKubectlFileTargets(file string, mlines mungeLines) (mungeLines, error) {
inside := false var errors []string
lines := splitLines(markdown) for i, mline := range mlines {
errors := []string{} if !mline.preformatted {
for i := range lines { continue
if strings.HasPrefix(lines[i], "```") {
inside = !inside
} }
if inside { if err := lookForKubectl(mline.data, i); err != nil {
if err := lookForKubectl(lines, i); err != nil {
errors = append(errors, err.Error()) errors = append(errors, err.Error())
} }
} }
}
err := error(nil) err := error(nil)
if len(errors) != 0 { if len(errors) != 0 {
err = fmt.Errorf("%s", strings.Join(errors, "\n")) err = fmt.Errorf("%s", strings.Join(errors, "\n"))
} }
return markdown, err return mlines, err
} }
func lookForKubectl(lines []string, lineNum int) error { func lookForKubectl(line string, lineNum int) error {
fields := strings.Fields(lines[lineNum]) fields := strings.Fields(line)
for i := range fields { for i := range fields {
if fields[i] == "kubectl" { if fields[i] == "kubectl" {
return gotKubectl(lineNum, fields, i) return gotKubectl(lineNum, fields, i)
@ -56,26 +52,26 @@ func lookForKubectl(lines []string, lineNum int) error {
return nil return nil
} }
func gotKubectl(line int, fields []string, fieldNum int) error { func gotKubectl(lineNum int, fields []string, fieldNum int) error {
for i := fieldNum + 1; i < len(fields); i++ { for i := fieldNum + 1; i < len(fields); i++ {
switch fields[i] { switch fields[i] {
case "create", "update", "replace", "delete": case "create", "update", "replace", "delete":
return gotCommand(line, fields, i) return gotCommand(lineNum, fields, i)
} }
} }
return nil return nil
} }
func gotCommand(line int, fields []string, fieldNum int) error { func gotCommand(lineNum int, fields []string, fieldNum int) error {
for i := fieldNum + 1; i < len(fields); i++ { for i := fieldNum + 1; i < len(fields); i++ {
if strings.HasPrefix(fields[i], "-f") { if strings.HasPrefix(fields[i], "-f") {
return gotDashF(line, fields, i) return gotDashF(lineNum, fields, i)
} }
} }
return nil return nil
} }
func gotDashF(line int, fields []string, fieldNum int) error { func gotDashF(lineNum int, fields []string, fieldNum int) error {
target := "" target := ""
if fields[fieldNum] == "-f" { if fields[fieldNum] == "-f" {
if fieldNum+1 == len(fields) { if fieldNum+1 == len(fields) {
@ -112,9 +108,9 @@ func gotDashF(line int, fields []string, fieldNum int) error {
} }
// If we got here we expect the file to exist. // If we got here we expect the file to exist.
_, err := os.Stat(path.Join(*rootDir, *repoRoot, target)) _, err := os.Stat(path.Join(repoRoot, target))
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fmt.Errorf("%d: target file %q does not exist", line, target) return fmt.Errorf("%d: target file %q does not exist", lineNum, target)
} }
return err return err
} }

View File

@ -130,9 +130,9 @@ func TestKubectlDashF(t *testing.T) {
}, },
} }
for i, c := range cases { for i, c := range cases {
*rootDir = "" repoRoot = ""
*repoRoot = "" in := getMungeLines(c.in)
_, err := checkKubectlFileTargets("filename.md", []byte(c.in)) _, err := updateKubectlFileTargets("filename.md", in)
if err != nil && c.ok { if err != nil && c.ok {
t.Errorf("case[%d]: expected success, got %v", i, err) t.Errorf("case[%d]: expected success, got %v", i, err)
} }

View File

@ -29,20 +29,20 @@ var (
// Finds markdown links of the form [foo](bar "alt-text"). // Finds markdown links of the form [foo](bar "alt-text").
linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`) linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
// Splits the link target into link target and alt-text. // Splits the link target into link target and alt-text.
altTextRE = regexp.MustCompile(`(.*)( ".*")`) altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
) )
// checkLinks assumes fileBytes has links in markdown syntax, and verifies that func processLink(in string, filePath string) (string, error) {
// any relative links actually point to files that exist. var err error
func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { out := linkRE.ReplaceAllStringFunc(in, func(in string) string {
dir := path.Dir(filePath) match := linkRE.FindStringSubmatch(in)
errors := []string{} if match == nil {
err = fmt.Errorf("Detected this line had a link, but unable to parse, %v", in)
output := replaceNonPreformattedRegexp(fileBytes, linkRE, func(in []byte) (out []byte) { return ""
match := linkRE.FindSubmatch(in) }
// match[0] is the entire expression; [1] is the visible text and [2] is the link text. // match[0] is the entire expression;
visibleText := string(match[1]) visibleText := match[1]
linkText := string(match[2]) linkText := match[2]
altText := "" altText := ""
if parts := altTextRE.FindStringSubmatch(linkText); parts != nil { if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
linkText = parts[1] linkText = parts[1]
@ -54,13 +54,10 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
linkText = strings.Trim(linkText, "\n") linkText = strings.Trim(linkText, "\n")
linkText = strings.Trim(linkText, " ") linkText = strings.Trim(linkText, " ")
u, err := url.Parse(linkText) u, terr := url.Parse(linkText)
if err != nil { if terr != nil {
errors = append( err = fmt.Errorf("link %q is unparsable: %v", linkText, terr)
errors, return ""
fmt.Sprintf("link %q is unparsable: %v", linkText, err),
)
return in
} }
if u.Host != "" && u.Host != "github.com" { if u.Host != "" && u.Host != "github.com" {
@ -72,10 +69,8 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") { if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
newPath, targetExists := checkPath(filePath, path.Clean(u.Path)) newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
if !targetExists { if !targetExists {
errors = append( err = fmt.Errorf("%q: target not found", linkText)
errors, return ""
fmt.Sprintf("%q: target not found", linkText),
)
} }
u.Path = newPath u.Path = newPath
if strings.HasPrefix(u.Path, "/") { if strings.HasPrefix(u.Path, "/") {
@ -89,11 +84,16 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
// Make the visible text show the absolute path if it's // Make the visible text show the absolute path if it's
// not nested in or beneath the current directory. // not nested in or beneath the current directory.
if strings.HasPrefix(u.Path, "..") { if strings.HasPrefix(u.Path, "..") {
suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path)) dir := path.Dir(filePath)
suggestedVisibleText, err = makeRepoRelative(path.Join(dir, u.Path), filePath)
if err != nil {
return ""
}
} else { } else {
suggestedVisibleText = u.Path suggestedVisibleText = u.Path
} }
if unescaped, err := url.QueryUnescape(u.String()); err != nil { var unescaped string
if unescaped, err = url.QueryUnescape(u.String()); err != nil {
// Remove %28 type stuff, be nice to humans. // Remove %28 type stuff, be nice to humans.
// And don't fight with the toc generator. // And don't fight with the toc generator.
linkText = unescaped linkText = unescaped
@ -107,18 +107,37 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
visibleText = suggestedVisibleText visibleText = suggestedVisibleText
} }
return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)) return fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)
}) })
if out == "" {
return in, err
}
return out, nil
}
// updateLinks assumes lines has links in markdown syntax, and verifies that
// any relative links actually point to files that exist.
func updateLinks(filePath string, mlines mungeLines) (mungeLines, error) {
var out mungeLines
errors := []string{}
for _, mline := range mlines {
if mline.preformatted || !mline.link {
out = append(out, mline)
continue
}
line, err := processLink(mline.data, filePath)
if err != nil {
errors = append(errors, err.Error())
}
ml := newMungeLine(line)
out = append(out, ml)
}
err := error(nil) err := error(nil)
if len(errors) != 0 { if len(errors) != 0 {
err = fmt.Errorf("%s", strings.Join(errors, "\n")) err = fmt.Errorf("%s", strings.Join(errors, "\n"))
} }
return output, err return out, err
}
func makeRepoRelative(filePath string) string {
realRoot := path.Join(*rootDir, *repoRoot) + "/"
return strings.TrimPrefix(filePath, realRoot)
} }
// We have to append together before path.Clean will be able to tell that stuff // We have to append together before path.Clean will be able to tell that stuff

View File

@ -0,0 +1,76 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
var _ = fmt.Printf
func TestBadLinks(t *testing.T) {
var cases = []struct {
in string
}{
{"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/NOTREADME.md)"},
{"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/NOTREADME.md)"},
{"[NOTREADME](../NOTREADME.md)"},
}
for _, c := range cases {
in := getMungeLines(c.in)
_, err := updateLinks("filename.md", in)
assert.Error(t, err)
}
}
func TestGoodLinks(t *testing.T) {
var cases = []struct {
in string
expected string
}{
{"", ""},
{"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/README.md)",
"[README](README.md)"},
{"[README](../README.md)",
"[README](README.md)"},
{"[README](https://lwn.net)",
"[README](https://lwn.net)"},
// _ to -
{"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/devel/cli_roadmap.md)",
"[README](../../docs/devel/cli-roadmap.md)"},
// - to _
{"[README](../../docs/devel/api-changes.md)",
"[README](../../docs/devel/api_changes.md)"},
// Does this even make sense? i dunno
{"[README](/docs/README.md)",
"[README](https://github.com/docs/README.md)"},
{"[README](/GoogleCloudPlatform/kubernetes/tree/master/docs/README.md)",
"[README](../../docs/README.md)"},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateLinks("filename.md", in)
assert.NoError(t, err)
if !actual.Equal(expected) {
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
}
}
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -32,26 +31,29 @@ import (
var ( var (
verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.") verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.") rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
repoRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root. // "repo-root" seems like a dumb name, this is the relative path (from rootDir) to get to the repoRoot
relRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
It's done this way so that generally you just have to set --root-dir. It's done this way so that generally you just have to set --root-dir.
Examples: Examples:
* --root-dir=docs/ --repo-root=.. means the repository root is ./ * --root-dir=docs/ --repo-root=.. means the repository root is ./
* --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/ * --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/
* --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`) * --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`)
skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList) skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList)
repoRoot string
ErrChangesNeeded = errors.New("mungedocs: changes required") ErrChangesNeeded = errors.New("mungedocs: changes required")
// All of the munge operations to perform. // All of the munge operations to perform.
// TODO: allow selection from command line. (e.g., just check links in the examples directory.) // TODO: allow selection from command line. (e.g., just check links in the examples directory.)
allMunges = []munge{ allMunges = []munge{
{"remove-whitespace", updateWhitespace},
{"table-of-contents", updateTOC}, {"table-of-contents", updateTOC},
{"unversioned-warning", updateUnversionedWarning}, {"unversioned-warning", updateUnversionedWarning},
{"check-links", checkLinks}, {"md-links", updateLinks},
{"blank-lines-surround-preformatted", checkPreformatted}, {"blank-lines-surround-preformatted", updatePreformatted},
{"header-lines", checkHeaderLines}, {"header-lines", updateHeaderLines},
{"analytics", checkAnalytics}, {"analytics", updateAnalytics},
{"kubectl-dash-f", checkKubectlFileTargets}, {"kubectl-dash-f", updateKubectlFileTargets},
{"sync-examples", syncExamples}, {"sync-examples", syncExamples},
} }
availableMungeList = func() string { availableMungeList = func() string {
@ -68,7 +70,7 @@ Examples:
// data into a new byte array and return that. // data into a new byte array and return that.
type munge struct { type munge struct {
name string name string
fn func(filePath string, before []byte) (after []byte, err error) fn func(filePath string, mlines mungeLines) (after mungeLines, err error)
} }
type fileProcessor struct { type fileProcessor struct {
@ -90,12 +92,14 @@ func (f fileProcessor) visit(path string) error {
return err return err
} }
mungeLines := getMungeLines(string(fileBytes))
modificationsMade := false modificationsMade := false
errFound := false errFound := false
filePrinted := false filePrinted := false
for _, munge := range f.munges { for _, munge := range f.munges {
after, err := munge.fn(path, fileBytes) after, err := munge.fn(path, mungeLines)
if err != nil || !bytes.Equal(after, fileBytes) { if err != nil || !after.Equal(mungeLines) {
if !filePrinted { if !filePrinted {
fmt.Printf("%s\n----\n", path) fmt.Printf("%s\n----\n", path)
filePrinted = true filePrinted = true
@ -110,7 +114,7 @@ func (f fileProcessor) visit(path string) error {
} }
fmt.Println("") fmt.Println("")
} }
fileBytes = after mungeLines = after
} }
// Write out new file with any changes. // Write out new file with any changes.
@ -119,7 +123,7 @@ func (f fileProcessor) visit(path string) error {
// We're not allowed to make changes. // We're not allowed to make changes.
return ErrChangesNeeded return ErrChangesNeeded
} }
ioutil.WriteFile(path, fileBytes, 0644) ioutil.WriteFile(path, mungeLines.Bytes(), 0644)
} }
if errFound { if errFound {
return ErrChangesNeeded return ErrChangesNeeded
@ -165,6 +169,7 @@ func wantedMunges() (filtered []munge) {
} }
func main() { func main() {
var err error
flag.Parse() flag.Parse()
if *rootDir == "" { if *rootDir == "" {
@ -172,11 +177,9 @@ func main() {
os.Exit(1) os.Exit(1)
} }
// Split the root dir of "foo/docs" into "foo" and "docs". We repoRoot = path.Join(*rootDir, *relRoot)
// chdir into "foo" and walk "docs" so the walk is always at a repoRoot, err = filepath.Abs(repoRoot)
// relative path. if err != nil {
stem, leaf := path.Split(strings.TrimRight(*rootDir, "/"))
if err := os.Chdir(stem); err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
os.Exit(2) os.Exit(2)
} }
@ -194,7 +197,7 @@ func main() {
// changes needed, exit 1 if manual changes are needed. // changes needed, exit 1 if manual changes are needed.
var changesNeeded bool var changesNeeded bool
err := filepath.Walk(leaf, newWalkFunc(&fp, &changesNeeded)) err = filepath.Walk(*rootDir, newWalkFunc(&fp, &changesNeeded))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
os.Exit(2) os.Exit(2)

View File

@ -16,40 +16,26 @@ limitations under the License.
package main package main
import "bytes"
// Blocks of ``` need to have blank lines on both sides or they don't look // Blocks of ``` need to have blank lines on both sides or they don't look
// right in HTML. // right in HTML.
func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) { func updatePreformatted(filePath string, mlines mungeLines) (mungeLines, error) {
f := splitByPreformatted(fileBytes) var out mungeLines
f = append(fileBlocks{{false, []byte{}}}, f...) inpreformat := false
f = append(f, fileBlock{false, []byte{}}) for i, mline := range mlines {
if !inpreformat && mline.preformatted {
output := []byte(nil) if i == 0 || out[len(out)-1].data != "" {
for i := 1; i < len(f)-1; i++ { out = append(out, blankMungeLine)
prev := &f[i-1]
block := &f[i]
next := &f[i+1]
if !block.preformatted {
continue
} }
neededSuffix := []byte("\n\n") // start of a preformat block
for !bytes.HasSuffix(prev.data, neededSuffix) { inpreformat = true
prev.data = append(prev.data, '\n')
} }
for !bytes.HasSuffix(block.data, neededSuffix) { out = append(out, mline)
block.data = append(block.data, '\n') if inpreformat && !mline.preformatted {
if bytes.HasPrefix(next.data, []byte("\n")) { if i >= len(mlines)-2 || mlines[i+1].data != "" {
// don't change the number of newlines unless needed. out = append(out, blankMungeLine)
next.data = next.data[1:] }
if len(next.data) == 0 { inpreformat = false
f = append(f[:i+1], f[i+2:]...)
} }
} }
} return out, nil
}
for _, block := range f {
output = append(output, block.data...)
}
return output, nil
} }

View File

@ -0,0 +1,57 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestPreformatted(t *testing.T) {
var cases = []struct {
in string
expected string
}{
{"", ""},
{
"```\nbob\n```",
"\n```\nbob\n```\n\n",
},
{
"```\nbob\n```\n```\nnotbob\n```\n",
"\n```\nbob\n```\n\n```\nnotbob\n```\n\n",
},
{
"```bob```\n",
"```bob```\n",
},
{
" ```\n bob\n ```",
"\n ```\n bob\n ```\n\n",
},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updatePreformatted("filename.md", in)
assert.NoError(t, err)
if !actual.Equal(expected) {
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
}
}
}

View File

@ -17,8 +17,6 @@ limitations under the License.
package main package main
import ( import (
"bufio"
"bytes"
"fmt" "fmt"
"regexp" "regexp"
"strings" "strings"
@ -26,6 +24,8 @@ import (
const tocMungeTag = "GENERATED_TOC" const tocMungeTag = "GENERATED_TOC"
var r = regexp.MustCompile("[^A-Za-z0-9-]")
// inserts/updates a table of contents in markdown file. // inserts/updates a table of contents in markdown file.
// //
// First, builds a ToC. // First, builds a ToC.
@ -33,15 +33,11 @@ const tocMungeTag = "GENERATED_TOC"
// the ToC, thereby updating any previously inserted ToC. // the ToC, thereby updating any previously inserted ToC.
// //
// TODO(erictune): put this in own package with tests // TODO(erictune): put this in own package with tests
func updateTOC(filePath string, markdown []byte) ([]byte, error) { func updateTOC(filePath string, mlines mungeLines) (mungeLines, error) {
toc, err := buildTOC(markdown) toc := buildTOC(mlines)
updatedMarkdown, err := updateMacroBlock(mlines, tocMungeTag, toc)
if err != nil { if err != nil {
return nil, err return mlines, err
}
lines := splitLines(markdown)
updatedMarkdown, err := updateMacroBlock(lines, beginMungeTag(tocMungeTag), endMungeTag(tocMungeTag), string(toc))
if err != nil {
return nil, err
} }
return updatedMarkdown, nil return updatedMarkdown, nil
} }
@ -52,24 +48,19 @@ func updateTOC(filePath string, markdown []byte) ([]byte, error) {
// and builds a table of contents from those. Assumes bookmarks for those will be // and builds a table of contents from those. Assumes bookmarks for those will be
// like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces. // like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces.
// builds the ToC. // builds the ToC.
func buildTOC(markdown []byte) ([]byte, error) {
var buffer bytes.Buffer func buildTOC(mlines mungeLines) mungeLines {
buffer.WriteString("\n") var out mungeLines
scanner := bufio.NewScanner(bytes.NewReader(markdown))
inBlockQuotes := false for _, mline := range mlines {
for scanner.Scan() { if mline.preformatted || !mline.header {
line := scanner.Text()
match, err := regexp.Match("^```", []byte(line))
if err != nil {
return nil, err
}
if match {
inBlockQuotes = !inBlockQuotes
continue continue
} }
if inBlockQuotes { // Add a blank line after the munge start tag
continue if len(out) == 0 {
out = append(out, blankMungeLine)
} }
line := mline.data
noSharps := strings.TrimLeft(line, "#") noSharps := strings.TrimLeft(line, "#")
numSharps := len(line) - len(noSharps) numSharps := len(line) - len(noSharps)
heading := strings.Trim(noSharps, " \n") heading := strings.Trim(noSharps, " \n")
@ -77,16 +68,15 @@ func buildTOC(markdown []byte) ([]byte, error) {
indent := strings.Repeat(" ", numSharps-1) indent := strings.Repeat(" ", numSharps-1)
bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1) bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
// remove symbols (except for -) in bookmarks // remove symbols (except for -) in bookmarks
r := regexp.MustCompile("[^A-Za-z0-9-]")
bookmark = r.ReplaceAllString(bookmark, "") bookmark = r.ReplaceAllString(bookmark, "")
tocLine := fmt.Sprintf("%s- [%s](#%s)\n", indent, heading, bookmark) tocLine := fmt.Sprintf("%s- [%s](#%s)", indent, heading, bookmark)
buffer.WriteString(tocLine) out = append(out, newMungeLine(tocLine))
} }
} }
if err := scanner.Err(); err != nil { // Add a blank line before the munge end tag
return []byte{}, err if len(out) != 0 {
out = append(out, blankMungeLine)
} }
return out
return buffer.Bytes(), nil
} }

View File

@ -25,28 +25,29 @@ import (
func Test_buildTOC(t *testing.T) { func Test_buildTOC(t *testing.T) {
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", "\n"}, {"", ""},
{"Lorem ipsum\ndolor sit amet\n", "\n"}, {"Lorem ipsum\ndolor sit amet\n", ""},
{ {
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n", "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n", "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
}, },
{ {
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```", "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```",
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n", "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
}, },
{ {
"# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n", "# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n",
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n", "\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n\n",
}, },
} }
for _, c := range cases { for i, c := range cases {
actual, err := buildTOC([]byte(c.in)) in := getMungeLines(c.in)
assert.NoError(t, err) expected := getMungeLines(c.expected)
if c.out != string(actual) { actual := buildTOC(in)
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual)) if !expected.Equal(actual) {
t.Errorf("Case[%d] Expected TOC '%v' but got '%v'", i, expected.String(), actual.String())
} }
} }
} }
@ -54,7 +55,7 @@ func Test_buildTOC(t *testing.T) {
func Test_updateTOC(t *testing.T) { func Test_updateTOC(t *testing.T) {
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", ""}, {"", ""},
{ {
@ -67,10 +68,12 @@ func Test_updateTOC(t *testing.T) {
}, },
} }
for _, c := range cases { for _, c := range cases {
actual, err := updateTOC("filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateTOC("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if c.out != string(actual) { if !expected.Equal(actual) {
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual)) t.Errorf("Expected TOC '%v' but got '%v'", expected.String(), actual.String())
} }
} }
} }

View File

@ -20,10 +20,7 @@ import "fmt"
const unversionedWarningTag = "UNVERSIONED_WARNING" const unversionedWarningTag = "UNVERSIONED_WARNING"
var beginUnversionedWarning = beginMungeTag(unversionedWarningTag) const unversionedWarningPre = `
var endUnversionedWarning = endMungeTag(unversionedWarningTag)
const unversionedWarningFmt = `
<!-- BEGIN STRIP_FOR_RELEASE --> <!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING" <img src="http://kubernetes.io/img/warning.png" alt="WARNING"
@ -44,7 +41,11 @@ refer to the docs that go with that version.
<strong> <strong>
The latest 1.0.x release of this document can be found The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/%s). `
const unversionedWarningFmt = `[here](http://releases.k8s.io/release-1.0/%s).`
const unversionedWarningPost = `
Documentation for other releases can be found at Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io). [releases.k8s.io](http://releases.k8s.io).
@ -52,21 +53,31 @@ Documentation for other releases can be found at
-- --
<!-- END STRIP_FOR_RELEASE --> <!-- END STRIP_FOR_RELEASE -->
` `
func makeUnversionedWarning(fileName string) string { func makeUnversionedWarning(fileName string) mungeLines {
return fmt.Sprintf(unversionedWarningFmt, fileName) insert := unversionedWarningPre + fmt.Sprintf(unversionedWarningFmt, fileName) + unversionedWarningPost
return getMungeLines(insert)
} }
// inserts/updates a warning for unversioned docs // inserts/updates a warning for unversioned docs
func updateUnversionedWarning(file string, markdown []byte) ([]byte, error) { func updateUnversionedWarning(file string, mlines mungeLines) (mungeLines, error) {
lines := splitLines(markdown) file, err := makeRepoRelative(file, file)
if hasLine(lines, "<!-- TAG IS_VERSIONED -->") { if err != nil {
return mlines, err
}
if hasLine(mlines, "<!-- TAG IS_VERSIONED -->") {
// No warnings on release branches // No warnings on release branches
return markdown, nil return mlines, nil
} }
if !hasMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning) { if !hasMacroBlock(mlines, unversionedWarningTag) {
lines = append([]string{beginUnversionedWarning, endUnversionedWarning}, lines...) mlines = prependMacroBlock(unversionedWarningTag, mlines)
} }
return updateMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning, makeUnversionedWarning(file))
mlines, err = updateMacroBlock(mlines, unversionedWarningTag, makeUnversionedWarning(file))
if err != nil {
return mlines, err
}
return mlines, nil
} }

View File

@ -23,30 +23,34 @@ import (
) )
func TestUnversionedWarning(t *testing.T) { func TestUnversionedWarning(t *testing.T) {
warningBlock := beginUnversionedWarning + "\n" + makeUnversionedWarning("filename.md") + "\n" + endUnversionedWarning + "\n" beginMark := beginMungeTag(unversionedWarningTag)
endMark := endMungeTag(unversionedWarningTag)
warningString := makeUnversionedWarning("filename.md").String()
warningBlock := beginMark + "\n" + warningString + endMark + "\n"
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", warningBlock}, {"", warningBlock},
{ {
"Foo\nBar\n", "Foo\nBar\n",
warningBlock + "Foo\nBar\n", warningBlock + "\nFoo\nBar\n",
}, },
{ {
"Foo\n<!-- TAG IS_VERSIONED -->\nBar", "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
"Foo\n<!-- TAG IS_VERSIONED -->\nBar", "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
}, },
{ {
beginUnversionedWarning + "\n" + endUnversionedWarning + "\n", beginMark + "\n" + endMark + "\n",
warningBlock, warningBlock,
}, },
{ {
beginUnversionedWarning + "\n" + "something\n" + endUnversionedWarning + "\n", beginMark + "\n" + "something\n" + endMark + "\n",
warningBlock, warningBlock,
}, },
{ {
"Foo\n" + beginUnversionedWarning + "\n" + endUnversionedWarning + "\nBar\n", "Foo\n" + beginMark + "\n" + endMark + "\nBar\n",
"Foo\n" + warningBlock + "Bar\n", "Foo\n" + warningBlock + "Bar\n",
}, },
{ {
@ -55,10 +59,12 @@ func TestUnversionedWarning(t *testing.T) {
}, },
} }
for i, c := range cases { for i, c := range cases {
actual, err := updateUnversionedWarning("filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateUnversionedWarning("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if string(actual) != c.out { if !expected.Equal(actual) {
t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual)) t.Errorf("case[%d]: expected %v got %v", i, expected.String(), actual.String())
} }
} }
} }

View File

@ -17,83 +17,140 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"fmt" "fmt"
"path"
"path/filepath"
"regexp" "regexp"
"strings" "strings"
"unicode"
) )
// Splits a document up into a slice of lines.
func splitLines(document []byte) []string {
lines := strings.Split(string(document), "\n")
// Skip trailing empty string from Split-ing
if len(lines) > 0 && lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
return lines
}
// Replaces the text between matching "beginMark" and "endMark" within the // Replaces the text between matching "beginMark" and "endMark" within the
// document represented by "lines" with "insertThis". // document represented by "lines" with "insertThis".
// //
// Delimiters should occupy own line. // Delimiters should occupy own line.
// Returns copy of document with modifications. // Returns copy of document with modifications.
func updateMacroBlock(lines []string, beginMark, endMark, insertThis string) ([]byte, error) { func updateMacroBlock(mlines mungeLines, token string, insertThis mungeLines) (mungeLines, error) {
var buffer bytes.Buffer beginMark := beginMungeTag(token)
endMark := endMungeTag(token)
var out mungeLines
betweenBeginAndEnd := false betweenBeginAndEnd := false
for _, line := range lines { for _, mline := range mlines {
trimmedLine := strings.Trim(line, " \n") if mline.preformatted && !betweenBeginAndEnd {
if trimmedLine == beginMark { out = append(out, mline)
continue
}
line := mline.data
if mline.beginTag && line == beginMark {
if betweenBeginAndEnd { if betweenBeginAndEnd {
return nil, fmt.Errorf("found second begin mark while updating macro blocks") return nil, fmt.Errorf("found second begin mark while updating macro blocks")
} }
betweenBeginAndEnd = true betweenBeginAndEnd = true
buffer.WriteString(line) out = append(out, mline)
buffer.WriteString("\n") } else if mline.endTag && line == endMark {
} else if trimmedLine == endMark {
if !betweenBeginAndEnd { if !betweenBeginAndEnd {
return nil, fmt.Errorf("found end mark without being mark while updating macro blocks") return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
} }
buffer.WriteString(insertThis)
// Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
buffer.WriteString("\n")
buffer.WriteString(line)
buffer.WriteString("\n")
betweenBeginAndEnd = false betweenBeginAndEnd = false
out = append(out, insertThis...)
out = append(out, mline)
} else { } else {
if !betweenBeginAndEnd { if !betweenBeginAndEnd {
buffer.WriteString(line) out = append(out, mline)
buffer.WriteString("\n")
} }
} }
} }
if betweenBeginAndEnd { if betweenBeginAndEnd {
return nil, fmt.Errorf("never found closing end mark while updating macro blocks") return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
} }
return buffer.Bytes(), nil return out, nil
} }
// Tests that a document, represented as a slice of lines, has a line. Ignores // Tests that a document, represented as a slice of lines, has a line. Ignores
// leading and trailing space. // leading and trailing space.
func hasLine(lines []string, needle string) bool { func hasLine(lines mungeLines, needle string) bool {
for _, line := range lines { for _, mline := range lines {
trimmedLine := strings.Trim(line, " \n") haystack := strings.TrimSpace(mline.data)
if trimmedLine == needle { if haystack == needle {
return true return true
} }
} }
return false return false
} }
// removeMacroBlock strips the begin/end munge tags for the given token,
// along with every line between them, from mlines. Preformatted lines are
// always kept verbatim, even between the tags. Returns an error if the
// tags are unbalanced or nested.
func removeMacroBlock(token string, mlines mungeLines) (mungeLines, error) {
	begin := beginMungeTag(token)
	end := endMungeTag(token)
	var kept mungeLines
	inside := false
	for _, ml := range mlines {
		switch {
		case ml.preformatted:
			// Never treat preformatted text as a tag; keep it as-is.
			kept = append(kept, ml)
		case ml.beginTag && ml.data == begin:
			if inside {
				return nil, fmt.Errorf("found second begin mark while updating macro blocks")
			}
			inside = true
		case ml.endTag && ml.data == end:
			if !inside {
				return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
			}
			inside = false
		case !inside:
			kept = append(kept, ml)
		}
	}
	if inside {
		return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
	}
	return kept, nil
}
// prependMacroBlock inserts an empty begin/end munge block for token at the
// top of mlines, adding a blank separator line when the following content
// does not already start blank.
func prependMacroBlock(token string, mlines mungeLines) mungeLines {
	block := mungeLines{
		newMungeLine(beginMungeTag(token)),
		newMungeLine(endMungeTag(token)),
	}
	if len(mlines) > 0 && mlines[0].data != "" {
		block = append(block, blankMungeLine)
	}
	return append(block, mlines...)
}
// appendMacroBlock adds an empty begin/end munge block for token at the
// bottom of mlines, adding a blank separator line when the preceding
// content does not already end blank.
func appendMacroBlock(mlines mungeLines, token string) mungeLines {
	result := mlines
	if n := len(mlines); n > 0 && mlines[n-1].data != "" {
		result = append(result, blankMungeLine)
	}
	return append(result, newMungeLine(beginMungeTag(token)), newMungeLine(endMungeTag(token)))
}
// Tests that a document, represented as a slice of lines, has a macro block. // Tests that a document, represented as a slice of lines, has a macro block.
func hasMacroBlock(lines []string, begin string, end string) bool { func hasMacroBlock(lines mungeLines, token string) bool {
beginMark := beginMungeTag(token)
endMark := endMungeTag(token)
foundBegin := false foundBegin := false
for _, line := range lines { for _, mline := range lines {
trimmedLine := strings.Trim(line, " \n") if mline.preformatted {
continue
}
if !mline.beginTag && !mline.endTag {
continue
}
line := mline.data
switch { switch {
case !foundBegin && trimmedLine == begin: case !foundBegin && line == beginMark:
foundBegin = true foundBegin = true
case foundBegin && trimmedLine == end: case foundBegin && line == endMark:
return true return true
} }
} }
@ -112,72 +169,123 @@ func endMungeTag(desc string) string {
return fmt.Sprintf("<!-- END MUNGE: %s -->", desc) return fmt.Sprintf("<!-- END MUNGE: %s -->", desc)
} }
// Calls 'replace' for all sections of the document not in ``` / ``` blocks. So type mungeLine struct {
// that you don't have false positives inside those blocks. data string
func replaceNonPreformatted(input []byte, replace func([]byte) []byte) []byte {
f := splitByPreformatted(input)
output := []byte(nil)
for _, block := range f {
if block.preformatted {
output = append(output, block.data...)
} else {
output = append(output, replace(block.data)...)
}
}
return output
}
type fileBlock struct {
preformatted bool preformatted bool
data []byte header bool
link bool
beginTag bool
endTag bool
} }
type fileBlocks []fileBlock type mungeLines []mungeLine
// Equal reports whether m1 and m2 hold the same text, comparing only the
// line data (line metadata such as preformatted/header flags is ignored).
func (m1 mungeLines) Equal(m2 mungeLines) bool {
	if len(m1) != len(m2) {
		return false
	}
	for i, ml := range m1 {
		if ml.data != m2[i].data {
			return false
		}
	}
	return true
}
// String serializes mlines back into file text, joining lines with "\n"
// and always ending with a trailing newline.
func (mlines mungeLines) String() string {
	parts := make([]string, 0, len(mlines))
	for _, ml := range mlines {
		parts = append(parts, ml.data)
	}
	// Tack on an extra newline so the serialized file ends with one.
	return strings.Join(parts, "\n") + "\n"
}
// Bytes serializes mlines to a byte slice, suitable for writing to disk.
func (mlines mungeLines) Bytes() []byte {
	s := mlines.String()
	return []byte(s)
}
var ( var (
// Finds all preformatted block start/stops. // Finds all preformatted block start/stops.
preformatRE = regexp.MustCompile("^\\s*```") preformatRE = regexp.MustCompile("^\\s*```")
notPreformatRE = regexp.MustCompile("^\\s*```.*```") notPreformatRE = regexp.MustCompile("^\\s*```.*```")
// Is this line a header?
mlHeaderRE = regexp.MustCompile(`^#`)
// Is there a link on this line?
mlLinkRE = regexp.MustCompile(`\[[^]]*\]\([^)]*\)`)
beginTagRE = regexp.MustCompile(`<!-- BEGIN MUNGE:`)
endTagRE = regexp.MustCompile(`<!-- END MUNGE:`)
blankMungeLine = newMungeLine("")
) )
func splitByPreformatted(input []byte) fileBlocks { // Does not set 'preformatted'
f := fileBlocks{} func newMungeLine(line string) mungeLine {
return mungeLine{
cur := []byte(nil) data: line,
preformatted := false header: mlHeaderRE.MatchString(line),
// SplitAfter keeps the newline, so you don't have to worry about link: mlLinkRE.MatchString(line),
// omitting it on the last line or anything. Also, the documentation beginTag: beginTagRE.MatchString(line),
// claims it's unicode safe. endTag: endTagRE.MatchString(line),
for _, line := range bytes.SplitAfter(input, []byte("\n")) {
if !preformatted {
if preformatRE.Match(line) && !notPreformatRE.Match(line) {
if len(cur) > 0 {
f = append(f, fileBlock{false, cur})
} }
cur = []byte{} }
// trimRightSpace returns in with all trailing Unicode whitespace removed.
func trimRightSpace(in string) string {
	isSpace := func(r rune) bool { return unicode.IsSpace(r) }
	return strings.TrimRightFunc(in, isSpace)
}
// splitLines breaks document into a slice of lines on "\n". A trailing
// newline does not produce a final empty element, so round-tripping via
// a join-plus-newline reproduces the document.
func splitLines(document string) []string {
	lines := strings.Split(document, "\n")
	if n := len(lines); n > 0 && lines[n-1] == "" {
		// Drop the empty string that Split produces after a trailing "\n".
		return lines[:n-1]
	}
	return lines
}
func getMungeLines(in string) mungeLines {
var out mungeLines
preformatted := false
lines := splitLines(in)
// We indicate if any given line is inside a preformatted block or
// outside a preformatted block
for _, line := range lines {
if !preformatted {
if preformatRE.MatchString(line) && !notPreformatRE.MatchString(line) {
preformatted = true preformatted = true
} }
cur = append(cur, line...)
} else { } else {
cur = append(cur, line...) if preformatRE.MatchString(line) {
if preformatRE.Match(line) {
if len(cur) > 0 {
f = append(f, fileBlock{true, cur})
}
cur = []byte{}
preformatted = false preformatted = false
} }
} }
ml := newMungeLine(line)
ml.preformatted = preformatted
out = append(out, ml)
} }
if len(cur) > 0 { return out
f = append(f, fileBlock{preformatted, cur})
}
return f
} }
// As above, but further uses exp to parse the non-preformatted sections. // filePath is the file we are looking for
func replaceNonPreformattedRegexp(input []byte, exp *regexp.Regexp, replace func([]byte) []byte) []byte { // inFile is the file where we found the link. So if we are processing
return replaceNonPreformatted(input, func(in []byte) []byte { // /path/to/repoRoot/docs/admin/README.md and are looking for
return exp.ReplaceAllFunc(in, replace) // ../../file.json we can find that location.
}) // In many cases filePath and processingFile may be the same
// makeRepoRelative rewrites filePath to be relative to the repository root.
// filePath is the file we are looking for; processingFile is the file in
// which the link was found, so a path like ../../file.json can be resolved
// against the directory of processingFile when the direct form fails.
// In many cases filePath and processingFile may be the same.
func makeRepoRelative(filePath string, processingFile string) (string, error) {
	if rel, err := filepath.Rel(repoRoot, filePath); err == nil {
		return rel, nil
	}
	// Fall back: interpret filePath relative to the directory of the file
	// currently being processed.
	dir := path.Dir(processingFile)
	return filepath.Rel(repoRoot, path.Join(dir, filePath))
}
// makeFileRelative rewrites filePath to be relative to the directory of
// processingFile. When the direct computation fails (e.g. mixing absolute
// and relative paths), filePath is first resolved against that directory.
func makeFileRelative(filePath string, processingFile string) (string, error) {
	dir := path.Dir(processingFile)
	rel, err := filepath.Rel(dir, filePath)
	if err == nil {
		return rel, nil
	}
	return filepath.Rel(dir, path.Join(dir, filePath))
}

View File

@ -17,13 +17,17 @@ limitations under the License.
package main package main
import ( import (
"reflect" "strings"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func Test_updateMacroBlock(t *testing.T) { func Test_updateMacroBlock(t *testing.T) {
token := "TOKEN"
BEGIN := beginMungeTag(token)
END := endMungeTag(token)
var cases = []struct { var cases = []struct {
in string in string
out string out string
@ -31,149 +35,135 @@ func Test_updateMacroBlock(t *testing.T) {
{"", ""}, {"", ""},
{"Lorem ipsum\ndolor sit amet\n", {"Lorem ipsum\ndolor sit amet\n",
"Lorem ipsum\ndolor sit amet\n"}, "Lorem ipsum\ndolor sit amet\n"},
{"Lorem ipsum \n BEGIN\ndolor\nEND\nsit amet\n", {"Lorem ipsum \n" + BEGIN + "\ndolor\n" + END + "\nsit amet\n",
"Lorem ipsum \n BEGIN\nfoo\n\nEND\nsit amet\n"}, "Lorem ipsum \n" + BEGIN + "\nfoo\n" + END + "\nsit amet\n"},
} }
for _, c := range cases { for _, c := range cases {
actual, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo\n") in := getMungeLines(c.in)
expected := getMungeLines(c.out)
actual, err := updateMacroBlock(in, token, getMungeLines("foo"))
assert.NoError(t, err) assert.NoError(t, err)
if c.out != string(actual) { if !expected.Equal(actual) {
t.Errorf("Expected '%v' but got '%v'", c.out, string(actual)) t.Errorf("Expected '%v' but got '%v'", expected.String(), expected.String())
} }
} }
} }
func Test_updateMacroBlock_errors(t *testing.T) { func Test_updateMacroBlock_errors(t *testing.T) {
token := "TOKEN"
b := beginMungeTag(token)
e := endMungeTag(token)
var cases = []struct { var cases = []struct {
in string in string
}{ }{
{"BEGIN\n"}, {b + "\n"},
{"blah\nBEGIN\nblah"}, {"blah\n" + b + "\nblah"},
{"END\n"}, {e + "\n"},
{"blah\nEND\nblah\n"}, {"blah\n" + e + "\nblah\n"},
{"END\nBEGIN"}, {e + "\n" + b},
{"BEGIN\nEND\nEND"}, {b + "\n" + e + "\n" + e},
{"BEGIN\nBEGIN\nEND"}, {b + "\n" + b + "\n" + e},
{"BEGIN\nBEGIN\nEND\nEND"}, {b + "\n" + b + "\n" + e + "\n" + e},
} }
for _, c := range cases { for _, c := range cases {
_, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo") in := getMungeLines(c.in)
_, err := updateMacroBlock(in, token, getMungeLines("foo"))
assert.Error(t, err) assert.Error(t, err)
} }
} }
func TestHasLine(t *testing.T) { func TestHasLine(t *testing.T) {
cases := []struct { cases := []struct {
lines []string haystack string
needle string needle string
expected bool expected bool
}{ }{
{[]string{"abc", "def", "ghi"}, "abc", true}, {"abc\ndef\nghi", "abc", true},
{[]string{" abc", "def", "ghi"}, "abc", true}, {" abc\ndef\nghi", "abc", true},
{[]string{"abc ", "def", "ghi"}, "abc", true}, {"abc \ndef\nghi", "abc", true},
{[]string{"\n abc", "def", "ghi"}, "abc", true}, {"\n abc\ndef\nghi", "abc", true},
{[]string{"abc \n", "def", "ghi"}, "abc", true}, {"abc \n\ndef\nghi", "abc", true},
{[]string{"abc", "def", "ghi"}, "def", true}, {"abc\ndef\nghi", "def", true},
{[]string{"abc", "def", "ghi"}, "ghi", true}, {"abc\ndef\nghi", "ghi", true},
{[]string{"abc", "def", "ghi"}, "xyz", false}, {"abc\ndef\nghi", "xyz", false},
} }
for i, c := range cases { for i, c := range cases {
if hasLine(c.lines, c.needle) != c.expected { in := getMungeLines(c.haystack)
if hasLine(in, c.needle) != c.expected {
t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected) t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected)
} }
} }
} }
func TestHasMacroBlock(t *testing.T) { func TestHasMacroBlock(t *testing.T) {
token := "<<<"
b := beginMungeTag(token)
e := endMungeTag(token)
cases := []struct { cases := []struct {
lines []string lines []string
begin string
end string
expected bool expected bool
}{ }{
{[]string{"<<<", ">>>"}, "<<<", ">>>", true}, {[]string{b, e}, true},
{[]string{"<<<", "abc", ">>>"}, "<<<", ">>>", true}, {[]string{b, "abc", e}, true},
{[]string{"<<<", "<<<", "abc", ">>>"}, "<<<", ">>>", true}, {[]string{b, b, "abc", e}, true},
{[]string{"<<<", "abc", ">>>", ">>>"}, "<<<", ">>>", true}, {[]string{b, "abc", e, e}, true},
{[]string{"<<<", ">>>", "<<<", ">>>"}, "<<<", ">>>", true}, {[]string{b, e, b, e}, true},
{[]string{"<<<"}, "<<<", ">>>", false}, {[]string{b}, false},
{[]string{">>>"}, "<<<", ">>>", false}, {[]string{e}, false},
{[]string{"<<<", "abc"}, "<<<", ">>>", false}, {[]string{b, "abc"}, false},
{[]string{"abc", ">>>"}, "<<<", ">>>", false}, {[]string{"abc", e}, false},
} }
for i, c := range cases { for i, c := range cases {
if hasMacroBlock(c.lines, c.begin, c.end) != c.expected { in := getMungeLines(strings.Join(c.lines, "\n"))
t.Errorf("case[%d]: %q,%q, expected %t, got %t", i, c.begin, c.end, c.expected, !c.expected) if hasMacroBlock(in, token) != c.expected {
t.Errorf("case[%d]: expected %t, got %t", i, c.expected, !c.expected)
} }
} }
} }
func TestReplaceNonPreformatted(t *testing.T) { func TestAppendMacroBlock(t *testing.T) {
token := "<<<"
b := beginMungeTag(token)
e := endMungeTag(token)
cases := []struct { cases := []struct {
in string in []string
out string expected []string
}{ }{
{"aoeu", ""}, {[]string{}, []string{b, e}},
{"aoeu\n```\naoeu\n```\naoeu", "```\naoeu\n```\n"}, {[]string{"bob"}, []string{"bob", "", b, e}},
{"ao\neu\n```\naoeu\n\n\n", "```\naoeu\n\n\n"}, {[]string{b, e}, []string{b, e, "", b, e}},
{"aoeu ```aoeu``` aoeu", ""},
} }
for i, c := range cases { for i, c := range cases {
out := string(replaceNonPreformatted([]byte(c.in), func([]byte) []byte { return nil })) in := getMungeLines(strings.Join(c.in, "\n"))
if out != c.out { expected := getMungeLines(strings.Join(c.expected, "\n"))
t.Errorf("%v: got %q, wanted %q", i, out, c.out) out := appendMacroBlock(in, token)
if !out.Equal(expected) {
t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
} }
} }
} }
func TestReplaceNonPreformattedNoChange(t *testing.T) { func TestPrependMacroBlock(t *testing.T) {
token := "<<<"
b := beginMungeTag(token)
e := endMungeTag(token)
cases := []struct { cases := []struct {
in string in []string
expected []string
}{ }{
{"aoeu"}, {[]string{}, []string{b, e}},
{"aoeu\n```\naoeu\n```\naoeu"}, {[]string{"bob"}, []string{b, e, "", "bob"}},
{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu"}, {[]string{b, e}, []string{b, e, "", b, e}},
{"ao\neu\n```\naoeu\n\n\n"},
{"aoeu ```aoeu``` aoeu"},
{"aoeu\n```\naoeu\n```"},
{"aoeu\n```\naoeu\n```\n"},
{"aoeu\n```\naoeu\n```\n\n"},
} }
for i, c := range cases { for i, c := range cases {
out := string(replaceNonPreformatted([]byte(c.in), func(in []byte) []byte { return in })) in := getMungeLines(strings.Join(c.in, "\n"))
if out != c.in { expected := getMungeLines(strings.Join(c.expected, "\n"))
t.Errorf("%v: got %q, wanted %q", i, out, c.in) out := prependMacroBlock(token, in)
} if !out.Equal(expected) {
} t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
}
func TestReplaceNonPreformattedCallOrder(t *testing.T) {
cases := []struct {
in string
expect []string
}{
{"aoeu", []string{"aoeu"}},
{"aoeu\n```\naoeu\n```\naoeu", []string{"aoeu\n", "aoeu"}},
{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu", []string{"aoeu\n\n", "\naoeu"}},
{"ao\neu\n```\naoeu\n\n\n", []string{"ao\neu\n"}},
{"aoeu ```aoeu``` aoeu", []string{"aoeu ```aoeu``` aoeu"}},
{"aoeu\n```\naoeu\n```", []string{"aoeu\n"}},
{"aoeu\n```\naoeu\n```\n", []string{"aoeu\n"}},
{"aoeu\n```\naoeu\n```\n\n", []string{"aoeu\n", "\n"}},
}
for i, c := range cases {
got := []string{}
replaceNonPreformatted([]byte(c.in), func(in []byte) []byte {
got = append(got, string(in))
return in
})
if e, a := c.expect, got; !reflect.DeepEqual(e, a) {
t.Errorf("%v: got %q, wanted %q", i, a, e)
} }
} }
} }

View File

@ -0,0 +1,31 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// Remove all trailing whitespace
func updateWhitespace(file string, mlines mungeLines) (mungeLines, error) {
var out mungeLines
for _, mline := range mlines {
if mline.preformatted {
out = append(out, mline)
continue
}
newline := trimRightSpace(mline.data)
out = append(out, newMungeLine(newline))
}
return out, nil
}

View File

@ -0,0 +1,45 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_updateWhiteSpace(t *testing.T) {
var cases = []struct {
in string
expected string
}{
{"", ""},
{"\n", "\n"},
{" \t \t \n", "\n"},
{"bob \t", "bob"},
{"```\n \n```\n", "```\n \n```\n"},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateWhitespace("filename.md", in)
assert.NoError(t, err)
if !expected.Equal(actual) {
t.Errorf("Case[%d] Expected Whitespace '%v' but got '%v'", i, string(expected.Bytes()), string(actual.Bytes()))
}
}
}

View File

@ -99,7 +99,7 @@ Use the file [`namespace-dev.json`](namespace-dev.json) which describes a develo
``` ```
[Download example](namespace-dev.json) [Download example](namespace-dev.json)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE namespace-dev.json -->
Create the development namespace using kubectl. Create the development namespace using kubectl.

View File

@ -74,7 +74,7 @@ spec:
``` ```
[Download example](../../examples/blog-logging/counter-pod.yaml) [Download example](../../examples/blog-logging/counter-pod.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->
This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Lets create the pod in the default This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Lets create the pod in the default
namespace. namespace.
@ -192,7 +192,7 @@ spec:
``` ```
[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml) [Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE ../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml -->
This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it. This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.

View File

@ -52,10 +52,10 @@ Documentation for other releases can be found at
## Overview ## Overview
This document describes the environment for Kubelet managed containers on a Kubernetes node (kNode).  In contrast to the Kubernetes cluster API, which provides an API for creating and managing containers, the Kubernetes container environment provides the container access to information about what else is going on in the cluster.  This document describes the environment for Kubelet managed containers on a Kubernetes node (kNode).  In contrast to the Kubernetes cluster API, which provides an API for creating and managing containers, the Kubernetes container environment provides the container access to information about what else is going on in the cluster.
This cluster information makes it possible to build applications that are *cluster aware*.   This cluster information makes it possible to build applications that are *cluster aware*.
Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers.  Container hooks are somewhat analogous to operating system signals in a traditional process model.   However these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster.  Containers that participate in this cluster lifecycle become *cluster native*.  Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers.  Container hooks are somewhat analogous to operating system signals in a traditional process model.   However these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster.  Containers that participate in this cluster lifecycle become *cluster native*.
Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](images.md) and one or more [volumes](volumes.md). Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](images.md) and one or more [volumes](volumes.md).
@ -89,7 +89,7 @@ Services have dedicated IP address, and are also surfaced to the container via D
*NB*: Container hooks are under active development, we anticipate adding additional hooks as the Kubernetes container management system evolves.* *NB*: Container hooks are under active development, we anticipate adding additional hooks as the Kubernetes container management system evolves.*
Container hooks provide information to the container about events in its management lifecycle.  For example, immediately after a container is started, it receives a *PostStart* hook.  These hooks are broadcast *into* the container with information about the life-cycle of the container.  They are different from the events provided by Docker and other systems which are *output* from the container.  Output events provide a log of what has already happened.  Input hooks provide real-time notification about things that are happening, but no historical log.   Container hooks provide information to the container about events in its management lifecycle.  For example, immediately after a container is started, it receives a *PostStart* hook.  These hooks are broadcast *into* the container with information about the life-cycle of the container.  They are different from the events provided by Docker and other systems which are *output* from the container.  Output events provide a log of what has already happened.  Input hooks provide real-time notification about things that are happening, but no historical log.
### Hook Details ### Hook Details

View File

@ -105,7 +105,7 @@ spec:
``` ```
[Download example](downward-api/dapi-pod.yaml) [Download example](downward-api/dapi-pod.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE downward-api/dapi-pod.yaml -->
Some more thorough examples: Some more thorough examples:
* [environment variables](environment-guide/) * [environment variables](environment-guide/)

View File

@ -59,7 +59,7 @@ spec:
``` ```
[Download example](../../examples/blog-logging/counter-pod.yaml) [Download example](../../examples/blog-logging/counter-pod.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->
we can run the pod: we can run the pod:

View File

@ -65,7 +65,7 @@ spec:
``` ```
[Download example](pod.yaml) [Download example](pod.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE pod.yaml -->
You can see your cluster's pods: You can see your cluster's pods:
@ -117,7 +117,7 @@ spec:
``` ```
[Download example](replication.yaml) [Download example](replication.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE replication.yaml -->
To delete the replication controller (and the pods it created): To delete the replication controller (and the pods it created):

View File

@ -166,7 +166,7 @@ spec:
``` ```
[Download example](pod-redis.yaml) [Download example](pod-redis.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE pod-redis.yaml -->
Notes: Notes:
- The volume mount name is a reference to a specific empty dir volume. - The volume mount name is a reference to a specific empty dir volume.

View File

@ -87,7 +87,7 @@ spec:
``` ```
[Download example](pod-nginx-with-label.yaml) [Download example](pod-nginx-with-label.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE pod-nginx-with-label.yaml -->
Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)): Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)):
@ -143,7 +143,7 @@ spec:
``` ```
[Download example](replication-controller.yaml) [Download example](replication-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE replication-controller.yaml -->
#### Replication Controller Management #### Replication Controller Management
@ -196,7 +196,7 @@ spec:
``` ```
[Download example](service.yaml) [Download example](service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE service.yaml -->
#### Service Management #### Service Management
@ -312,7 +312,7 @@ spec:
``` ```
[Download example](pod-with-http-healthcheck.yaml) [Download example](pod-with-http-healthcheck.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE pod-with-http-healthcheck.yaml -->
For more information about health checking, see [Container Probes](../pod-states.md#container-probes). For more information about health checking, see [Container Probes](../pod-states.md#container-probes).

View File

@ -101,7 +101,7 @@ spec:
``` ```
[Download example](cassandra-controller.yaml) [Download example](cassandra-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE cassandra-controller.yaml -->
There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later) There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later)
@ -132,7 +132,7 @@ spec:
``` ```
[Download example](cassandra-service.yaml) [Download example](cassandra-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE cassandra-service.yaml -->
The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
@ -242,7 +242,7 @@ spec:
``` ```
[Download example](cassandra-controller.yaml) [Download example](cassandra-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE cassandra-controller.yaml -->
Most of this replication controller definition is identical to the Cassandra pod definition above, it simply gives the resplication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute which contains the controller's selector query, and the ```replicas``` attribute which specifies the desired number of replicas, in this case 1. Most of this replication controller definition is identical to the Cassandra pod definition above, it simply gives the resplication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute which contains the controller's selector query, and the ```replicas``` attribute which specifies the desired number of replicas, in this case 1.

View File

@ -82,7 +82,7 @@ spec:
``` ```
[Download example](rabbitmq-service.yaml) [Download example](rabbitmq-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE rabbitmq-service.yaml -->
To start the service, run: To start the service, run:
@ -127,7 +127,7 @@ spec:
``` ```
[Download example](rabbitmq-controller.yaml) [Download example](rabbitmq-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE rabbitmq-controller.yaml -->
Running `$ kubectl create -f examples/celery-rabbitmq/rabbitmq-controller.yaml` brings up a replication controller that ensures one pod exists which is running a RabbitMQ instance. Running `$ kubectl create -f examples/celery-rabbitmq/rabbitmq-controller.yaml` brings up a replication controller that ensures one pod exists which is running a RabbitMQ instance.
@ -168,7 +168,7 @@ spec:
``` ```
[Download example](celery-controller.yaml) [Download example](celery-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE celery-controller.yaml -->
There are several things to point out here... There are several things to point out here...
@ -239,7 +239,7 @@ spec:
``` ```
[Download example](flower-service.yaml) [Download example](flower-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE flower-service.yaml -->
It is marked as external (LoadBalanced). However on many platforms you will have to add an explicit firewall rule to open port 5555. It is marked as external (LoadBalanced). However on many platforms you will have to add an explicit firewall rule to open port 5555.
On GCE this can be done with: On GCE this can be done with:
@ -280,7 +280,7 @@ spec:
``` ```
[Download example](flower-controller.yaml) [Download example](flower-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE flower-controller.yaml -->
This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower: This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower:

View File

@ -93,7 +93,7 @@ spec:
``` ```
[Download example](music-rc.yaml) [Download example](music-rc.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE music-rc.yaml -->
The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to
exist in the same namespace. exist in the same namespace.
@ -120,7 +120,7 @@ data:
``` ```
[Download example](apiserver-secret.yaml) [Download example](apiserver-secret.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE apiserver-secret.yaml -->
Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the basic64 encoded Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the basic64 encoded
versions of the bearer token reported by `kubectl config view` e.g. versions of the bearer token reported by `kubectl config view` e.g.
@ -186,7 +186,7 @@ spec:
``` ```
[Download example](music-service.yaml) [Download example](music-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE music-service.yaml -->
Let's create the service with an external load balancer: Let's create the service with an external load balancer:

View File

@ -101,7 +101,7 @@ spec:
``` ```
[Download example](redis-master-controller.yaml) [Download example](redis-master-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE redis-master-controller.yaml -->
Change to the `<kubernetes>/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running: Change to the `<kubernetes>/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running:
@ -222,7 +222,7 @@ spec:
``` ```
[Download example](redis-master-service.yaml) [Download example](redis-master-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE redis-master-service.yaml -->
Create the service by running: Create the service by running:
@ -296,7 +296,7 @@ spec:
``` ```
[Download example](redis-slave-controller.yaml) [Download example](redis-slave-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE redis-slave-controller.yaml -->
and create the replication controller by running: and create the replication controller by running:
@ -347,7 +347,7 @@ spec:
``` ```
[Download example](redis-slave-service.yaml) [Download example](redis-slave-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE redis-slave-service.yaml -->
This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command. This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command.
@ -398,7 +398,7 @@ spec:
``` ```
[Download example](frontend-controller.yaml) [Download example](frontend-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE frontend-controller.yaml -->
Using this file, you can turn up your frontend with: Using this file, you can turn up your frontend with:
@ -501,7 +501,7 @@ spec:
``` ```
[Download example](frontend-service.yaml) [Download example](frontend-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE frontend-service.yaml -->
#### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific) #### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)

View File

@ -84,7 +84,7 @@ spec:
``` ```
[Download example](hazelcast-service.yaml) [Download example](hazelcast-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE hazelcast-service.yaml -->
The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
@ -139,7 +139,7 @@ spec:
``` ```
[Download example](hazelcast-controller.yaml) [Download example](hazelcast-controller.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE hazelcast-controller.yaml -->
There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingle. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later). There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingle. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).

View File

@ -132,7 +132,7 @@ spec:
``` ```
[Download example](mysql.yaml) [Download example](mysql.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE mysql.yaml -->
Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created. Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created.
Once you've edited the file to set your database password, create the pod as follows, where `<kubernetes>` is the path to your Kubernetes installation: Once you've edited the file to set your database password, create the pod as follows, where `<kubernetes>` is the path to your Kubernetes installation:
@ -187,7 +187,7 @@ spec:
``` ```
[Download example](mysql-service.yaml) [Download example](mysql-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE mysql-service.yaml -->
Start the service like this: Start the service like this:
@ -242,7 +242,7 @@ spec:
``` ```
[Download example](wordpress.yaml) [Download example](wordpress.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE wordpress.yaml -->
Create the pod: Create the pod:
@ -283,7 +283,7 @@ spec:
``` ```
[Download example](wordpress-service.yaml) [Download example](wordpress-service.yaml)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE wordpress-service.yaml -->
Note the `type: LoadBalancer` setting. This will set up the wordpress service behind an external IP. Note the `type: LoadBalancer` setting. This will set up the wordpress service behind an external IP.
Note also that we've set the service port to 80. We'll return to that shortly. Note also that we've set the service port to 80. We'll return to that shortly.

View File

@ -99,7 +99,7 @@ To start Phabricator server use the file [`examples/phabricator/phabricator-cont
``` ```
[Download example](phabricator-controller.json) [Download example](phabricator-controller.json)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE phabricator-controller.json -->
Create the phabricator pod in your Kubernetes cluster by running: Create the phabricator pod in your Kubernetes cluster by running:
@ -189,7 +189,7 @@ To automate this process and make sure that a proper host is authorized even if
``` ```
[Download example](authenticator-controller.json) [Download example](authenticator-controller.json)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE authenticator-controller.json -->
To create the pod run: To create the pod run:
@ -238,7 +238,7 @@ Use the file [`examples/phabricator/phabricator-service.json`](phabricator-servi
``` ```
[Download example](phabricator-service.json) [Download example](phabricator-service.json)
<!-- END MUNGE: EXAMPLE --> <!-- END MUNGE: EXAMPLE phabricator-service.json -->
To create the service run: To create the service run: