Rewrite how the munger works
The basic idea is that in the main mungedocs pass we read the entire file and create an annotated set of lines for that file. All mungers then act on a mungeLines struct instead of on a byte array, making use of the metadata where appropriate. Helper functions exist to make updating a 'macro block' extremely easy.
This commit is contained in:
parent 4cbca2e63c
commit 8886a9940d

22 cmd/mungedocs/README.md Normal file
@ -0,0 +1,22 @@

# Documentation Mungers

Basically this is like lint/gofmt for md docs.

It does the following:

- iterate over all files in the given doc root
- for each file, split it into a slice (mungeLines) of lines (mungeLine)
  - a mungeLine has metadata about each line, typically determined by a 'fast' regex
    - metadata contains things like 'is inside a preformatted block'
    - contains a markdown header
    - has a link to another file
    - etc.
  - if you have a really slow regex with a lot of backtracking, you might want to write a fast one to limit how often you run the slow one
- each munger is then called in turn
  - they are given the mungeLines
  - they create an entirely new set of mungeLines with their modifications
  - the new set is returned
  - the new set is then fed into the next munger
- in the end we might commit the final mungeLines to the file or not (--verify)

[]()
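The README above (and the commit message) describe the annotated-line pipeline in prose. Below is a minimal, self-contained sketch of that idea, not the actual cmd/mungedocs code: the field and helper names (data, preformatted, header, link, beginTag, endTag, getMungeLines) mirror ones visible in this diff, but the bodies, the regex-free metadata rules, and the toy munger are simplified assumptions.

```go
// Sketch of the annotated-line pipeline: split once, tag each line with cheap
// metadata, then let every munger do a pass over mungeLines.
package main

import (
	"fmt"
	"strings"
)

type mungeLine struct {
	data         string
	preformatted bool // inside a ``` block
	header       bool // starts with '#'
	link         bool // looks like it contains a markdown link
	beginTag     bool // "<!-- BEGIN MUNGE: ... -->"
	endTag       bool // "<!-- END MUNGE: ... -->"
}

type mungeLines []mungeLine

// getMungeLines computes the per-line metadata once, so mungers can consult it
// instead of re-scanning the whole file with expensive regexes.
func getMungeLines(doc string) mungeLines {
	var out mungeLines
	inPre := false
	for _, line := range strings.Split(doc, "\n") {
		if strings.HasPrefix(strings.TrimSpace(line), "```") {
			inPre = !inPre
		}
		out = append(out, mungeLine{
			data:         line,
			preformatted: inPre,
			header:       strings.HasPrefix(line, "#"),
			link:         strings.Contains(line, "]("),
			beginTag:     strings.HasPrefix(line, "<!-- BEGIN MUNGE:"),
			endTag:       strings.HasPrefix(line, "<!-- END MUNGE:"),
		})
	}
	return out
}

// A toy munger: make sure headers have a space after the #'s. Real mungers
// likewise return a brand new mungeLines slice, which is then fed to the next
// munger in the chain.
func fixHeaderSpacing(mlines mungeLines) mungeLines {
	var out mungeLines
	for _, ml := range mlines {
		if ml.header && !ml.preformatted {
			level := len(ml.data) - len(strings.TrimLeft(ml.data, "#"))
			ml.data = strings.Repeat("#", level) + " " + strings.TrimLeft(ml.data, "# ")
		}
		out = append(out, ml)
	}
	return out
}

func main() {
	doc := "#Title\nsome text\n```\n#not a header\n```"
	for _, ml := range fixHeaderSpacing(getMungeLines(doc)) {
		fmt.Println(ml.data)
	}
}
```

Computing the metadata once up front is what keeps each individual munger a cheap pass over mungeLines rather than another full-file regex scan.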
@ -17,43 +17,42 @@ limitations under the License.
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"strings"
|
||||||
"regexp"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
const analyticsMungeTag = "GENERATED_ANALYTICS"
|
||||||
beginMungeExp = regexp.QuoteMeta(beginMungeTag("GENERATED_ANALYTICS"))
|
const analyticsLinePrefix = "[)
|
|
||||||
analyticsExp = regexp.QuoteMeta("[ +
|
|
||||||
"[^?]*" +
|
|
||||||
regexp.QuoteMeta("?pixel)]()")
|
|
||||||
|
|
||||||
// Matches the analytics blurb, with or without the munge headers.
|
func updateAnalytics(fileName string, mlines mungeLines) (mungeLines, error) {
|
||||||
analyticsRE = regexp.MustCompile(`[\n]*` + analyticsExp + `[\n]?` +
|
var out mungeLines
|
||||||
`|` + `[\n]*` + beginMungeExp + `[^<]*` + endMungeExp)
|
fileName, err := makeRepoRelative(fileName, fileName)
|
||||||
)
|
if err != nil {
|
||||||
|
return mlines, err
|
||||||
// This adds the analytics link to every .md file.
|
|
||||||
func checkAnalytics(fileName string, fileBytes []byte) (output []byte, err error) {
|
|
||||||
fileName = makeRepoRelative(fileName)
|
|
||||||
desired := fmt.Sprintf(`
|
|
||||||
|
|
||||||
|
|
||||||
`+beginMungeTag("GENERATED_ANALYTICS")+`
|
|
||||||
[]()
|
|
||||||
`+endMungeTag("GENERATED_ANALYTICS")+`
|
|
||||||
`, fileName)
|
|
||||||
if !analyticsRE.MatchString(desired) {
|
|
||||||
fmt.Printf("%q does not match %q", analyticsRE.String(), desired)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
//output = replaceNonPreformattedRegexp(fileBytes, analyticsRE, func(in []byte) []byte {
|
|
||||||
output = analyticsRE.ReplaceAllFunc(fileBytes, func(in []byte) []byte {
|
link := fmt.Sprintf(analyticsLinePrefix+"%s?pixel)]()", fileName)
|
||||||
return []byte{}
|
insertLines := getMungeLines(link)
|
||||||
})
|
mlines, err = removeMacroBlock(analyticsMungeTag, mlines)
|
||||||
output = bytes.TrimRight(output, "\n")
|
if err != nil {
|
||||||
output = append(output, []byte(desired)...)
|
return mlines, err
|
||||||
return output, nil
|
}
|
||||||
|
|
||||||
|
// Remove floating analytics links not surrounded by the munge tags.
|
||||||
|
for _, mline := range mlines {
|
||||||
|
if mline.preformatted || mline.header || mline.beginTag || mline.endTag {
|
||||||
|
out = append(out, mline)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(mline.data, analyticsLinePrefix) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, mline)
|
||||||
|
}
|
||||||
|
out = appendMacroBlock(out, analyticsMungeTag)
|
||||||
|
out, err = updateMacroBlock(out, analyticsMungeTag, insertLines)
|
||||||
|
if err != nil {
|
||||||
|
return mlines, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
}
|
}
|
||||||
|
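The rewritten updateAnalytics above leans on the macro-block helpers (removeMacroBlock, appendMacroBlock, updateMacroBlock) that the commit message calls out. The stand-alone sketch below illustrates only the update step, on plain strings; the helper body and the placeholder analytics URL are assumptions, not the real util.go implementation (which works on mungeLines and also consults the begin/end metadata flags).

```go
// Sketch of updating a "macro block": keep the begin/end tags, replace what
// sits between them.
package main

import (
	"fmt"
	"strings"
)

func beginMungeTag(token string) string { return fmt.Sprintf("<!-- BEGIN MUNGE: %s -->", token) }
func endMungeTag(token string) string   { return fmt.Sprintf("<!-- END MUNGE: %s -->", token) }

// updateMacroBlock replaces whatever currently sits between the begin/end tags
// for token with insert, keeping the tags themselves in place.
func updateMacroBlock(lines []string, token string, insert []string) ([]string, error) {
	begin, end := beginMungeTag(token), endMungeTag(token)
	var out []string
	inside := false
	for _, l := range lines {
		switch strings.TrimSpace(l) {
		case begin:
			if inside {
				return nil, fmt.Errorf("found second begin mark while updating macro blocks")
			}
			inside = true
			out = append(out, l)
			out = append(out, insert...)
		case end:
			if !inside {
				return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
			}
			inside = false
			out = append(out, l)
		default:
			if !inside {
				out = append(out, l)
			}
		}
	}
	if inside {
		return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
	}
	return out, nil
}

func main() {
	doc := []string{
		"Some doc text",
		beginMungeTag("GENERATED_ANALYTICS"),
		"stale analytics link",
		endMungeTag("GENERATED_ANALYTICS"),
	}
	// The real link format is elided as "[]()" on this page, so a placeholder URL is used here.
	updated, err := updateMacroBlock(doc, "GENERATED_ANALYTICS", []string{"[![Analytics](https://example.invalid/doc.md?pixel)]()"})
	if err != nil {
		panic(err)
	}
	fmt.Println(strings.Join(updated, "\n"))
}
```

With this shape, a munger like updateAnalytics only has to build the one line it wants inside the block and let the helper handle placement.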
@ -23,67 +23,71 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestAnalytics(t *testing.T) {
|
func TestAnalytics(t *testing.T) {
|
||||||
|
b := beginMungeTag("GENERATED_ANALYTICS")
|
||||||
|
e := endMungeTag("GENERATED_ANALYTICS")
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
in string
|
in string
|
||||||
out string
|
expected string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
"aoeu",
|
"aoeu",
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n"},
|
e + "\n"},
|
||||||
{
|
{
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" + "\n" +
|
||||||
"[]()",
|
"[]()",
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n"},
|
e + "\n"},
|
||||||
{
|
{
|
||||||
"aoeu" + "\n" +
|
"aoeu" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n",
|
e + "\n",
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n"},
|
e + "\n"},
|
||||||
{
|
{
|
||||||
"aoeu" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" +
|
||||||
"[]()" + "\n" + "\n" + "\n" +
|
"[]()" + "\n" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n",
|
e + "\n",
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n"},
|
e + "\n"},
|
||||||
{
|
{
|
||||||
"prefix" + "\n" +
|
"prefix" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") +
|
e +
|
||||||
"\n" + "suffix",
|
"\n" + "suffix",
|
||||||
"prefix" + "\n" + "suffix" + "\n" + "\n" + "\n" +
|
"prefix" + "\n" + "suffix" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n"},
|
e + "\n"},
|
||||||
{
|
{
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n",
|
e + "\n",
|
||||||
"aoeu" + "\n" + "\n" + "\n" +
|
"aoeu" + "\n" + "\n" + "\n" +
|
||||||
beginMungeTag("GENERATED_ANALYTICS") + "\n" +
|
b + "\n" +
|
||||||
"[]()" + "\n" +
|
"[]()" + "\n" +
|
||||||
endMungeTag("GENERATED_ANALYTICS") + "\n"},
|
e + "\n"},
|
||||||
}
|
}
|
||||||
for _, c := range cases {
|
for i, c := range cases {
|
||||||
out, err := checkAnalytics("path/to/file-name.md", []byte(c.in))
|
in := getMungeLines(c.in)
|
||||||
|
expected := getMungeLines(c.expected)
|
||||||
|
out, err := updateAnalytics("path/to/file-name.md", in)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if string(out) != c.out {
|
if !expected.Equal(out) {
|
||||||
t.Errorf("Expected \n\n%v\n\n but got \n\n%v\n\n", c.out, string(out))
|
t.Errorf("Case %d Expected \n\n%v\n\n but got \n\n%v\n\n", i, expected.String(), out.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -17,15 +17,17 @@ limitations under the License.
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"path"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
const exampleMungeTag = "EXAMPLE"
|
const exampleToken = "EXAMPLE"
|
||||||
|
|
||||||
|
const exampleLineStart = "<!-- BEGIN MUNGE: EXAMPLE"
|
||||||
|
|
||||||
|
var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", exampleToken, `(([^ ])*.(yaml|json))`)))
|
||||||
|
|
||||||
// syncExamples updates all examples in markdown file.
|
// syncExamples updates all examples in markdown file.
|
||||||
//
|
//
|
||||||
@ -43,75 +45,70 @@ const exampleMungeTag = "EXAMPLE"
|
|||||||
//
|
//
|
||||||
// [Download example](../../examples/guestbook/frontend-controller.yaml)
|
// [Download example](../../examples/guestbook/frontend-controller.yaml)
|
||||||
// <!-- END MUNGE: EXAMPLE -->
|
// <!-- END MUNGE: EXAMPLE -->
|
||||||
func syncExamples(filePath string, markdown []byte) ([]byte, error) {
|
func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) {
|
||||||
// find the example syncer begin tag
|
var err error
|
||||||
header := beginMungeTag(fmt.Sprintf("%s %s", exampleMungeTag, `(([^ ])*.(yaml|json))`))
|
type exampleTag struct {
|
||||||
exampleLinkRE := regexp.MustCompile(header)
|
token string
|
||||||
lines := splitLines(markdown)
|
linkText string
|
||||||
updatedMarkdown, err := updateExampleMacroBlock(filePath, lines, exampleLinkRE, endMungeTag(exampleMungeTag))
|
fileType string
|
||||||
if err != nil {
|
|
||||||
return updatedMarkdown, err
|
|
||||||
}
|
}
|
||||||
return updatedMarkdown, nil
|
exampleTags := []exampleTag{}
|
||||||
|
|
||||||
|
// collect all example Tags
|
||||||
|
for _, mline := range mlines {
|
||||||
|
if mline.preformatted || !mline.beginTag {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
line := mline.data
|
||||||
|
if !strings.HasPrefix(line, exampleLineStart) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
match := exampleMungeTagRE.FindStringSubmatch(line)
|
||||||
|
if len(match) < 4 {
|
||||||
|
err = fmt.Errorf("Found unparsable EXAMPLE munge line %v", line)
|
||||||
|
return mlines, err
|
||||||
|
}
|
||||||
|
tag := exampleTag{
|
||||||
|
token: exampleToken + " " + match[1],
|
||||||
|
linkText: match[1],
|
||||||
|
fileType: match[3],
|
||||||
|
}
|
||||||
|
exampleTags = append(exampleTags, tag)
|
||||||
|
}
|
||||||
|
// update all example Tags
|
||||||
|
for _, tag := range exampleTags {
|
||||||
|
example, err := exampleContent(filePath, tag.linkText, tag.fileType)
|
||||||
|
if err != nil {
|
||||||
|
return mlines, err
|
||||||
|
}
|
||||||
|
mlines, err = updateMacroBlock(mlines, tag.token, example)
|
||||||
|
if err != nil {
|
||||||
|
return mlines, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mlines, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// exampleContent retrieves the content of the file at linkPath
|
// exampleContent retrieves the content of the file at linkPath
|
||||||
func exampleContent(filePath, linkPath, fileType string) (content string, err error) {
|
func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) {
|
||||||
realRoot := path.Join(*rootDir, *repoRoot) + "/"
|
repoRel, err := makeRepoRelative(linkPath, filePath)
|
||||||
path := path.Join(realRoot, path.Dir(filePath), linkPath)
|
|
||||||
dat, err := ioutil.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return content, err
|
|
||||||
}
|
|
||||||
// remove leading and trailing spaces and newlines
|
|
||||||
trimmedFileContent := strings.TrimSpace(string(dat))
|
|
||||||
content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, linkPath)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateExampleMacroBlock sync the yaml/json example between begin tag and end tag
|
|
||||||
func updateExampleMacroBlock(filePath string, lines []string, beginMarkExp *regexp.Regexp, endMark string) ([]byte, error) {
|
|
||||||
var buffer bytes.Buffer
|
|
||||||
betweenBeginAndEnd := false
|
|
||||||
for _, line := range lines {
|
|
||||||
trimmedLine := strings.Trim(line, " \n")
|
|
||||||
if beginMarkExp.Match([]byte(trimmedLine)) {
|
|
||||||
if betweenBeginAndEnd {
|
|
||||||
return nil, fmt.Errorf("found second begin mark while updating macro blocks")
|
|
||||||
}
|
|
||||||
betweenBeginAndEnd = true
|
|
||||||
buffer.WriteString(line)
|
|
||||||
buffer.WriteString("\n")
|
|
||||||
match := beginMarkExp.FindStringSubmatch(line)
|
|
||||||
if len(match) < 4 {
|
|
||||||
return nil, fmt.Errorf("failed to parse the link in example header")
|
|
||||||
}
|
|
||||||
// match[0] is the entire expression; [1] is the link text and [3] is the file type (yaml or json).
|
|
||||||
linkText := match[1]
|
|
||||||
fileType := match[3]
|
|
||||||
example, err := exampleContent(filePath, linkText, fileType)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
buffer.WriteString(example)
|
|
||||||
} else if trimmedLine == endMark {
|
fileRel, err := makeFileRelative(linkPath, filePath)
|
||||||
if !betweenBeginAndEnd {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("found end mark without being mark while updating macro blocks")
|
return nil, err
|
||||||
}
|
}
|
||||||
// Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
|
|
||||||
buffer.WriteString("\n")
|
dat, err := ioutil.ReadFile(repoRel)
|
||||||
buffer.WriteString(line)
|
if err != nil {
|
||||||
buffer.WriteString("\n")
|
return nil, err
|
||||||
betweenBeginAndEnd = false
|
|
||||||
} else {
|
|
||||||
if !betweenBeginAndEnd {
|
|
||||||
buffer.WriteString(line)
|
|
||||||
buffer.WriteString("\n")
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
// remove leading and trailing spaces and newlines
|
||||||
if betweenBeginAndEnd {
|
trimmedFileContent := strings.TrimSpace(string(dat))
|
||||||
return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
|
content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel)
|
||||||
}
|
out := getMungeLines(content)
|
||||||
return buffer.Bytes(), nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
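For reference, a tiny sketch of what the EXAMPLE begin-tag regexp captures. The pattern literal below is written out by hand from the beginMungeTag call above, so treat it as an approximation of exampleMungeTagRE rather than the exact expression.

```go
// Group 1 of the EXAMPLE begin tag is the linked file path and group 3 is its
// type (yaml|json), as the comment in the old updateExampleMacroBlock noted.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`<!-- BEGIN MUNGE: EXAMPLE (([^ ])*.(yaml|json)) -->`)
	m := re.FindStringSubmatch("<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->")
	fmt.Println(m[1]) // testdata/pod.yaml
	fmt.Println(m[3]) // yaml
}
```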
@ -36,23 +36,26 @@ spec:
|
|||||||
`
|
`
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
in string
|
in string
|
||||||
out string
|
expected string
|
||||||
}{
|
}{
|
||||||
{"", ""},
|
{"", ""},
|
||||||
{
|
{
|
||||||
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n",
|
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
|
||||||
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n",
|
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n",
|
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
|
||||||
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n",
|
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
repoRoot = ""
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
actual, err := syncExamples("mungedocs/filename.md", []byte(c.in))
|
in := getMungeLines(c.in)
|
||||||
|
expected := getMungeLines(c.expected)
|
||||||
|
actual, err := syncExamples("filename.md", in)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if c.out != string(actual) {
|
if !expected.Equal(actual) {
|
||||||
t.Errorf("Expected example \n'%v' but got \n'%v'", c.out, string(actual))
|
t.Errorf("Expected example \n'%q' but got \n'%q'", expected.String(), actual.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -19,53 +19,56 @@ package main
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
|
var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
|
||||||
var whitespaceRegex = regexp.MustCompile(`^\s*$`)
|
|
||||||
|
|
||||||
func fixHeaderLines(fileBytes []byte) []byte {
|
func fixHeaderLine(mlines mungeLines, newlines mungeLines, linenum int) mungeLines {
|
||||||
lines := splitLines(fileBytes)
|
var out mungeLines
|
||||||
out := []string{}
|
|
||||||
for i := range lines {
|
mline := mlines[linenum]
|
||||||
matches := headerRegex.FindStringSubmatch(lines[i])
|
line := mlines[linenum].data
|
||||||
|
|
||||||
|
matches := headerRegex.FindStringSubmatch(line)
|
||||||
if matches == nil {
|
if matches == nil {
|
||||||
out = append(out, lines[i])
|
out = append(out, mline)
|
||||||
continue
|
return out
|
||||||
}
|
}
|
||||||
if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) {
|
|
||||||
out = append(out, "")
|
// There must be a blank line before the # (unless first line in file)
|
||||||
}
|
if linenum != 0 {
|
||||||
out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2]))
|
newlen := len(newlines)
|
||||||
if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) {
|
if newlines[newlen-1].data != "" {
|
||||||
out = append(out, "")
|
out = append(out, blankMungeLine)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
final := strings.Join(out, "\n")
|
|
||||||
// Preserve the end of the file.
|
// There must be a space AFTER the ##'s
|
||||||
if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' {
|
newline := fmt.Sprintf("%s %s", matches[1], matches[2])
|
||||||
final += "\n"
|
newmline := newMungeLine(newline)
|
||||||
|
out = append(out, newmline)
|
||||||
|
|
||||||
|
// The next line needs to be a blank line (unless last line in file)
|
||||||
|
if len(mlines) > linenum+1 && mlines[linenum+1].data != "" {
|
||||||
|
out = append(out, blankMungeLine)
|
||||||
}
|
}
|
||||||
return []byte(final)
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// Header lines need whitespace around them and after the #s.
|
// Header lines need whitespace around them and after the #s.
|
||||||
func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) {
|
func checkHeaderLines(filePath string, mlines mungeLines) (mungeLines, error) {
|
||||||
fbs := splitByPreformatted(fileBytes)
|
var out mungeLines
|
||||||
fbs = append([]fileBlock{{false, []byte{}}}, fbs...)
|
for i, mline := range mlines {
|
||||||
fbs = append(fbs, fileBlock{false, []byte{}})
|
if mline.preformatted {
|
||||||
|
out = append(out, mline)
|
||||||
for i := range fbs {
|
|
||||||
block := &fbs[i]
|
|
||||||
if block.preformatted {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
block.data = fixHeaderLines(block.data)
|
if !mline.header {
|
||||||
|
out = append(out, mline)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
output := []byte{}
|
newLines := fixHeaderLine(mlines, out, i)
|
||||||
for _, block := range fbs {
|
out = append(out, newLines...)
|
||||||
output = append(output, block.data...)
|
|
||||||
}
|
}
|
||||||
return output, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
@ -25,7 +25,7 @@ import (
|
|||||||
func TestHeaderLines(t *testing.T) {
|
func TestHeaderLines(t *testing.T) {
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
in string
|
in string
|
||||||
out string
|
expected string
|
||||||
}{
|
}{
|
||||||
{"", ""},
|
{"", ""},
|
||||||
{
|
{
|
||||||
@ -62,10 +62,12 @@ func TestHeaderLines(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for i, c := range cases {
|
for i, c := range cases {
|
||||||
actual, err := checkHeaderLines("filename.md", []byte(c.in))
|
in := getMungeLines(c.in)
|
||||||
|
expected := getMungeLines(c.expected)
|
||||||
|
actual, err := checkHeaderLines("filename.md", in)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if string(actual) != c.out {
|
if !actual.Equal(expected) {
|
||||||
t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
|
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -25,29 +25,25 @@ import (
|
|||||||
|
|
||||||
// Looks for lines that have kubectl commands with -f flags and files that
|
// Looks for lines that have kubectl commands with -f flags and files that
|
||||||
// don't exist.
|
// don't exist.
|
||||||
func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) {
|
func checkKubectlFileTargets(file string, mlines mungeLines) (mungeLines, error) {
|
||||||
inside := false
|
var errors []string
|
||||||
lines := splitLines(markdown)
|
for i, mline := range mlines {
|
||||||
errors := []string{}
|
if !mline.preformatted {
|
||||||
for i := range lines {
|
continue
|
||||||
if strings.HasPrefix(lines[i], "```") {
|
|
||||||
inside = !inside
|
|
||||||
}
|
}
|
||||||
if inside {
|
if err := lookForKubectl(mline.data, i); err != nil {
|
||||||
if err := lookForKubectl(lines, i); err != nil {
|
|
||||||
errors = append(errors, err.Error())
|
errors = append(errors, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
err := error(nil)
|
err := error(nil)
|
||||||
if len(errors) != 0 {
|
if len(errors) != 0 {
|
||||||
err = fmt.Errorf("%s", strings.Join(errors, "\n"))
|
err = fmt.Errorf("%s", strings.Join(errors, "\n"))
|
||||||
}
|
}
|
||||||
return markdown, err
|
return mlines, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func lookForKubectl(lines []string, lineNum int) error {
|
func lookForKubectl(line string, lineNum int) error {
|
||||||
fields := strings.Fields(lines[lineNum])
|
fields := strings.Fields(line)
|
||||||
for i := range fields {
|
for i := range fields {
|
||||||
if fields[i] == "kubectl" {
|
if fields[i] == "kubectl" {
|
||||||
return gotKubectl(lineNum, fields, i)
|
return gotKubectl(lineNum, fields, i)
|
||||||
@ -56,26 +52,26 @@ func lookForKubectl(lines []string, lineNum int) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func gotKubectl(line int, fields []string, fieldNum int) error {
|
func gotKubectl(lineNum int, fields []string, fieldNum int) error {
|
||||||
for i := fieldNum + 1; i < len(fields); i++ {
|
for i := fieldNum + 1; i < len(fields); i++ {
|
||||||
switch fields[i] {
|
switch fields[i] {
|
||||||
case "create", "update", "replace", "delete":
|
case "create", "update", "replace", "delete":
|
||||||
return gotCommand(line, fields, i)
|
return gotCommand(lineNum, fields, i)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func gotCommand(line int, fields []string, fieldNum int) error {
|
func gotCommand(lineNum int, fields []string, fieldNum int) error {
|
||||||
for i := fieldNum + 1; i < len(fields); i++ {
|
for i := fieldNum + 1; i < len(fields); i++ {
|
||||||
if strings.HasPrefix(fields[i], "-f") {
|
if strings.HasPrefix(fields[i], "-f") {
|
||||||
return gotDashF(line, fields, i)
|
return gotDashF(lineNum, fields, i)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func gotDashF(line int, fields []string, fieldNum int) error {
|
func gotDashF(lineNum int, fields []string, fieldNum int) error {
|
||||||
target := ""
|
target := ""
|
||||||
if fields[fieldNum] == "-f" {
|
if fields[fieldNum] == "-f" {
|
||||||
if fieldNum+1 == len(fields) {
|
if fieldNum+1 == len(fields) {
|
||||||
@ -112,9 +108,9 @@ func gotDashF(line int, fields []string, fieldNum int) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If we got here we expect the file to exist.
|
// If we got here we expect the file to exist.
|
||||||
_, err := os.Stat(path.Join(*rootDir, *repoRoot, target))
|
_, err := os.Stat(path.Join(repoRoot, target))
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return fmt.Errorf("%d: target file %q does not exist", line, target)
|
return fmt.Errorf("%d: target file %q does not exist", lineNum, target)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -130,9 +130,9 @@ func TestKubectlDashF(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for i, c := range cases {
|
for i, c := range cases {
|
||||||
*rootDir = ""
|
repoRoot = ""
|
||||||
*repoRoot = ""
|
in := getMungeLines(c.in)
|
||||||
_, err := checkKubectlFileTargets("filename.md", []byte(c.in))
|
_, err := checkKubectlFileTargets("filename.md", in)
|
||||||
if err != nil && c.ok {
|
if err != nil && c.ok {
|
||||||
t.Errorf("case[%d]: expected success, got %v", i, err)
|
t.Errorf("case[%d]: expected success, got %v", i, err)
|
||||||
}
|
}
|
||||||
|
@ -29,20 +29,20 @@ var (
|
|||||||
// Finds markdown links of the form [foo](bar "alt-text").
|
// Finds markdown links of the form [foo](bar "alt-text").
|
||||||
linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
|
linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
|
||||||
// Splits the link target into link target and alt-text.
|
// Splits the link target into link target and alt-text.
|
||||||
altTextRE = regexp.MustCompile(`(.*)( ".*")`)
|
altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
|
||||||
)
|
)
|
||||||
|
|
||||||
// checkLinks assumes fileBytes has links in markdown syntax, and verifies that
|
func processLink(in string, filePath string) (string, error) {
|
||||||
// any relative links actually point to files that exist.
|
var err error
|
||||||
func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
|
out := linkRE.ReplaceAllStringFunc(in, func(in string) string {
|
||||||
dir := path.Dir(filePath)
|
match := linkRE.FindStringSubmatch(in)
|
||||||
errors := []string{}
|
if match == nil {
|
||||||
|
err = fmt.Errorf("Detected this line had a link, but unable to parse, %v", in)
|
||||||
output := replaceNonPreformattedRegexp(fileBytes, linkRE, func(in []byte) (out []byte) {
|
return ""
|
||||||
match := linkRE.FindSubmatch(in)
|
}
|
||||||
// match[0] is the entire expression; [1] is the visible text and [2] is the link text.
|
// match[0] is the entire expression;
|
||||||
visibleText := string(match[1])
|
visibleText := match[1]
|
||||||
linkText := string(match[2])
|
linkText := match[2]
|
||||||
altText := ""
|
altText := ""
|
||||||
if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
|
if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
|
||||||
linkText = parts[1]
|
linkText = parts[1]
|
||||||
@ -54,13 +54,10 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
|
|||||||
linkText = strings.Trim(linkText, "\n")
|
linkText = strings.Trim(linkText, "\n")
|
||||||
linkText = strings.Trim(linkText, " ")
|
linkText = strings.Trim(linkText, " ")
|
||||||
|
|
||||||
u, err := url.Parse(linkText)
|
u, terr := url.Parse(linkText)
|
||||||
if err != nil {
|
if terr != nil {
|
||||||
errors = append(
|
err = fmt.Errorf("link %q is unparsable: %v", linkText, terr)
|
||||||
errors,
|
return ""
|
||||||
fmt.Sprintf("link %q is unparsable: %v", linkText, err),
|
|
||||||
)
|
|
||||||
return in
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if u.Host != "" && u.Host != "github.com" {
|
if u.Host != "" && u.Host != "github.com" {
|
||||||
@ -72,10 +69,8 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
|
|||||||
if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
|
if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
|
||||||
newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
|
newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
|
||||||
if !targetExists {
|
if !targetExists {
|
||||||
errors = append(
|
err = fmt.Errorf("%q: target not found", linkText)
|
||||||
errors,
|
return ""
|
||||||
fmt.Sprintf("%q: target not found", linkText),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
u.Path = newPath
|
u.Path = newPath
|
||||||
if strings.HasPrefix(u.Path, "/") {
|
if strings.HasPrefix(u.Path, "/") {
|
||||||
@ -89,11 +84,16 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
|
|||||||
// Make the visible text show the absolute path if it's
|
// Make the visible text show the absolute path if it's
|
||||||
// not nested in or beneath the current directory.
|
// not nested in or beneath the current directory.
|
||||||
if strings.HasPrefix(u.Path, "..") {
|
if strings.HasPrefix(u.Path, "..") {
|
||||||
suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path))
|
dir := path.Dir(filePath)
|
||||||
|
suggestedVisibleText, err = makeRepoRelative(path.Join(dir, u.Path), filePath)
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
suggestedVisibleText = u.Path
|
suggestedVisibleText = u.Path
|
||||||
}
|
}
|
||||||
if unescaped, err := url.QueryUnescape(u.String()); err != nil {
|
var unescaped string
|
||||||
|
if unescaped, err = url.QueryUnescape(u.String()); err != nil {
|
||||||
// Remove %28 type stuff, be nice to humans.
|
// Remove %28 type stuff, be nice to humans.
|
||||||
// And don't fight with the toc generator.
|
// And don't fight with the toc generator.
|
||||||
linkText = unescaped
|
linkText = unescaped
|
||||||
@ -107,18 +107,37 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
|
|||||||
visibleText = suggestedVisibleText
|
visibleText = suggestedVisibleText
|
||||||
}
|
}
|
||||||
|
|
||||||
return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText))
|
return fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)
|
||||||
})
|
})
|
||||||
|
if out == "" {
|
||||||
|
return in, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkLinks assumes lines has links in markdown syntax, and verifies that
|
||||||
|
// any relative links actually point to files that exist.
|
||||||
|
func checkLinks(filePath string, mlines mungeLines) (mungeLines, error) {
|
||||||
|
var out mungeLines
|
||||||
|
errors := []string{}
|
||||||
|
|
||||||
|
for _, mline := range mlines {
|
||||||
|
if mline.preformatted || !mline.link {
|
||||||
|
out = append(out, mline)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
line, err := processLink(mline.data, filePath)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err.Error())
|
||||||
|
}
|
||||||
|
ml := newMungeLine(line)
|
||||||
|
out = append(out, ml)
|
||||||
|
}
|
||||||
err := error(nil)
|
err := error(nil)
|
||||||
if len(errors) != 0 {
|
if len(errors) != 0 {
|
||||||
err = fmt.Errorf("%s", strings.Join(errors, "\n"))
|
err = fmt.Errorf("%s", strings.Join(errors, "\n"))
|
||||||
}
|
}
|
||||||
return output, err
|
return out, err
|
||||||
}
|
|
||||||
|
|
||||||
func makeRepoRelative(filePath string) string {
|
|
||||||
realRoot := path.Join(*rootDir, *repoRoot) + "/"
|
|
||||||
return strings.TrimPrefix(filePath, realRoot)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We have to append together before path.Clean will be able to tell that stuff
|
// We have to append together before path.Clean will be able to tell that stuff
|
||||||
|
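Before the new test file below, a quick stand-alone illustration of the two regexps that processLink builds on. The sample markdown line is made up; the patterns are copied from the diff above (note that altTextRE was tightened from `(.*)( ".*")` to `([^)]*)( ".*")` so it cannot run past a closing paren).

```go
// linkRE pulls out the visible text and target of a markdown link; altTextRE
// splits an optional `"alt-text"` suffix off the target.
package main

import (
	"fmt"
	"regexp"
)

var (
	linkRE    = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
	altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
)

func main() {
	m := linkRE.FindStringSubmatch(`see [the docs](../docs/README.md "Kubernetes docs") for more`)
	visible, target := m[1], m[2]
	if parts := altTextRE.FindStringSubmatch(target); parts != nil {
		target = parts[1]
	}
	fmt.Println(visible) // the docs
	fmt.Println(target)  // ../docs/README.md
}
```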
76 cmd/mungedocs/links_test.go Normal file
@ -0,0 +1,76 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = fmt.Printf
|
||||||
|
|
||||||
|
func TestBadLinks(t *testing.T) {
|
||||||
|
var cases = []struct {
|
||||||
|
in string
|
||||||
|
}{
|
||||||
|
{"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/NOTREADME.md)"},
|
||||||
|
{"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/NOTREADME.md)"},
|
||||||
|
{"[NOTREADME](../NOTREADME.md)"},
|
||||||
|
}
|
||||||
|
for _, c := range cases {
|
||||||
|
in := getMungeLines(c.in)
|
||||||
|
_, err := checkLinks("filename.md", in)
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestGoodLinks(t *testing.T) {
|
||||||
|
var cases = []struct {
|
||||||
|
in string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"", ""},
|
||||||
|
{"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/README.md)",
|
||||||
|
"[README](README.md)"},
|
||||||
|
{"[README](../README.md)",
|
||||||
|
"[README](README.md)"},
|
||||||
|
{"[README](https://lwn.net)",
|
||||||
|
"[README](https://lwn.net)"},
|
||||||
|
// _ to -
|
||||||
|
{"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/devel/cli_roadmap.md)",
|
||||||
|
"[README](../../docs/devel/cli-roadmap.md)"},
|
||||||
|
// - to _
|
||||||
|
{"[README](../../docs/devel/api-changes.md)",
|
||||||
|
"[README](../../docs/devel/api_changes.md)"},
|
||||||
|
|
||||||
|
// Does this even make sense? i dunno
|
||||||
|
{"[README](/docs/README.md)",
|
||||||
|
"[README](https://github.com/docs/README.md)"},
|
||||||
|
{"[README](/GoogleCloudPlatform/kubernetes/tree/master/docs/README.md)",
|
||||||
|
"[README](../../docs/README.md)"},
|
||||||
|
}
|
||||||
|
for i, c := range cases {
|
||||||
|
in := getMungeLines(c.in)
|
||||||
|
expected := getMungeLines(c.expected)
|
||||||
|
actual, err := checkLinks("filename.md", in)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if !actual.Equal(expected) {
|
||||||
|
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -17,7 +17,6 @@ limitations under the License.
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@ -32,13 +31,15 @@ import (
|
|||||||
var (
|
var (
|
||||||
verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
|
verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
|
||||||
rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
|
rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
|
||||||
repoRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
|
// "repo-root" seems like a dumb name, this is the relative path (from rootDir) to get to the repoRoot
|
||||||
|
relRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
|
||||||
It's done this way so that generally you just have to set --root-dir.
|
It's done this way so that generally you just have to set --root-dir.
|
||||||
Examples:
|
Examples:
|
||||||
* --root-dir=docs/ --repo-root=.. means the repository root is ./
|
* --root-dir=docs/ --repo-root=.. means the repository root is ./
|
||||||
* --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/
|
* --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/
|
||||||
* --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`)
|
* --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`)
|
||||||
skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList)
|
skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList)
|
||||||
|
repoRoot string
|
||||||
|
|
||||||
ErrChangesNeeded = errors.New("mungedocs: changes required")
|
ErrChangesNeeded = errors.New("mungedocs: changes required")
|
||||||
|
|
||||||
@ -50,7 +51,7 @@ Examples:
|
|||||||
{"check-links", checkLinks},
|
{"check-links", checkLinks},
|
||||||
{"blank-lines-surround-preformatted", checkPreformatted},
|
{"blank-lines-surround-preformatted", checkPreformatted},
|
||||||
{"header-lines", checkHeaderLines},
|
{"header-lines", checkHeaderLines},
|
||||||
{"analytics", checkAnalytics},
|
{"analytics", updateAnalytics},
|
||||||
{"kubectl-dash-f", checkKubectlFileTargets},
|
{"kubectl-dash-f", checkKubectlFileTargets},
|
||||||
{"sync-examples", syncExamples},
|
{"sync-examples", syncExamples},
|
||||||
}
|
}
|
||||||
@ -68,7 +69,7 @@ Examples:
|
|||||||
// data into a new byte array and return that.
|
// data into a new byte array and return that.
|
||||||
type munge struct {
|
type munge struct {
|
||||||
name string
|
name string
|
||||||
fn func(filePath string, before []byte) (after []byte, err error)
|
fn func(filePath string, mlines mungeLines) (after mungeLines, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type fileProcessor struct {
|
type fileProcessor struct {
|
||||||
@ -90,12 +91,14 @@ func (f fileProcessor) visit(path string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mungeLines := getMungeLines(string(fileBytes))
|
||||||
|
|
||||||
modificationsMade := false
|
modificationsMade := false
|
||||||
errFound := false
|
errFound := false
|
||||||
filePrinted := false
|
filePrinted := false
|
||||||
for _, munge := range f.munges {
|
for _, munge := range f.munges {
|
||||||
after, err := munge.fn(path, fileBytes)
|
after, err := munge.fn(path, mungeLines)
|
||||||
if err != nil || !bytes.Equal(after, fileBytes) {
|
if err != nil || !after.Equal(mungeLines) {
|
||||||
if !filePrinted {
|
if !filePrinted {
|
||||||
fmt.Printf("%s\n----\n", path)
|
fmt.Printf("%s\n----\n", path)
|
||||||
filePrinted = true
|
filePrinted = true
|
||||||
@ -110,7 +113,7 @@ func (f fileProcessor) visit(path string) error {
|
|||||||
}
|
}
|
||||||
fmt.Println("")
|
fmt.Println("")
|
||||||
}
|
}
|
||||||
fileBytes = after
|
mungeLines = after
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write out new file with any changes.
|
// Write out new file with any changes.
|
||||||
@ -119,7 +122,7 @@ func (f fileProcessor) visit(path string) error {
|
|||||||
// We're not allowed to make changes.
|
// We're not allowed to make changes.
|
||||||
return ErrChangesNeeded
|
return ErrChangesNeeded
|
||||||
}
|
}
|
||||||
ioutil.WriteFile(path, fileBytes, 0644)
|
ioutil.WriteFile(path, mungeLines.Bytes(), 0644)
|
||||||
}
|
}
|
||||||
if errFound {
|
if errFound {
|
||||||
return ErrChangesNeeded
|
return ErrChangesNeeded
|
||||||
@ -165,6 +168,7 @@ func wantedMunges() (filtered []munge) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
var err error
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if *rootDir == "" {
|
if *rootDir == "" {
|
||||||
@ -172,11 +176,9 @@ func main() {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Split the root dir of "foo/docs" into "foo" and "docs". We
|
repoRoot = path.Join(*rootDir, *relRoot)
|
||||||
// chdir into "foo" and walk "docs" so the walk is always at a
|
repoRoot, err = filepath.Abs(repoRoot)
|
||||||
// relative path.
|
if err != nil {
|
||||||
stem, leaf := path.Split(strings.TrimRight(*rootDir, "/"))
|
|
||||||
if err := os.Chdir(stem); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||||
os.Exit(2)
|
os.Exit(2)
|
||||||
}
|
}
|
||||||
@ -194,7 +196,7 @@ func main() {
|
|||||||
// changes needed, exit 1 if manual changes are needed.
|
// changes needed, exit 1 if manual changes are needed.
|
||||||
var changesNeeded bool
|
var changesNeeded bool
|
||||||
|
|
||||||
err := filepath.Walk(leaf, newWalkFunc(&fp, &changesNeeded))
|
err = filepath.Walk(*rootDir, newWalkFunc(&fp, &changesNeeded))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
|
||||||
os.Exit(2)
|
os.Exit(2)
|
||||||
|
@ -16,40 +16,26 @@ limitations under the License.
|
|||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import "bytes"
|
|
||||||
|
|
||||||
// Blocks of ``` need to have blank lines on both sides or they don't look
|
// Blocks of ``` need to have blank lines on both sides or they don't look
|
||||||
// right in HTML.
|
// right in HTML.
|
||||||
func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) {
|
func checkPreformatted(filePath string, mlines mungeLines) (mungeLines, error) {
|
||||||
f := splitByPreformatted(fileBytes)
|
var out mungeLines
|
||||||
f = append(fileBlocks{{false, []byte{}}}, f...)
|
inpreformat := false
|
||||||
f = append(f, fileBlock{false, []byte{}})
|
for i, mline := range mlines {
|
||||||
|
if !inpreformat && mline.preformatted {
|
||||||
output := []byte(nil)
|
if i == 0 || out[len(out)-1].data != "" {
|
||||||
for i := 1; i < len(f)-1; i++ {
|
out = append(out, blankMungeLine)
|
||||||
prev := &f[i-1]
|
|
||||||
block := &f[i]
|
|
||||||
next := &f[i+1]
|
|
||||||
if !block.preformatted {
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
neededSuffix := []byte("\n\n")
|
// start of a preformat block
|
||||||
for !bytes.HasSuffix(prev.data, neededSuffix) {
|
inpreformat = true
|
||||||
prev.data = append(prev.data, '\n')
|
|
||||||
}
|
}
|
||||||
for !bytes.HasSuffix(block.data, neededSuffix) {
|
out = append(out, mline)
|
||||||
block.data = append(block.data, '\n')
|
if inpreformat && !mline.preformatted {
|
||||||
if bytes.HasPrefix(next.data, []byte("\n")) {
|
if i >= len(mlines)-2 || mlines[i+1].data != "" {
|
||||||
// don't change the number of newlines unless needed.
|
out = append(out, blankMungeLine)
|
||||||
next.data = next.data[1:]
|
}
|
||||||
if len(next.data) == 0 {
|
inpreformat = false
|
||||||
f = append(f[:i+1], f[i+2:]...)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
return out, nil
|
||||||
}
|
|
||||||
for _, block := range f {
|
|
||||||
output = append(output, block.data...)
|
|
||||||
}
|
|
||||||
return output, nil
|
|
||||||
}
|
}
|
||||||
|
57 cmd/mungedocs/preformatted_test.go Normal file
@ -0,0 +1,57 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPreformatted(t *testing.T) {
|
||||||
|
var cases = []struct {
|
||||||
|
in string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"", ""},
|
||||||
|
{
|
||||||
|
"```\nbob\n```",
|
||||||
|
"\n```\nbob\n```\n\n",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"```\nbob\n```\n```\nnotbob\n```\n",
|
||||||
|
"\n```\nbob\n```\n\n```\nnotbob\n```\n\n",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"```bob```\n",
|
||||||
|
"```bob```\n",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
" ```\n bob\n ```",
|
||||||
|
"\n ```\n bob\n ```\n\n",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for i, c := range cases {
|
||||||
|
in := getMungeLines(c.in)
|
||||||
|
expected := getMungeLines(c.expected)
|
||||||
|
actual, err := checkPreformatted("filename.md", in)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if !actual.Equal(expected) {
|
||||||
|
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -17,8 +17,6 @@ limitations under the License.
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
@ -26,6 +24,8 @@ import (
|
|||||||
|
|
||||||
const tocMungeTag = "GENERATED_TOC"
|
const tocMungeTag = "GENERATED_TOC"
|
||||||
|
|
||||||
|
var r = regexp.MustCompile("[^A-Za-z0-9-]")
|
||||||
|
|
||||||
// inserts/updates a table of contents in markdown file.
|
// inserts/updates a table of contents in markdown file.
|
||||||
//
|
//
|
||||||
// First, builds a ToC.
|
// First, builds a ToC.
|
||||||
@ -33,15 +33,11 @@ const tocMungeTag = "GENERATED_TOC"
|
|||||||
// the ToC, thereby updating any previously inserted ToC.
|
// the ToC, thereby updating any previously inserted ToC.
|
||||||
//
|
//
|
||||||
// TODO(erictune): put this in own package with tests
|
// TODO(erictune): put this in own package with tests
|
||||||
func updateTOC(filePath string, markdown []byte) ([]byte, error) {
|
func updateTOC(filePath string, mlines mungeLines) (mungeLines, error) {
|
||||||
toc, err := buildTOC(markdown)
|
toc := buildTOC(mlines)
|
||||||
|
updatedMarkdown, err := updateMacroBlock(mlines, tocMungeTag, toc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return mlines, err
|
||||||
}
|
|
||||||
lines := splitLines(markdown)
|
|
||||||
updatedMarkdown, err := updateMacroBlock(lines, tocMungeTag, string(toc))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
return updatedMarkdown, nil
|
return updatedMarkdown, nil
|
||||||
}
|
}
|
||||||
@ -52,25 +48,19 @@ func updateTOC(filePath string, markdown []byte) ([]byte, error) {
|
|||||||
// and builds a table of contents from those. Assumes bookmarks for those will be
|
// and builds a table of contents from those. Assumes bookmarks for those will be
|
||||||
// like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces.
|
// like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces.
|
||||||
// builds the ToC.
|
// builds the ToC.
|
||||||
func buildTOC(markdown []byte) ([]byte, error) {
|
|
||||||
var buffer bytes.Buffer
|
func buildTOC(mlines mungeLines) mungeLines {
|
||||||
buffer.WriteString("\n")
|
var out mungeLines
|
||||||
scanner := bufio.NewScanner(bytes.NewReader(markdown))
|
|
||||||
inBlockQuotes := false
|
for _, mline := range mlines {
|
||||||
blockQuoteRegex, err := regexp.Compile("^```")
|
if mline.preformatted || !mline.header {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Text()
|
|
||||||
match := blockQuoteRegex.Match([]byte(line))
|
|
||||||
if match {
|
|
||||||
inBlockQuotes = !inBlockQuotes
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if inBlockQuotes {
|
// Add a blank line after the munge start tag
|
||||||
continue
|
if len(out) == 0 {
|
||||||
|
out = append(out, blankMungeLine)
|
||||||
}
|
}
|
||||||
|
line := mline.data
|
||||||
noSharps := strings.TrimLeft(line, "#")
|
noSharps := strings.TrimLeft(line, "#")
|
||||||
numSharps := len(line) - len(noSharps)
|
numSharps := len(line) - len(noSharps)
|
||||||
heading := strings.Trim(noSharps, " \n")
|
heading := strings.Trim(noSharps, " \n")
|
||||||
@ -78,16 +68,15 @@ func buildTOC(markdown []byte) ([]byte, error) {
|
|||||||
indent := strings.Repeat(" ", numSharps-1)
|
indent := strings.Repeat(" ", numSharps-1)
|
||||||
bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
|
bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
|
||||||
// remove symbols (except for -) in bookmarks
|
// remove symbols (except for -) in bookmarks
|
||||||
r := regexp.MustCompile("[^A-Za-z0-9-]")
|
|
||||||
bookmark = r.ReplaceAllString(bookmark, "")
|
bookmark = r.ReplaceAllString(bookmark, "")
|
||||||
tocLine := fmt.Sprintf("%s- [%s](#%s)\n", indent, heading, bookmark)
|
tocLine := fmt.Sprintf("%s- [%s](#%s)", indent, heading, bookmark)
|
||||||
buffer.WriteString(tocLine)
|
out = append(out, newMungeLine(tocLine))
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
if err := scanner.Err(); err != nil {
|
// Add a blank line before the munge end tag
|
||||||
return []byte{}, err
|
if len(out) != 0 {
|
||||||
|
out = append(out, blankMungeLine)
|
||||||
}
|
}
|
||||||
|
return out
|
||||||
return buffer.Bytes(), nil
|
|
||||||
}
|
}
|
||||||
|
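A compact sketch of the heading-to-TOC-entry transformation that buildTOC applies to each header line: indent by heading depth, then derive the bookmark by lowercasing, replacing spaces with dashes, and stripping other symbols via the package-level `[^A-Za-z0-9-]` regexp hoisted above. This is a simplified re-implementation for illustration, not the buildTOC function itself.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var nonBookmark = regexp.MustCompile("[^A-Za-z0-9-]")

// tocLine converts one markdown heading into a TOC bullet with a GitHub-style
// anchor bookmark.
func tocLine(heading string) string {
	noSharps := strings.TrimLeft(heading, "#")
	numSharps := len(heading) - len(noSharps)
	title := strings.TrimSpace(noSharps)
	bookmark := nonBookmark.ReplaceAllString(strings.Replace(strings.ToLower(title), " ", "-", -1), "")
	return strings.Repeat("  ", numSharps-1) + fmt.Sprintf("- [%s](#%s)", title, bookmark)
}

func main() {
	fmt.Println(tocLine("## Section Heading"))
	// "  - [Section Heading](#section-heading)"
	fmt.Println(tocLine("### Ok, why doesn't this work?"))
	// "    - [Ok, why doesn't this work?](#ok-why-doesnt-this-work)"
}
```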
@ -25,28 +25,29 @@ import (
|
|||||||
func Test_buildTOC(t *testing.T) {
|
func Test_buildTOC(t *testing.T) {
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
in string
|
in string
|
||||||
out string
|
expected string
|
||||||
}{
|
}{
|
||||||
{"", "\n"},
|
{"", ""},
|
||||||
{"Lorem ipsum\ndolor sit amet\n", "\n"},
|
{"Lorem ipsum\ndolor sit amet\n", ""},
|
||||||
{
|
{
|
||||||
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
|
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
|
||||||
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n",
|
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```",
|
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```",
|
||||||
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n",
|
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n",
|
"# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n",
|
||||||
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n",
|
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n\n",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, c := range cases {
|
for i, c := range cases {
|
||||||
actual, err := buildTOC([]byte(c.in))
|
in := getMungeLines(c.in)
|
||||||
assert.NoError(t, err)
|
expected := getMungeLines(c.expected)
|
||||||
if c.out != string(actual) {
|
actual := buildTOC(in)
|
||||||
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual))
|
if !expected.Equal(actual) {
|
||||||
|
t.Errorf("Case[%d] Expected TOC '%v' but got '%v'", i, expected.String(), actual.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -54,7 +55,7 @@ func Test_buildTOC(t *testing.T) {
|
|||||||
func Test_updateTOC(t *testing.T) {
|
func Test_updateTOC(t *testing.T) {
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
in string
|
in string
|
||||||
out string
|
expected string
|
||||||
}{
|
}{
|
||||||
{"", ""},
|
{"", ""},
|
||||||
{
|
{
|
||||||
@ -67,10 +68,12 @@ func Test_updateTOC(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
actual, err := updateTOC("filename.md", []byte(c.in))
|
in := getMungeLines(c.in)
|
||||||
|
expected := getMungeLines(c.expected)
|
||||||
|
actual, err := updateTOC("filename.md", in)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if c.out != string(actual) {
|
if !expected.Equal(actual) {
|
||||||
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual))
|
t.Errorf("Expected TOC '%v' but got '%v'", expected.String(), actual.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -20,7 +20,7 @@ import "fmt"
|
|||||||
|
|
||||||
const unversionedWarningTag = "UNVERSIONED_WARNING"
|
const unversionedWarningTag = "UNVERSIONED_WARNING"
|
||||||
|
|
||||||
const unversionedWarningFmt = `
|
const unversionedWarningPre = `
|
||||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||||
|
|
||||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||||
@ -41,7 +41,11 @@ refer to the docs that go with that version.
|
|||||||
|
|
||||||
<strong>
|
<strong>
|
||||||
The latest 1.0.x release of this document can be found
|
The latest 1.0.x release of this document can be found
|
||||||
[here](http://releases.k8s.io/release-1.0/%s).
|
`
|
||||||
|
|
||||||
|
const unversionedWarningFmt = `[here](http://releases.k8s.io/release-1.0/%s).`
|
||||||
|
|
||||||
|
const unversionedWarningPost = `
|
||||||
|
|
||||||
Documentation for other releases can be found at
|
Documentation for other releases can be found at
|
||||||
[releases.k8s.io](http://releases.k8s.io).
|
[releases.k8s.io](http://releases.k8s.io).
|
||||||
@ -49,21 +53,31 @@ Documentation for other releases can be found at
|
|||||||
--
|
--
|
||||||
|
|
||||||
<!-- END STRIP_FOR_RELEASE -->
|
<!-- END STRIP_FOR_RELEASE -->
|
||||||
|
|
||||||
`
|
`
|
||||||
|
|
||||||
func makeUnversionedWarning(fileName string) string {
|
func makeUnversionedWarning(fileName string) mungeLines {
|
||||||
return fmt.Sprintf(unversionedWarningFmt, fileName)
|
insert := unversionedWarningPre + fmt.Sprintf(unversionedWarningFmt, fileName) + unversionedWarningPost
|
||||||
|
return getMungeLines(insert)
|
||||||
}
|
}
|
||||||
|
|
||||||
// inserts/updates a warning for unversioned docs
|
// inserts/updates a warning for unversioned docs
|
||||||
func updateUnversionedWarning(file string, markdown []byte) ([]byte, error) {
|
func updateUnversionedWarning(file string, mlines mungeLines) (mungeLines, error) {
|
||||||
lines := splitLines(markdown)
|
file, err := makeRepoRelative(file, file)
|
||||||
if hasLine(lines, "<!-- TAG IS_VERSIONED -->") {
|
if err != nil {
|
||||||
|
return mlines, err
|
||||||
|
}
|
||||||
|
if hasLine(mlines, "<!-- TAG IS_VERSIONED -->") {
|
||||||
// No warnings on release branches
|
// No warnings on release branches
|
||||||
return markdown, nil
|
return mlines, nil
|
||||||
}
|
}
|
||||||
if !hasMacroBlock(lines, unversionedWarningTag) {
|
if !hasMacroBlock(mlines, unversionedWarningTag) {
|
||||||
lines = prependMacroBlock(unversionedWarningTag, lines)
|
mlines = prependMacroBlock(unversionedWarningTag, mlines)
|
||||||
}
|
}
|
||||||
return updateMacroBlock(lines, unversionedWarningTag, makeUnversionedWarning(file))
|
|
||||||
|
mlines, err = updateMacroBlock(mlines, unversionedWarningTag, makeUnversionedWarning(file))
|
||||||
|
if err != nil {
|
||||||
|
return mlines, err
|
||||||
|
}
|
||||||
|
return mlines, nil
|
||||||
}
|
}
|
||||||
|
@@ -26,15 +26,16 @@ func TestUnversionedWarning(t *testing.T) {
 	beginMark := beginMungeTag(unversionedWarningTag)
 	endMark := endMungeTag(unversionedWarningTag)
 
-	warningBlock := beginMark + "\n" + makeUnversionedWarning("filename.md") + "\n" + endMark + "\n"
+	warningString := makeUnversionedWarning("filename.md").String()
+	warningBlock := beginMark + "\n" + warningString + endMark + "\n"
 	var cases = []struct {
 		in       string
-		out      string
+		expected string
 	}{
 		{"", warningBlock},
 		{
 			"Foo\nBar\n",
-			warningBlock + "Foo\nBar\n",
+			warningBlock + "\nFoo\nBar\n",
 		},
 		{
 			"Foo\n<!-- TAG IS_VERSIONED -->\nBar",
@@ -58,10 +59,12 @@ func TestUnversionedWarning(t *testing.T) {
 		},
 	}
 	for i, c := range cases {
-		actual, err := updateUnversionedWarning("filename.md", []byte(c.in))
+		in := getMungeLines(c.in)
+		expected := getMungeLines(c.expected)
+		actual, err := updateUnversionedWarning("filename.md", in)
 		assert.NoError(t, err)
-		if string(actual) != c.out {
-			t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
+		if !expected.Equal(actual) {
+			t.Errorf("case[%d]: expected %v got %v", i, expected.String(), actual.String())
 		}
 	}
 }
@@ -17,102 +17,140 @@ limitations under the License.
 package main
 
 import (
-	"bytes"
 	"fmt"
+	"path"
+	"path/filepath"
 	"regexp"
 	"strings"
+	"unicode"
 )
 
-// Splits a document up into a slice of lines.
-func splitLines(document []byte) []string {
-	lines := strings.Split(string(document), "\n")
-	// Skip trailing empty string from Split-ing
-	if len(lines) > 0 && lines[len(lines)-1] == "" {
-		lines = lines[:len(lines)-1]
-	}
-	return lines
-}
-
 // Replaces the text between matching "beginMark" and "endMark" within the
 // document represented by "lines" with "insertThis".
 //
 // Delimiters should occupy own line.
 // Returns copy of document with modifications.
-func updateMacroBlock(lines []string, token, insertThis string) ([]byte, error) {
+func updateMacroBlock(mlines mungeLines, token string, insertThis mungeLines) (mungeLines, error) {
 	beginMark := beginMungeTag(token)
 	endMark := endMungeTag(token)
-	var buffer bytes.Buffer
+	var out mungeLines
 	betweenBeginAndEnd := false
-	for _, line := range lines {
-		trimmedLine := strings.Trim(line, " \n")
-		if trimmedLine == beginMark {
+	for _, mline := range mlines {
+		if mline.preformatted && !betweenBeginAndEnd {
+			out = append(out, mline)
+			continue
+		}
+		line := mline.data
+		if mline.beginTag && line == beginMark {
 			if betweenBeginAndEnd {
 				return nil, fmt.Errorf("found second begin mark while updating macro blocks")
 			}
 			betweenBeginAndEnd = true
-			buffer.WriteString(line)
-			buffer.WriteString("\n")
-		} else if trimmedLine == endMark {
+			out = append(out, mline)
+		} else if mline.endTag && line == endMark {
 			if !betweenBeginAndEnd {
-				return nil, fmt.Errorf("found end mark without being mark while updating macro blocks")
+				return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
 			}
-			buffer.WriteString(insertThis)
-			// Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
-			buffer.WriteString("\n")
-			buffer.WriteString(line)
-			buffer.WriteString("\n")
 			betweenBeginAndEnd = false
+			out = append(out, insertThis...)
+			out = append(out, mline)
 		} else {
 			if !betweenBeginAndEnd {
-				buffer.WriteString(line)
-				buffer.WriteString("\n")
+				out = append(out, mline)
 			}
 		}
 	}
 	if betweenBeginAndEnd {
 		return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
 	}
-	return buffer.Bytes(), nil
+	return out, nil
 }
 
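updateMacroBlock swaps out whatever currently sits between a matching pair of `<!-- BEGIN MUNGE: ... -->` / `<!-- END MUNGE: ... -->` comments and now returns a fresh mungeLines instead of raw bytes. A minimal sketch of a call (hypothetical usage; the token and document text are invented, and it assumes the same package as the helpers in this diff):

```go
// Hypothetical usage, not part of this commit: refill the block between the
// BEGIN/END MUNGE markers for a token, leaving everything else untouched.
func exampleRefillBlock() (mungeLines, error) {
	doc := "Intro\n" +
		beginMungeTag("GENERATED_TOC") + "\n" +
		"stale table of contents\n" +
		endMungeTag("GENERATED_TOC") + "\n" +
		"Outro\n"
	fresh := getMungeLines("- [Intro](#intro)")
	return updateMacroBlock(getMungeLines(doc), "GENERATED_TOC", fresh)
}
```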
 // Tests that a document, represented as a slice of lines, has a line. Ignores
 // leading and trailing space.
-func hasLine(lines []string, needle string) bool {
-	for _, line := range lines {
-		trimmedLine := strings.Trim(line, " \n")
-		if trimmedLine == needle {
+func hasLine(lines mungeLines, needle string) bool {
+	for _, mline := range lines {
+		haystack := strings.TrimSpace(mline.data)
+		if haystack == needle {
 			return true
 		}
 	}
 	return false
 }
 
-// Add a macro block to the beginning of a set of lines
-func prependMacroBlock(token string, lines []string) []string {
+func removeMacroBlock(token string, mlines mungeLines) (mungeLines, error) {
 	beginMark := beginMungeTag(token)
 	endMark := endMungeTag(token)
-	return append([]string{beginMark, endMark}, lines...)
+	var out mungeLines
+	betweenBeginAndEnd := false
+	for _, mline := range mlines {
+		if mline.preformatted {
+			out = append(out, mline)
+			continue
+		}
+		line := mline.data
+		if mline.beginTag && line == beginMark {
+			if betweenBeginAndEnd {
+				return nil, fmt.Errorf("found second begin mark while updating macro blocks")
+			}
+			betweenBeginAndEnd = true
+		} else if mline.endTag && line == endMark {
+			if !betweenBeginAndEnd {
+				return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
+			}
+			betweenBeginAndEnd = false
+		} else {
+			if !betweenBeginAndEnd {
+				out = append(out, mline)
+			}
+		}
+	}
+	if betweenBeginAndEnd {
+		return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
+	}
+	return out, nil
+}
+
+// Add a macro block to the beginning of a set of lines
+func prependMacroBlock(token string, mlines mungeLines) mungeLines {
+	beginLine := newMungeLine(beginMungeTag(token))
+	endLine := newMungeLine(endMungeTag(token))
+	out := mungeLines{beginLine, endLine}
+	if len(mlines) > 0 && mlines[0].data != "" {
+		out = append(out, blankMungeLine)
+	}
+	return append(out, mlines...)
 }
 
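removeMacroBlock is the inverse operation (it drops the markers and everything between them), while prependMacroBlock plants an empty block at the very top of the document and inserts a blank separator line when the document does not already start with one. A small sketch (hypothetical; the function name is made up and the same-package assumption applies):

```go
// Hypothetical sketch, not part of this commit.
func exampleAddAndStrip(mlines mungeLines) (mungeLines, error) {
	// Guarantee an (empty) analytics block at the very top of the doc...
	withBlock := prependMacroBlock("GENERATED_ANALYTICS", mlines)
	// ...then show how a munger could later strip that block back out.
	return removeMacroBlock("GENERATED_ANALYTICS", withBlock)
}
```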
 // Add a macro block to the end of a set of lines
-func appendMacroBlock(token string, lines []string) []string {
-	beginMark := beginMungeTag(token)
-	endMark := endMungeTag(token)
-	return append(lines, beginMark, endMark)
+func appendMacroBlock(mlines mungeLines, token string) mungeLines {
+	beginLine := newMungeLine(beginMungeTag(token))
+	endLine := newMungeLine(endMungeTag(token))
+	out := mlines
+	if len(mlines) > 0 && mlines[len(mlines)-1].data != "" {
+		out = append(out, blankMungeLine)
+	}
+	return append(out, beginLine, endLine)
 }
 
 // Tests that a document, represented as a slice of lines, has a macro block.
-func hasMacroBlock(lines []string, token string) bool {
+func hasMacroBlock(lines mungeLines, token string) bool {
 	beginMark := beginMungeTag(token)
 	endMark := endMungeTag(token)
 
 	foundBegin := false
-	for _, line := range lines {
-		trimmedLine := strings.Trim(line, " \n")
+	for _, mline := range lines {
+		if mline.preformatted {
+			continue
+		}
+		if !mline.beginTag && !mline.endTag {
+			continue
+		}
+		line := mline.data
 		switch {
-		case !foundBegin && trimmedLine == beginMark:
+		case !foundBegin && line == beginMark:
 			foundBegin = true
-		case foundBegin && trimmedLine == endMark:
+		case foundBegin && line == endMark:
 			return true
 		}
 	}
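appendMacroBlock now takes the lines first, adds a blank separator when the document does not already end with one, and hasMacroBlock only looks at lines the fast regexes already flagged as BEGIN/END tags. The usual pairing looks roughly like this (hypothetical sketch, same-package assumption):

```go
// Hypothetical sketch, not part of this commit: make sure a block exists at
// the bottom of the document before a later updateMacroBlock call fills it in.
func ensureBlockAtBottom(mlines mungeLines, token string) mungeLines {
	if hasMacroBlock(mlines, token) {
		return mlines
	}
	return appendMacroBlock(mlines, token)
}
```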
@@ -131,72 +169,123 @@ func endMungeTag(desc string) string {
 	return fmt.Sprintf("<!-- END MUNGE: %s -->", desc)
 }
 
-// Calls 'replace' for all sections of the document not in ``` / ``` blocks. So
-// that you don't have false positives inside those blocks.
-func replaceNonPreformatted(input []byte, replace func([]byte) []byte) []byte {
-	f := splitByPreformatted(input)
-	output := []byte(nil)
-	for _, block := range f {
-		if block.preformatted {
-			output = append(output, block.data...)
-		} else {
-			output = append(output, replace(block.data)...)
-		}
-	}
-	return output
-}
-
-type fileBlock struct {
+type mungeLine struct {
+	data         string
 	preformatted bool
-	data         []byte
+	header       bool
+	link         bool
+	beginTag     bool
+	endTag       bool
 }
 
-type fileBlocks []fileBlock
+type mungeLines []mungeLine
+
+func (m1 mungeLines) Equal(m2 mungeLines) bool {
+	if len(m1) != len(m2) {
+		return false
+	}
+	for i := range m1 {
+		if m1[i].data != m2[i].data {
+			return false
+		}
+	}
+	return true
+}
+
+func (mlines mungeLines) String() string {
+	slice := []string{}
+	for _, mline := range mlines {
+		slice = append(slice, mline.data)
+	}
+	s := strings.Join(slice, "\n")
+	// We need to tack on an extra newline at the end of the file
+	return s + "\n"
+}
+
+func (mlines mungeLines) Bytes() []byte {
+	return []byte(mlines.String())
+}
 
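Instead of splitting a document into preformatted and plain byte blocks, every line now carries its own metadata, and mungeLines gets just enough methods for the main loop: Equal to detect whether a munger changed anything and String/Bytes to serialize the result back out. A minimal round trip (hypothetical sketch; the function name and the munge callback are invented):

```go
// Hypothetical sketch, not part of this commit: run a document through the
// annotate/serialize round trip and report whether the munger changed it.
func exampleChanged(fileBytes []byte, munge func(mungeLines) mungeLines) ([]byte, bool) {
	before := getMungeLines(string(fileBytes))
	after := munge(before)
	if before.Equal(after) {
		// Equal compares only the text of each line, not the metadata.
		return fileBytes, false
	}
	return after.Bytes(), true
}
```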
 var (
 	// Finds all preformatted block start/stops.
 	preformatRE    = regexp.MustCompile("^\\s*```")
 	notPreformatRE = regexp.MustCompile("^\\s*```.*```")
+	// Is this line a header?
+	mlHeaderRE = regexp.MustCompile(`^#`)
+	// Is there a link on this line?
+	mlLinkRE = regexp.MustCompile(`\[[^]]*\]\([^)]*\)`)
+	beginTagRE = regexp.MustCompile(`<!-- BEGIN MUNGE:`)
+	endTagRE   = regexp.MustCompile(`<!-- END MUNGE:`)
+
+	blankMungeLine = newMungeLine("")
 )
 
-func splitByPreformatted(input []byte) fileBlocks {
-	f := fileBlocks{}
-	cur := []byte(nil)
-	preformatted := false
-	// SplitAfter keeps the newline, so you don't have to worry about
-	// omitting it on the last line or anything. Also, the documentation
-	// claims it's unicode safe.
-	for _, line := range bytes.SplitAfter(input, []byte("\n")) {
-		if !preformatted {
-			if preformatRE.Match(line) && !notPreformatRE.Match(line) {
-				if len(cur) > 0 {
-					f = append(f, fileBlock{false, cur})
-				}
-				cur = []byte{}
-				preformatted = true
-			}
-			cur = append(cur, line...)
-		} else {
-			cur = append(cur, line...)
-			if preformatRE.Match(line) {
-				if len(cur) > 0 {
-					f = append(f, fileBlock{true, cur})
-				}
-				cur = []byte{}
-				preformatted = false
-			}
-		}
-	}
-	if len(cur) > 0 {
-		f = append(f, fileBlock{preformatted, cur})
-	}
-	return f
-}
+// Does not set 'preformatted'
+func newMungeLine(line string) mungeLine {
+	return mungeLine{
+		data:     line,
+		header:   mlHeaderRE.MatchString(line),
+		link:     mlLinkRE.MatchString(line),
+		beginTag: beginTagRE.MatchString(line),
+		endTag:   endTagRE.MatchString(line),
+	}
+}
+
+func trimRightSpace(in string) string {
+	return strings.TrimRightFunc(in, unicode.IsSpace)
+}
+
+// Splits a document up into a slice of lines.
+func splitLines(document string) []string {
+	lines := strings.Split(document, "\n")
+	// Skip trailing empty string from Split-ing
+	if len(lines) > 0 && lines[len(lines)-1] == "" {
+		lines = lines[:len(lines)-1]
+	}
+	return lines
+}
+
+func getMungeLines(in string) mungeLines {
+	var out mungeLines
+	preformatted := false
+
+	lines := splitLines(in)
+	// We indicate if any given line is inside a preformatted block or
+	// outside a preformatted block
+	for _, line := range lines {
+		if !preformatted {
+			if preformatRE.MatchString(line) && !notPreformatRE.MatchString(line) {
+				preformatted = true
+			}
+		} else {
+			if preformatRE.MatchString(line) {
+				preformatted = false
+			}
+		}
+		ml := newMungeLine(line)
+		ml.preformatted = preformatted
+		out = append(out, ml)
+	}
+	return out
+}
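newMungeLine classifies a line once with the fast regexes (header, link, begin/end tag), and getMungeLines additionally tracks whether a line sits inside a ``` fence so mungers can skip preformatted text. A quick illustration (hypothetical; the sample strings are invented and fmt is assumed to be imported in the package):

```go
// Hypothetical illustration, not part of this commit.
func exampleLineMetadata() {
	ml := newMungeLine("## See the [docs](docs/README.md)")
	fmt.Println(ml.header, ml.link, ml.beginTag) // true true false

	doc := "text\n```\ncode inside a fence\n```\nmore text"
	for _, line := range getMungeLines(doc) {
		if line.preformatted {
			// A munger would normally leave these lines untouched.
			continue
		}
		fmt.Println(line.data)
	}
}
```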
 
-// As above, but further uses exp to parse the non-preformatted sections.
-func replaceNonPreformattedRegexp(input []byte, exp *regexp.Regexp, replace func([]byte) []byte) []byte {
-	return replaceNonPreformatted(input, func(in []byte) []byte {
-		return exp.ReplaceAllFunc(in, replace)
-	})
+// filePath is the file we are looking for
+// inFile is the file where we found the link. So if we are processing
+// /path/to/repoRoot/docs/admin/README.md and are looking for
+// ../../file.json we can find that location.
+// In many cases filePath and processingFile may be the same
+func makeRepoRelative(filePath string, processingFile string) (string, error) {
+	if filePath, err := filepath.Rel(repoRoot, filePath); err == nil {
+		return filePath, nil
+	}
+	cwd := path.Dir(processingFile)
+	return filepath.Rel(repoRoot, path.Join(cwd, filePath))
+}
+
+func makeFileRelative(filePath string, processingFile string) (string, error) {
+	cwd := path.Dir(processingFile)
+	if filePath, err := filepath.Rel(cwd, filePath); err == nil {
+		return filePath, nil
+	}
+	return filepath.Rel(cwd, path.Join(cwd, filePath))
 }
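makeRepoRelative first tries the path against repoRoot directly and, failing that, joins it with the directory of the file being processed; makeFileRelative does the same relative to that directory. A hedged sketch of a call (hypothetical; repoRoot is the package-level repository root the tool is started with, and the paths are invented):

```go
// Hypothetical sketch, not part of this commit: while munging
// docs/admin/README.md, resolve a relative link target against the repo root.
func exampleResolveLink() (string, error) {
	return makeRepoRelative("../../examples/pod.yaml", "docs/admin/README.md")
}
```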
@@ -17,7 +17,7 @@ limitations under the License.
 package main
 
 import (
-	"reflect"
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -36,20 +36,23 @@ func Test_updateMacroBlock(t *testing.T) {
 		{"Lorem ipsum\ndolor sit amet\n",
 			"Lorem ipsum\ndolor sit amet\n"},
 		{"Lorem ipsum \n" + BEGIN + "\ndolor\n" + END + "\nsit amet\n",
-			"Lorem ipsum \n " + BEGIN + "\nfoo\n\n" + END + "\nsit amet\n"},
+			"Lorem ipsum \n" + BEGIN + "\nfoo\n" + END + "\nsit amet\n"},
 	}
 	for _, c := range cases {
-		actual, err := updateMacroBlock(splitLines([]byte(c.in)), "TOKEN", "foo\n")
+		in := getMungeLines(c.in)
+		expected := getMungeLines(c.out)
+		actual, err := updateMacroBlock(in, token, getMungeLines("foo"))
 		assert.NoError(t, err)
-		if c.out != string(actual) {
-			t.Errorf("Expected '%v' but got '%v'", c.out, string(actual))
+		if !expected.Equal(actual) {
+			t.Errorf("Expected '%v' but got '%v'", expected.String(), expected.String())
 		}
 	}
 }
 
 func Test_updateMacroBlock_errors(t *testing.T) {
-	b := beginMungeTag("TOKEN")
-	e := endMungeTag("TOKEN")
+	token := "TOKEN"
+	b := beginMungeTag(token)
+	e := endMungeTag(token)
 
 	var cases = []struct {
 		in string
@@ -64,29 +67,31 @@ func Test_updateMacroBlock_errors(t *testing.T) {
 		{b + "\n" + b + "\n" + e + "\n" + e},
 	}
 	for _, c := range cases {
-		_, err := updateMacroBlock(splitLines([]byte(c.in)), "TOKEN", "foo")
+		in := getMungeLines(c.in)
+		_, err := updateMacroBlock(in, token, getMungeLines("foo"))
 		assert.Error(t, err)
 	}
 }
 
 func TestHasLine(t *testing.T) {
 	cases := []struct {
-		lines    []string
+		haystack string
 		needle   string
 		expected bool
 	}{
-		{[]string{"abc", "def", "ghi"}, "abc", true},
-		{[]string{" abc", "def", "ghi"}, "abc", true},
-		{[]string{"abc ", "def", "ghi"}, "abc", true},
-		{[]string{"\n abc", "def", "ghi"}, "abc", true},
-		{[]string{"abc \n", "def", "ghi"}, "abc", true},
-		{[]string{"abc", "def", "ghi"}, "def", true},
-		{[]string{"abc", "def", "ghi"}, "ghi", true},
-		{[]string{"abc", "def", "ghi"}, "xyz", false},
+		{"abc\ndef\nghi", "abc", true},
+		{" abc\ndef\nghi", "abc", true},
+		{"abc \ndef\nghi", "abc", true},
+		{"\n abc\ndef\nghi", "abc", true},
+		{"abc \n\ndef\nghi", "abc", true},
+		{"abc\ndef\nghi", "def", true},
+		{"abc\ndef\nghi", "ghi", true},
+		{"abc\ndef\nghi", "xyz", false},
	}
 
 	for i, c := range cases {
-		if hasLine(c.lines, c.needle) != c.expected {
+		in := getMungeLines(c.haystack)
+		if hasLine(in, c.needle) != c.expected {
 			t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected)
 		}
 	}
@@ -112,76 +117,53 @@ func TestHasMacroBlock(t *testing.T) {
 	}
 
 	for i, c := range cases {
-		if hasMacroBlock(c.lines, token) != c.expected {
+		in := getMungeLines(strings.Join(c.lines, "\n"))
+		if hasMacroBlock(in, token) != c.expected {
 			t.Errorf("case[%d]: expected %t, got %t", i, c.expected, !c.expected)
 		}
 	}
 }
 
-func TestReplaceNonPreformatted(t *testing.T) {
+func TestAppendMacroBlock(t *testing.T) {
+	token := "<<<"
+	b := beginMungeTag(token)
+	e := endMungeTag(token)
 	cases := []struct {
-		in  string
-		out string
+		in       []string
+		expected []string
 	}{
-		{"aoeu", ""},
-		{"aoeu\n```\naoeu\n```\naoeu", "```\naoeu\n```\n"},
-		{"ao\neu\n```\naoeu\n\n\n", "```\naoeu\n\n\n"},
-		{"aoeu ```aoeu``` aoeu", ""},
+		{[]string{}, []string{b, e}},
+		{[]string{"bob"}, []string{"bob", "", b, e}},
+		{[]string{b, e}, []string{b, e, "", b, e}},
 	}
 
 	for i, c := range cases {
-		out := string(replaceNonPreformatted([]byte(c.in), func([]byte) []byte { return nil }))
-		if out != c.out {
-			t.Errorf("%v: got %q, wanted %q", i, out, c.out)
+		in := getMungeLines(strings.Join(c.in, "\n"))
+		expected := getMungeLines(strings.Join(c.expected, "\n"))
+		out := appendMacroBlock(in, token)
+		if !out.Equal(expected) {
+			t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
 		}
 	}
 }
 
-func TestReplaceNonPreformattedNoChange(t *testing.T) {
+func TestPrependMacroBlock(t *testing.T) {
+	token := "<<<"
+	b := beginMungeTag(token)
+	e := endMungeTag(token)
 	cases := []struct {
-		in string
+		in       []string
+		expected []string
 	}{
-		{"aoeu"},
-		{"aoeu\n```\naoeu\n```\naoeu"},
-		{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu"},
-		{"ao\neu\n```\naoeu\n\n\n"},
-		{"aoeu ```aoeu``` aoeu"},
-		{"aoeu\n```\naoeu\n```"},
-		{"aoeu\n```\naoeu\n```\n"},
-		{"aoeu\n```\naoeu\n```\n\n"},
+		{[]string{}, []string{b, e}},
+		{[]string{"bob"}, []string{b, e, "", "bob"}},
+		{[]string{b, e}, []string{b, e, "", b, e}},
 	}
 
 	for i, c := range cases {
-		out := string(replaceNonPreformatted([]byte(c.in), func(in []byte) []byte { return in }))
-		if out != c.in {
-			t.Errorf("%v: got %q, wanted %q", i, out, c.in)
-		}
-	}
-}
-
-func TestReplaceNonPreformattedCallOrder(t *testing.T) {
-	cases := []struct {
-		in     string
-		expect []string
-	}{
-		{"aoeu", []string{"aoeu"}},
-		{"aoeu\n```\naoeu\n```\naoeu", []string{"aoeu\n", "aoeu"}},
-		{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu", []string{"aoeu\n\n", "\naoeu"}},
-		{"ao\neu\n```\naoeu\n\n\n", []string{"ao\neu\n"}},
-		{"aoeu ```aoeu``` aoeu", []string{"aoeu ```aoeu``` aoeu"}},
-		{"aoeu\n```\naoeu\n```", []string{"aoeu\n"}},
-		{"aoeu\n```\naoeu\n```\n", []string{"aoeu\n"}},
-		{"aoeu\n```\naoeu\n```\n\n", []string{"aoeu\n", "\n"}},
-	}
-
-	for i, c := range cases {
-		got := []string{}
-		replaceNonPreformatted([]byte(c.in), func(in []byte) []byte {
-			got = append(got, string(in))
-			return in
-		})
-		if e, a := c.expect, got; !reflect.DeepEqual(e, a) {
-			t.Errorf("%v: got %q, wanted %q", i, a, e)
+		in := getMungeLines(strings.Join(c.in, "\n"))
+		expected := getMungeLines(strings.Join(c.expected, "\n"))
+		out := prependMacroBlock(token, in)
+		if !out.Equal(expected) {
+			t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
 		}
 	}
 }
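The rewritten tests all share one pattern: build both the input and the expectation with getMungeLines, run the helper or munger, and compare with Equal. A hypothetical extra case in the same style (the test name is made up; it assumes it sits next to the tests above):

```go
// Hypothetical test, not part of this commit: documents tagged IS_VERSIONED
// should pass through the unversioned-warning munger unchanged.
func TestVersionedDocsPassThrough(t *testing.T) {
	in := getMungeLines("Foo\n<!-- TAG IS_VERSIONED -->\nBar")
	expected := getMungeLines("Foo\n<!-- TAG IS_VERSIONED -->\nBar")
	actual, err := updateUnversionedWarning("filename.md", in)
	assert.NoError(t, err)
	if !expected.Equal(actual) {
		t.Errorf("expected %q got %q", expected.String(), actual.String())
	}
}
```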