Merge pull request #536 from s1061123/fix/update-vendor

Update vendors
This commit is contained in:
Doug Smith 2020-07-21 20:17:50 -04:00 committed by GitHub
commit c85b79f5ff
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 9736 additions and 1176 deletions

1
go.mod
View File

@ -16,6 +16,7 @@ require (
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
golang.org/x/text v0.3.3 // indirect
google.golang.org/grpc v1.23.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v2 v2.2.8 // indirect

2
go.sum
View File

@ -208,6 +208,8 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@ -6,6 +6,7 @@
package unicode // import "golang.org/x/text/encoding/unicode"
import (
"bytes"
"errors"
"unicode/utf16"
"unicode/utf8"
@ -25,15 +26,95 @@ import (
// the introduction of some kind of error type for conveying the erroneous code
// point.
// UTF8 is the UTF-8 encoding.
// UTF8 is the UTF-8 encoding. It neither removes nor adds byte order marks.
var UTF8 encoding.Encoding = utf8enc
// UTF8BOM is an UTF-8 encoding where the decoder strips a leading byte order
// mark while the encoder adds one.
//
// Some editors add a byte order mark as a signature to UTF-8 files. Although
// the byte order mark is not useful for detecting byte order in UTF-8, it is
// sometimes used as a convention to mark UTF-8-encoded files. This relies on
// the observation that the UTF-8 byte order mark is either an illegal or at
// least very unlikely sequence in any other character encoding.
var UTF8BOM encoding.Encoding = utf8bomEncoding{}
// utf8bomEncoding implements encoding.Encoding for UTF-8 with a byte order
// mark: the decoder strips a leading BOM and the encoder emits one (see the
// UTF8BOM package variable above).
type utf8bomEncoding struct{}

// String returns the display name of this encoding.
func (utf8bomEncoding) String() string {
	return "UTF-8-BOM"
}

// ID reports the encoding identifier. UTF-8 with a BOM has no official MIB
// entry, so an unofficial identifier string is returned.
func (utf8bomEncoding) ID() (identifier.MIB, string) {
	return identifier.Unofficial, "x-utf8bom"
}

// NewEncoder returns an encoder that writes a BOM before the first output
// and replaces ill-formed sequences.
func (utf8bomEncoding) NewEncoder() *encoding.Encoder {
	return &encoding.Encoder{
		Transformer: &utf8bomEncoder{t: runes.ReplaceIllFormed()},
	}
}

// NewDecoder returns a decoder that strips a leading BOM, if present.
func (utf8bomEncoding) NewDecoder() *encoding.Decoder {
	return &encoding.Decoder{Transformer: &utf8bomDecoder{}}
}
// utf8enc backs the UTF8 package variable: plain UTF-8 with no BOM handling,
// whose encoder replaces ill-formed sequences.
var utf8enc = &internal.Encoding{
	&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
	"UTF-8",
	identifier.UTF8,
}
// utf8bomDecoder strips a leading byte order mark and otherwise decodes like
// the plain UTF-8 decoder.
type utf8bomDecoder struct {
	// checked records whether the start of the stream has already been
	// inspected for a BOM.
	checked bool
}

// Reset prepares the decoder for a new input stream.
func (t *utf8bomDecoder) Reset() {
	t.checked = false
}

// Transform implements transform.Transformer. On the first call it looks for
// a BOM at the start of src, consuming it without producing output if found;
// all remaining input is delegated to the plain UTF-8 decoder.
func (t *utf8bomDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if !t.checked {
		// Too few bytes to decide whether a BOM is present: request more
		// input, except that an empty stream is trivially done.
		if !atEOF && len(src) < len(utf8BOM) {
			if len(src) == 0 {
				return 0, 0, nil
			}
			return 0, 0, transform.ErrShortSrc
		}
		if bytes.HasPrefix(src, []byte(utf8BOM)) {
			// Consume the BOM; it contributes to nSrc but not to nDst.
			nSrc += len(utf8BOM)
			src = src[len(utf8BOM):]
		}
		t.checked = true
	}
	// Delegate to the BOM-less decoder via a method expression on the
	// stateless utf8Decoder value.
	nDst, n, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF)
	nSrc += n
	return nDst, nSrc, err
}
// utf8bomEncoder writes a byte order mark before the first output and
// otherwise encodes like the plain UTF-8 encoder.
type utf8bomEncoder struct {
	// written records whether the BOM has already been emitted.
	written bool
	// t is the ill-formed-sequence replacer installed by
	// utf8bomEncoding.NewEncoder; reset together with this transformer.
	t transform.Transformer
}

// Reset prepares the encoder for a new output stream.
func (t *utf8bomEncoder) Reset() {
	t.written = false
	t.t.Reset()
}

// Transform implements transform.Transformer. The first call writes the BOM
// into dst (asking for a larger buffer if dst cannot hold it); input is then
// passed through the plain UTF-8 transform.
func (t *utf8bomEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if !t.written {
		if len(dst) < len(utf8BOM) {
			return nDst, 0, transform.ErrShortDst
		}
		nDst = copy(dst, utf8BOM)
		t.written = true
	}
	n, nSrc, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF)
	nDst += n
	return nDst, nSrc, err
}
type utf8Decoder struct{ transform.NopResetter }
func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
@ -287,16 +368,13 @@ func (u *utf16Decoder) Reset() {
}
func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(src) < 2 && atEOF && u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
if len(src) == 0 {
if atEOF && u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
return 0, 0, nil
}
if u.current.bomPolicy&acceptBOM != 0 {
if len(src) < 2 {
return 0, 0, transform.ErrShortSrc
}
if len(src) >= 2 && u.current.bomPolicy&acceptBOM != 0 {
switch {
case src[0] == 0xfe && src[1] == 0xff:
u.current.endianness = BigEndian

View File

@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) {
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
atEOF := pSrc+n == len(s)
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF)
pDst += nDst
pSrc += nSrc
@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
if atEOF {
return string(dst[:pDst]), pSrc, err
}
if nSrc == 0 {
src = grow(src, 0)
}

View File

@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() {
// Rule W1.
// Changes all NSMs.
preceedingCharacterType := s.sos
precedingCharacterType := s.sos
for i, t := range s.types {
if t == NSM {
s.types[i] = preceedingCharacterType
s.types[i] = precedingCharacterType
} else {
if t.in(LRI, RLI, FSI, PDI) {
preceedingCharacterType = ON
precedingCharacterType = ON
}
preceedingCharacterType = t
precedingCharacterType = t
}
}

View File

@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build go1.13
// +build go1.13,!go1.14
package bidi

1923
vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build go1.13
// +build go1.13,!go1.14
package norm

7710
vendor/golang.org/x/text/unicode/norm/tables12.0.0.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,19 +0,0 @@
package lumberjack_test
import (
"log"
"gopkg.in/natefinch/lumberjack.v2"
)
// To use lumberjack with the standard library's log package, just pass it into
// the SetOutput function when your application starts.
func Example() {
	log.SetOutput(&lumberjack.Logger{
		Filename:   "/var/log/myapp/foo.log",
		MaxSize:    500, // megabytes
		MaxBackups: 3,
		MaxAge:     28,   // days
		Compress:   true, // disabled by default
	})
}

View File

@ -1,205 +0,0 @@
// +build linux
package lumberjack
import (
"os"
"syscall"
"testing"
"time"
)
func TestMaintainMode(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestMaintainMode", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
mode := os.FileMode(0600)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
isNil(err, t)
f.Close()
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
filename2 := backupFile(dir)
info, err := os.Stat(filename)
isNil(err, t)
info2, err := os.Stat(filename2)
isNil(err, t)
equals(mode, info.Mode(), t)
equals(mode, info2.Mode(), t)
}
func TestMaintainOwner(t *testing.T) {
fakeFS := newFakeFS()
os_Chown = fakeFS.Chown
os_Stat = fakeFS.Stat
defer func() {
os_Chown = os.Chown
os_Stat = os.Stat
}()
currentTime = fakeTime
dir := makeTempDir("TestMaintainOwner", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644)
isNil(err, t)
f.Close()
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
equals(555, fakeFS.files[filename].uid, t)
equals(666, fakeFS.files[filename].gid, t)
}
func TestCompressMaintainMode(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestCompressMaintainMode", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
mode := os.FileMode(0600)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
isNil(err, t)
f.Close()
l := &Logger{
Compress: true,
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get compressed on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// a compressed version of the log file should now exist with the correct
// mode.
filename2 := backupFile(dir)
info, err := os.Stat(filename)
isNil(err, t)
info2, err := os.Stat(filename2+compressSuffix)
isNil(err, t)
equals(mode, info.Mode(), t)
equals(mode, info2.Mode(), t)
}
func TestCompressMaintainOwner(t *testing.T) {
fakeFS := newFakeFS()
os_Chown = fakeFS.Chown
os_Stat = fakeFS.Stat
defer func() {
os_Chown = os.Chown
os_Stat = os.Stat
}()
currentTime = fakeTime
dir := makeTempDir("TestCompressMaintainOwner", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644)
isNil(err, t)
f.Close()
l := &Logger{
Compress: true,
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get compressed on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// a compressed version of the log file should now exist with the correct
// owner.
filename2 := backupFile(dir)
equals(555, fakeFS.files[filename2+compressSuffix].uid, t)
equals(666, fakeFS.files[filename2+compressSuffix].gid, t)
}
// fakeFile records the ownership that was assigned to a file via Chown.
type fakeFile struct {
	uid int
	gid int
}

// fakeFS is an in-memory stand-in for the os Chown/Stat functions, letting
// tests observe ownership changes without touching real file ownership.
type fakeFS struct {
	files map[string]fakeFile
}

// newFakeFS returns a fakeFS ready to record Chown calls.
func newFakeFS() *fakeFS {
	fs := &fakeFS{}
	fs.files = map[string]fakeFile{}
	return fs
}

// Chown records the requested ownership for name and always succeeds.
func (fs *fakeFS) Chown(name string, uid, gid int) error {
	entry := fakeFile{uid: uid, gid: gid}
	fs.files[name] = entry
	return nil
}

// Stat wraps os.Stat but rewrites the reported owner to the fixed test
// values uid 555 / gid 666 so that ownership propagation can be asserted.
func (fs *fakeFS) Stat(name string) (os.FileInfo, error) {
	info, statErr := os.Stat(name)
	if statErr != nil {
		return nil, statErr
	}
	sys := info.Sys().(*syscall.Stat_t)
	sys.Uid = 555
	sys.Gid = 666
	return info, nil
}

View File

@ -1,816 +0,0 @@
package lumberjack
import (
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v2"
)
// !!!NOTE!!!
//
// Running these tests in parallel will almost certainly cause sporadic (or even
// regular) failures, because they're all messing with the same global variable
// that controls the logic's mocked time.Now. So... don't do that.
// Since all the tests uses the time to determine filenames etc, we need to
// control the wall clock as much as possible, which means having a wall clock
// that doesn't change unless we want it to.
var fakeCurrentTime = time.Now()
func fakeTime() time.Time {
return fakeCurrentTime
}
func TestNewFile(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestNewFile", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(logFile(dir), b, t)
fileCount(dir, 1, t)
}
func TestOpenExisting(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestOpenExisting", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
data := []byte("foo!")
err := ioutil.WriteFile(filename, data, 0644)
isNil(err, t)
existsWithContent(filename, data, t)
l := &Logger{
Filename: filename,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
// make sure the file got appended
existsWithContent(filename, append(data, b...), t)
// make sure no other files were created
fileCount(dir, 1, t)
}
func TestWriteTooLong(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestWriteTooLong", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
MaxSize: 5,
}
defer l.Close()
b := []byte("booooooooooooooo!")
n, err := l.Write(b)
notNil(err, t)
equals(0, n, t)
equals(err.Error(),
fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t)
_, err = os.Stat(logFile(dir))
assert(os.IsNotExist(err), t, "File exists, but should not have been created")
}
func TestMakeLogDir(t *testing.T) {
currentTime = fakeTime
dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat)
dir = filepath.Join(os.TempDir(), dir)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(logFile(dir), b, t)
fileCount(dir, 1, t)
}
func TestDefaultFilename(t *testing.T) {
currentTime = fakeTime
dir := os.TempDir()
filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log")
defer os.Remove(filename)
l := &Logger{}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
}
func TestAutoRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestAutoRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
fileCount(dir, 1, t)
newFakeTime()
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// the old logfile should be moved aside and the main logfile should have
// only the last write in it.
existsWithContent(filename, b2, t)
// the backup file will use the current fake time and have the old contents.
existsWithContent(backupFile(dir), b, t)
fileCount(dir, 2, t)
}
func TestFirstWriteRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestFirstWriteRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
}
defer l.Close()
start := []byte("boooooo!")
err := ioutil.WriteFile(filename, start, 0600)
isNil(err, t)
newFakeTime()
// this would make us rotate
b := []byte("fooo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
existsWithContent(backupFile(dir), start, t)
fileCount(dir, 2, t)
}
func TestMaxBackups(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestMaxBackups", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxBackups: 1,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
fileCount(dir, 1, t)
newFakeTime()
// this will put us over the max
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
secondFilename := backupFile(dir)
existsWithContent(secondFilename, b, t)
// make sure the old file still exists with the same content.
existsWithContent(filename, b2, t)
fileCount(dir, 2, t)
newFakeTime()
// this will make us rotate again
b3 := []byte("baaaaaar!")
n, err = l.Write(b3)
isNil(err, t)
equals(len(b3), n, t)
// this will use the new fake time
thirdFilename := backupFile(dir)
existsWithContent(thirdFilename, b2, t)
existsWithContent(filename, b3, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// should only have two files in the dir still
fileCount(dir, 2, t)
// second file name should still exist
existsWithContent(thirdFilename, b2, t)
// should have deleted the first backup
notExist(secondFilename, t)
// now test that we don't delete directories or non-logfile files
newFakeTime()
// create a file that is close to but different from the logfile name.
// It shouldn't get caught by our deletion filters.
notlogfile := logFile(dir) + ".foo"
err = ioutil.WriteFile(notlogfile, []byte("data"), 0644)
isNil(err, t)
// Make a directory that exactly matches our log file filters... it still
// shouldn't get caught by the deletion filter since it's a directory.
notlogfiledir := backupFile(dir)
err = os.Mkdir(notlogfiledir, 0700)
isNil(err, t)
newFakeTime()
// this will use the new fake time
fourthFilename := backupFile(dir)
// Create a log file that is/was being compressed - this should
// not be counted since both the compressed and the uncompressed
// log files still exist.
compLogFile := fourthFilename + compressSuffix
err = ioutil.WriteFile(compLogFile, []byte("compress"), 0644)
isNil(err, t)
// this will make us rotate again
b4 := []byte("baaaaaaz!")
n, err = l.Write(b4)
isNil(err, t)
equals(len(b4), n, t)
existsWithContent(fourthFilename, b3, t)
existsWithContent(fourthFilename+compressSuffix, []byte("compress"), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// We should have four things in the directory now - the 2 log files, the
// not log file, and the directory
fileCount(dir, 5, t)
// third file name should still exist
existsWithContent(filename, b4, t)
existsWithContent(fourthFilename, b3, t)
// should have deleted the first filename
notExist(thirdFilename, t)
// the not-a-logfile should still exist
exists(notlogfile, t)
// the directory
exists(notlogfiledir, t)
}
func TestCleanupExistingBackups(t *testing.T) {
// test that if we start with more backup files than we're supposed to have
// in total, that extra ones get cleaned up when we rotate.
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestCleanupExistingBackups", t)
defer os.RemoveAll(dir)
// make 3 backup files
data := []byte("data")
backup := backupFile(dir)
err := ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
newFakeTime()
backup = backupFile(dir)
err = ioutil.WriteFile(backup+compressSuffix, data, 0644)
isNil(err, t)
newFakeTime()
backup = backupFile(dir)
err = ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
// now create a primary log file with some data
filename := logFile(dir)
err = ioutil.WriteFile(filename, data, 0644)
isNil(err, t)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxBackups: 1,
}
defer l.Close()
newFakeTime()
b2 := []byte("foooooo!")
n, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// now we should only have 2 files left - the primary and one backup
fileCount(dir, 2, t)
}
func TestMaxAge(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestMaxAge", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxAge: 1,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
fileCount(dir, 1, t)
// two days later
newFakeTime()
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
existsWithContent(backupFile(dir), b, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// We should still have 2 log files, since the most recent backup was just
// created.
fileCount(dir, 2, t)
existsWithContent(filename, b2, t)
// we should have deleted the old file due to being too old
existsWithContent(backupFile(dir), b, t)
// two days later
newFakeTime()
b3 := []byte("baaaaar!")
n, err = l.Write(b3)
isNil(err, t)
equals(len(b3), n, t)
existsWithContent(backupFile(dir), b2, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// We should have 2 log files - the main log file, and the most recent
// backup. The earlier backup is past the cutoff and should be gone.
fileCount(dir, 2, t)
existsWithContent(filename, b3, t)
// we should have deleted the old file due to being too old
existsWithContent(backupFile(dir), b2, t)
}
func TestOldLogFiles(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestOldLogFiles", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
data := []byte("data")
err := ioutil.WriteFile(filename, data, 07)
isNil(err, t)
// This gives us a time with the same precision as the time we get from the
// timestamp in the name.
t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
isNil(err, t)
backup := backupFile(dir)
err = ioutil.WriteFile(backup, data, 07)
isNil(err, t)
newFakeTime()
t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
isNil(err, t)
backup2 := backupFile(dir)
err = ioutil.WriteFile(backup2, data, 07)
isNil(err, t)
l := &Logger{Filename: filename}
files, err := l.oldLogFiles()
isNil(err, t)
equals(2, len(files), t)
// should be sorted by newest file first, which would be t2
equals(t2, files[0].timestamp, t)
equals(t1, files[1].timestamp, t)
}
func TestTimeFromName(t *testing.T) {
l := &Logger{Filename: "/var/log/myfoo/foo.log"}
prefix, ext := l.prefixAndExt()
tests := []struct {
filename string
want time.Time
wantErr bool
}{
{"foo-2014-05-04T14-44-33.555.log", time.Date(2014, 5, 4, 14, 44, 33, 555000000, time.UTC), false},
{"foo-2014-05-04T14-44-33.555", time.Time{}, true},
{"2014-05-04T14-44-33.555.log", time.Time{}, true},
{"foo.log", time.Time{}, true},
}
for _, test := range tests {
got, err := l.timeFromName(test.filename, prefix, ext)
equals(got, test.want, t)
equals(err != nil, test.wantErr, t)
}
}
func TestLocalTime(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestLocalTime", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
MaxSize: 10,
LocalTime: true,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
b2 := []byte("fooooooo!")
n2, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n2, t)
existsWithContent(logFile(dir), b2, t)
existsWithContent(backupFileLocal(dir), b, t)
}
func TestRotate(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
fileCount(dir, 1, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
filename2 := backupFile(dir)
existsWithContent(filename2, b, t)
existsWithContent(filename, []byte{}, t)
fileCount(dir, 2, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
filename3 := backupFile(dir)
existsWithContent(filename3, []byte{}, t)
existsWithContent(filename, []byte{}, t)
fileCount(dir, 2, t)
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
existsWithContent(filename, b2, t)
}
func TestCompressOnRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestCompressOnRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Compress: true,
Filename: filename,
MaxSize: 10,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithContent(filename, b, t)
fileCount(dir, 1, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// the old logfile should be moved aside and the main logfile should have
// nothing in it.
existsWithContent(filename, []byte{}, t)
// we need to wait a little bit since the files get compressed on a different
// goroutine.
<-time.After(300 * time.Millisecond)
// a compressed version of the log file should now exist and the original
// should have been removed.
bc := new(bytes.Buffer)
gz := gzip.NewWriter(bc)
_, err = gz.Write(b)
isNil(err, t)
err = gz.Close()
isNil(err, t)
existsWithContent(backupFile(dir)+compressSuffix, bc.Bytes(), t)
notExist(backupFile(dir), t)
fileCount(dir, 2, t)
}
func TestCompressOnResume(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestCompressOnResume", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Compress: true,
Filename: filename,
MaxSize: 10,
}
defer l.Close()
// Create a backup file and empty "compressed" file.
filename2 := backupFile(dir)
b := []byte("foo!")
err := ioutil.WriteFile(filename2, b, 0644)
isNil(err, t)
err = ioutil.WriteFile(filename2+compressSuffix, []byte{}, 0644)
isNil(err, t)
newFakeTime()
b2 := []byte("boo!")
n, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
existsWithContent(filename, b2, t)
// we need to wait a little bit since the files get compressed on a different
// goroutine.
<-time.After(300 * time.Millisecond)
// The write should have started the compression - a compressed version of
// the log file should now exist and the original should have been removed.
bc := new(bytes.Buffer)
gz := gzip.NewWriter(bc)
_, err = gz.Write(b)
isNil(err, t)
err = gz.Close()
isNil(err, t)
existsWithContent(filename2+compressSuffix, bc.Bytes(), t)
notExist(filename2, t)
fileCount(dir, 2, t)
}
func TestJson(t *testing.T) {
data := []byte(`
{
"filename": "foo",
"maxsize": 5,
"maxage": 10,
"maxbackups": 3,
"localtime": true,
"compress": true
}`[1:])
l := Logger{}
err := json.Unmarshal(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
equals(true, l.Compress, t)
}
func TestYaml(t *testing.T) {
data := []byte(`
filename: foo
maxsize: 5
maxage: 10
maxbackups: 3
localtime: true
compress: true`[1:])
l := Logger{}
err := yaml.Unmarshal(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
equals(true, l.Compress, t)
}
func TestToml(t *testing.T) {
data := `
filename = "foo"
maxsize = 5
maxage = 10
maxbackups = 3
localtime = true
compress = true`[1:]
l := Logger{}
md, err := toml.Decode(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
equals(true, l.Compress, t)
equals(0, len(md.Undecoded()), t)
}
// makeTempDir creates a file with a semi-unique name in the OS temp directory.
// It should be based on the name of the test, to keep parallel tests from
// colliding, and must be cleaned up after the test is finished.
func makeTempDir(name string, t testing.TB) string {
dir := time.Now().Format(name + backupTimeFormat)
dir = filepath.Join(os.TempDir(), dir)
isNilUp(os.Mkdir(dir, 0700), t, 1)
return dir
}
// existsWithContent checks that the given file exists and has the correct content.
func existsWithContent(path string, content []byte, t testing.TB) {
info, err := os.Stat(path)
isNilUp(err, t, 1)
equalsUp(int64(len(content)), info.Size(), t, 1)
b, err := ioutil.ReadFile(path)
isNilUp(err, t, 1)
equalsUp(content, b, t, 1)
}
// logFile returns the log file name in the given directory for the current fake
// time.
func logFile(dir string) string {
return filepath.Join(dir, "foobar.log")
}
func backupFile(dir string) string {
return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log")
}
func backupFileLocal(dir string) string {
return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log")
}
// logFileLocal returns the log file name in the given directory for the current
// fake time using the local timezone.
func logFileLocal(dir string) string {
return filepath.Join(dir, fakeTime().Format(backupTimeFormat))
}
// fileCount checks that the number of files in the directory is exp.
func fileCount(dir string, exp int, t testing.TB) {
files, err := ioutil.ReadDir(dir)
isNilUp(err, t, 1)
// Make sure no other files were created.
equalsUp(exp, len(files), t, 1)
}
// newFakeTime sets the fake "current time" to two days later.
func newFakeTime() {
fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2)
}
func notExist(path string, t testing.TB) {
_, err := os.Stat(path)
assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err)
}
func exists(path string, t testing.TB) {
_, err := os.Stat(path)
assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err)
}

View File

@ -1,27 +0,0 @@
// +build linux
package lumberjack_test
import (
"log"
"os"
"os/signal"
"syscall"
"gopkg.in/natefinch/lumberjack.v2"
)
// Example of how to rotate in response to SIGHUP.
func ExampleLogger_Rotate() {
l := &lumberjack.Logger{}
log.SetOutput(l)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
go func() {
for {
<-c
l.Rotate()
}
}()
}

View File

@ -1,91 +0,0 @@
package lumberjack
import (
"fmt"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// assert logs the given message and fails the test if condition is false.
// The reported file/line is the caller's, via assertUp.
func assert(condition bool, t testing.TB, msg string, v ...interface{}) {
	assertUp(condition, t, 1, msg, v...)
}
// assertUp is like assert, but used inside helper functions, to ensure that
// the file and line number reported by failures corresponds to one or more
// levels up the stack.
func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) {
	if condition {
		return
	}
	// +1 skips this frame itself; caller selects how many helper frames to skip.
	_, file, line, _ := runtime.Caller(caller + 1)
	args := append([]interface{}{filepath.Base(file), line}, v...)
	fmt.Printf("%s:%d: "+msg+"\n", args...)
	t.FailNow()
}
// equals tests that the two values are equal according to reflect.DeepEqual.
// The reported file/line is the caller's, via equalsUp.
func equals(exp, act interface{}, t testing.TB) {
	equalsUp(exp, act, t, 1)
}
// equalsUp is like equals, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func equalsUp(exp, act interface{}, t testing.TB, caller int) {
	if reflect.DeepEqual(exp, act) {
		return
	}
	// +1 skips this frame itself; caller selects how many helper frames to skip.
	_, file, line, _ := runtime.Caller(caller + 1)
	fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n",
		filepath.Base(file), line, exp, exp, act, act)
	t.FailNow()
}
// isNil reports a failure if the given value is not nil. Note that values
// which cannot be nil will always fail this check.
func isNil(obtained interface{}, t testing.TB) {
	isNilUp(obtained, t, 1)
}

// isNilUp is like isNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func isNilUp(obtained interface{}, t testing.TB, caller int) {
	if !_isNil(obtained) {
		// Report the caller's location, not this helper's.
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}
// notNil reports a failure if the given value is nil.
func notNil(obtained interface{}, t testing.TB) {
	notNilUp(obtained, t, 1)
}

// notNilUp is like notNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func notNilUp(obtained interface{}, t testing.TB, caller int) {
	if _isNil(obtained) {
		// Report the caller's location, not this helper's.
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}
// _isNil is a helper function for isNil and notNil, and should not be used
// directly. It reports whether obtained is nil, including a typed nil value
// boxed in the interface (nil pointer, slice, map, channel, func, interface).
func _isNil(obtained interface{}) bool {
	if obtained == nil {
		return true
	}
	v := reflect.ValueOf(obtained)
	k := v.Kind()
	nillable := k == reflect.Chan || k == reflect.Func || k == reflect.Interface ||
		k == reflect.Map || k == reflect.Ptr || k == reflect.Slice
	return nillable && v.IsNil()
}

6
vendor/modules.txt vendored
View File

@ -126,7 +126,7 @@ golang.org/x/oauth2/internal
# golang.org/x/sys v0.0.0-20190927073244-c990c680b611
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/text v0.3.2
# golang.org/x/text v0.3.3
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex
@ -197,12 +197,12 @@ google.golang.org/grpc/tap
gopkg.in/fsnotify.v1
# gopkg.in/inf.v0 v0.9.0
gopkg.in/inf.v0
# gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/natefinch/lumberjack.v2
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
# gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/natefinch/lumberjack.v2
# k8s.io/api v0.0.0-20181115043458-b799cb063522
k8s.io/api/admissionregistration/v1alpha1
k8s.io/api/admissionregistration/v1beta1