mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 04:06:03 +00:00
Merge pull request #20397 from pmorie/atomic-projections
Auto commit by PR queue bot
This commit is contained in: commit 10b6074a2c
457 pkg/volume/util/atomic_writer.go (normal file)
@@ -0,0 +1,457 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/util/sets"
)

const (
	maxFileNameLength = 255
	maxPathLength     = 4096
)

// AtomicWriter handles atomically projecting content for a set of files into
// a target directory. AtomicWriter maintains a sentinel file named
// "..sentinel" in the target directory which is updated after new data is
// projected into the target directory, allowing consumers of the data to
// listen for updates by monitoring the sentinel file with inotify or
// fanotify.
//
// Note:
//
// 1. AtomicWriter reserves the set of pathnames starting with `..`.
// 2. AtomicWriter offers no concurrency guarantees and must be synchronized
//    by the caller.
//
// The visible files in this volume are symlinks to files in the writer's data
// directory. Actual files are stored in a hidden timestamped directory which
// is symlinked to by the data directory. The timestamped directory and
// data directory symlink are created in the writer's target dir. This scheme
// allows the files to be atomically updated by changing the target of the
// data directory symlink.
type AtomicWriter struct {
	targetDir  string
	logContext string
}

// NewAtomicWriter creates a new AtomicWriter configured to write to the given
// target directory, or returns an error if the target directory does not exist.
func NewAtomicWriter(targetDir, logContext string) (*AtomicWriter, error) {
	_, err := os.Stat(targetDir)
	if os.IsNotExist(err) {
		return nil, err
	}

	return &AtomicWriter{targetDir: targetDir, logContext: logContext}, nil
}
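
// Illustrative usage sketch (editorial addition, not part of this change; the
// directory and payload keys below are hypothetical examples). It shows how a
// caller might project a payload with AtomicWriter:
//
//	writer, err := NewAtomicWriter("/var/lib/kubelet/pods/<uid>/volumes/example", "example-volume")
//	if err != nil {
//		// the target directory must already exist
//		return err
//	}
//	payload := map[string][]byte{
//		"podName":         []byte("my-pod"),
//		"k8s/annotations": []byte("a=b"),
//	}
//	if err := writer.Write(payload); err != nil {
//		return err
//	}
//
// Consumers then read <target-dir>/podName (a symlink through ..data) and can
// watch ..sentinel for modification-time changes to detect updates.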

const (
	sentinelFileName = "..sentinel"
	dataDirName      = "..data"
	newDataDirName   = "..data_tmp"
)

// Write does an atomic projection of the given payload into the writer's target
// directory. Input paths must not begin with '..'.
//
// The Write algorithm is:
//
//  1. The payload is validated; if the payload is invalid, the function returns
//  2. The user-visible portion of the volume is walked to determine whether any
//     portion of the payload was deleted and is still present on disk.
//     If the payload is already present on disk and there are no deleted files,
//     the function returns without error
//  3. A check is made to determine whether data present in the payload has changed
//  4. A new timestamped dir is created
//  5. The payload is written to the new timestamped directory
//  6. Symlinks and directory for new user-visible files are created (if needed).
//
//     For example, consider the files:
//       <target-dir>/podName
//       <target-dir>/user/labels
//       <target-dir>/k8s/annotations
//
//     The user-visible files are symbolic links into the internal data directory:
//       <target-dir>/podName         -> ..data/podName
//       <target-dir>/user/labels     -> ../..data/user/labels
//       <target-dir>/k8s/annotations -> ../..data/k8s/annotations
//
//     Relative links are created into the data directory for files in subdirectories.
//
//     The data directory itself is a link to a timestamped directory with
//     the real data:
//       <target-dir>/..data          -> ..2016_02_01_15_04_05.12345678/
//  7. The current timestamped directory is detected by reading the data directory
//     symlink
//  8. A symlink to the new timestamped directory ..data_tmp is created that will
//     become the new data directory
//  9. The new data directory symlink is renamed to the data directory; rename is atomic
// 10. The sentinel file modification and access times are updated (file is created if it does not
//     already exist)
// 11. Old paths are removed from the user-visible portion of the target directory
// 12. The previous timestamped directory is removed, if it exists
func (w *AtomicWriter) Write(payload map[string][]byte) error {
	// (1)
	cleanPayload, err := validatePayload(payload)
	if err != nil {
		glog.Errorf("%s: invalid payload: %v", w.logContext, err)
		return err
	}

	// (2)
	pathsToRemove, err := w.pathsToRemove(cleanPayload)
	if err != nil {
		glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
		return err
	}

	// (3)
	if should, err := w.shouldWritePayload(cleanPayload); err != nil {
		glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
		return err
	} else if !should && len(pathsToRemove) == 0 {
		glog.V(5).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
		return nil
	}

	// (4)
	tsDir, err := w.newTimestampDir()
	if err != nil {
		glog.V(5).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
		return err
	}

	// (5)
	if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
		glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
		return err
	}

	// (6)
	if err = w.createUserVisibleFiles(cleanPayload); err != nil {
		glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
		return err
	}

	// (7)
	_, tsDirName := filepath.Split(tsDir)
	dataDirPath := path.Join(w.targetDir, dataDirName)
	oldTsDir, err := os.Readlink(dataDirPath)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
		return err
	}

	// (8)
	newDataDirPath := path.Join(w.targetDir, newDataDirName)
	if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
		os.RemoveAll(tsDir)
		glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
		return err
	}

	// (9)
	if err = os.Rename(newDataDirPath, dataDirPath); err != nil {
		os.Remove(newDataDirPath)
		os.RemoveAll(tsDir)
		glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
		return err
	}

	// (10)
	if err = w.touchSentinelFile(); err != nil {
		glog.Errorf("%s: error touching sentinel file: %v", w.logContext, err)
		return err
	}

	// (11)
	if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
		glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
		return err
	}

	// (12)
	if len(oldTsDir) > 0 {
		if err = os.RemoveAll(path.Join(w.targetDir, oldTsDir)); err != nil {
			glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
			return err
		}
	}

	return nil
}
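
// The atomic-swap trick used in steps (8) and (9) above can be illustrated in
// isolation (editorial sketch; the names below are illustrative). POSIX
// rename(2) replaces the ..data link in a single step, so a reader resolving
// <target-dir>/..data always sees either the old or the new timestamped tree,
// never a partial mix:
//
//	// newTsDirName (relative to targetDir) already contains the fully written payload.
//	tmpLink := path.Join(targetDir, "..data_tmp")
//	dataLink := path.Join(targetDir, "..data")
//	if err := os.Symlink(newTsDirName, tmpLink); err != nil {
//		return err
//	}
//	if err := os.Rename(tmpLink, dataLink); err != nil { // atomic replacement
//		os.Remove(tmpLink)
//		return err
//	}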

// validatePayload returns an error if any path in the payload is invalid;
// otherwise, it returns a copy of the payload with the paths cleaned.
func validatePayload(payload map[string][]byte) (map[string][]byte, error) {
	cleanPayload := make(map[string][]byte)
	for k, content := range payload {
		if err := validatePath(k); err != nil {
			return nil, err
		}

		cleanPayload[path.Clean(k)] = content
	}

	return cleanPayload, nil
}

// validatePath validates a single path, returning an error if the path is
// invalid. Paths may not:
//
// 1. be absolute
// 2. contain '..' as an element
// 3. start with '..'
// 4. contain filenames larger than 255 characters
// 5. be longer than 4096 characters
func validatePath(targetPath string) error {
	// TODO: somehow unify this with the similar api validation,
	// validateVolumeSourcePath; the error semantics are just different enough
	// from this that it was time-prohibitive trying to find the right
	// refactoring to re-use.
	if targetPath == "" {
		return fmt.Errorf("invalid path: must not be empty: %q", targetPath)
	}
	if path.IsAbs(targetPath) {
		return fmt.Errorf("invalid path: must be relative path: %s", targetPath)
	}

	if len(targetPath) > maxPathLength {
		return fmt.Errorf("invalid path: must be less than %d characters", maxPathLength)
	}

	items := strings.Split(targetPath, string(os.PathSeparator))
	for _, item := range items {
		if item == ".." {
			return fmt.Errorf("invalid path: must not contain '..': %s", targetPath)
		}
		if len(item) > maxFileNameLength {
			return fmt.Errorf("invalid path: filenames must be less than %d characters", maxFileNameLength)
		}
	}
	if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
		return fmt.Errorf("invalid path: must not start with '..': %s", targetPath)
	}

	return nil
}
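
// A few concrete examples of how the rules above apply (editorial sketch drawn
// from the accompanying tests, not part of this change):
//
//	validatePath("i/am/well/behaved.txt") // nil: relative, no '..', within limits
//	validatePath("/dev/null")             // error: absolute
//	validatePath("hello/../etc/passwd")   // error: contains '..' as an element
//	validatePath("..sneaky.txt")          // error: reserved '..' prefix
//	validatePath("")                      // error: empty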

// shouldWritePayload returns whether the payload should be written to disk.
func (w *AtomicWriter) shouldWritePayload(payload map[string][]byte) (bool, error) {
	for userVisiblePath, content := range payload {
		shouldWrite, err := w.shouldWriteFile(path.Join(w.targetDir, userVisiblePath), content)
		if err != nil {
			return false, err
		}

		if shouldWrite {
			return true, nil
		}
	}

	return false, nil
}

// shouldWriteFile returns whether a new version of a file should be written to disk.
func (w *AtomicWriter) shouldWriteFile(path string, content []byte) (bool, error) {
	_, err := os.Lstat(path)
	if os.IsNotExist(err) {
		return true, nil
	}

	contentOnFs, err := ioutil.ReadFile(path)
	if err != nil {
		return false, err
	}

	return (bytes.Compare(content, contentOnFs) != 0), nil
}

// pathsToRemove walks the user-visible portion of the target directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string][]byte) (sets.String, error) {
	paths := sets.NewString()
	visitor := func(path string, info os.FileInfo, err error) error {
		if path == w.targetDir {
			return nil
		}

		relativePath := strings.TrimPrefix(path, w.targetDir)
		relativePath = strings.TrimPrefix(relativePath, "/")
		if strings.HasPrefix(relativePath, "..") {
			return nil
		}

		paths.Insert(relativePath)
		return nil
	}

	err := filepath.Walk(w.targetDir, visitor)
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())

	newPaths := sets.NewString()
	for file := range payload {
		// add all subpaths for the payload to the set of new paths
		// to avoid attempting to remove non-empty dirs
		for subPath := file; subPath != ""; {
			newPaths.Insert(subPath)
			subPath, _ = filepath.Split(subPath)
			subPath = strings.TrimSuffix(subPath, "/")
		}
	}
	glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())

	result := paths.Difference(newPaths)
	glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)

	return result, nil
}
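
// For example (editorial note matching the "simple 2" case in the tests): if
// the previous payload contained "foo.txt" and "zip/bar.txt" and the new
// payload contains only "foo.txt", pathsToRemove returns {"zip/bar.txt", "zip"},
// so both the stale link and its now-empty parent directory get cleaned up.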

// newTimestampDir creates a new timestamp directory
func (w *AtomicWriter) newTimestampDir() (string, error) {
	tsDir, err := ioutil.TempDir(w.targetDir, fmt.Sprintf("..%s.", time.Now().Format("2006_01_02_15_04_05")))
	if err != nil {
		glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
		return "", err
	}

	return tsDir, nil
}
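
// Go time layouts are written in terms of the reference time Mon Jan 2
// 15:04:05 MST 2006, so the "2006_01_02_15_04_05" layout above yields names
// like "..2016_02_01_15_04_05.12345678" for a write made on Feb 1, 2016 (the
// numeric suffix is appended by ioutil.TempDir). Editorial sketch of the idea:
//
//	t := time.Date(2016, time.February, 1, 15, 4, 5, 0, time.UTC)
//	fmt.Println(t.Format("2006_01_02_15_04_05")) // prints 2016_02_01_15_04_05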

// writePayloadToDir writes the given payload to the given directory. The
// directory must exist.
func (w *AtomicWriter) writePayloadToDir(payload map[string][]byte, dir string) error {
	for userVisiblePath, content := range payload {
		fullPath := path.Join(dir, userVisiblePath)
		baseDir, _ := filepath.Split(fullPath)

		err := os.MkdirAll(baseDir, os.ModePerm)
		if err != nil {
			glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
			return err
		}

		err = ioutil.WriteFile(fullPath, content, 0644)
		if err != nil {
			glog.Errorf("%s: unable to write file %s: %v", w.logContext, fullPath, err)
			return err
		}
	}

	return nil
}

// createUserVisibleFiles creates the relative symlinks for all the
// files configured in the payload. If the directory in a file path does not
// exist, it is created.
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks and subdirectories are created:
//
//	bar          -> ..data/bar
//	foo/bar      -> ../..data/foo/bar
//	baz/bar      -> ../..data/baz/bar
//	foo/baz/blah -> ../../..data/foo/baz/blah
func (w *AtomicWriter) createUserVisibleFiles(payload map[string][]byte) error {
	for userVisiblePath := range payload {
		dir, _ := filepath.Split(userVisiblePath)
		subDirs := 0
		if len(dir) > 0 {
			// If dir is not empty, the projection path contains at least one
			// subdirectory (example: userVisiblePath := "foo/bar").
			// Since filepath.Split leaves a trailing path separator, in this
			// example, dir = "foo/". In order to calculate the number of
			// subdirectories, we must subtract 1 from the number returned by split.
			subDirs = len(strings.Split(dir, "/")) - 1
			err := os.MkdirAll(path.Join(w.targetDir, dir), os.ModePerm)
			if err != nil {
				return err
			}
		}
		_, err := os.Readlink(path.Join(w.targetDir, userVisiblePath))
		if err != nil && os.IsNotExist(err) {
			// The link into the data directory for this path doesn't exist; create it,
			// respecting the number of subdirectories necessary to link
			// correctly back into the data directory.
			visibleFile := path.Join(w.targetDir, userVisiblePath)
			dataDirFile := path.Join(strings.Repeat("../", subDirs), dataDirName, userVisiblePath)

			err = os.Symlink(dataDirFile, visibleFile)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
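
// Worked example of the subDirs arithmetic above (editorial note): for
// userVisiblePath = "foo/baz/blah", filepath.Split yields dir = "foo/baz/",
// strings.Split(dir, "/") = ["foo", "baz", ""] (3 elements), so subDirs = 2 and
// the link target becomes "../../..data/foo/baz/blah", matching the table in
// the doc comment.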

// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
	orderedPaths := paths.List()
	for ii := len(orderedPaths) - 1; ii >= 0; ii-- {
		if err := os.Remove(path.Join(w.targetDir, orderedPaths[ii])); err != nil {
			glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, orderedPaths[ii], err)
			return err
		}
	}

	return nil
}

// touchSentinelFile touches the sentinel file or creates it if it doesn't exist.
func (w *AtomicWriter) touchSentinelFile() error {
	sentinelFilePath := path.Join(w.targetDir, sentinelFileName)
	_, err := os.Stat(sentinelFilePath)
	if err != nil && os.IsNotExist(err) {
		file, err := os.Create(sentinelFilePath)
		if err != nil {
			glog.Errorf("%s: unexpected error creating sentinel file %s: %v", w.logContext, sentinelFilePath, err)
			return err
		}
		file.Close()
	} else if err != nil {
		return err
	}

	ts := time.Now()
	err = os.Chtimes(sentinelFilePath, ts, ts)
	if err != nil {
		glog.Errorf("%s: error updating sentinel file mod time: %v", w.logContext, err)
		return err
	}

	return nil
}
852 pkg/volume/util/atomic_writer_test.go (normal file)
@@ -0,0 +1,852 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"encoding/base64"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/sets"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
)

func TestNewAtomicWriter(t *testing.T) {
	targetDir, err := utiltesting.MkTmpdir("atomic-write")
	if err != nil {
		t.Fatalf("unexpected error creating tmp dir: %v", err)
	}

	_, err = NewAtomicWriter(targetDir, "-test-")
	if err != nil {
		t.Fatalf("unexpected error creating writer for existing target dir: %v", err)
	}

	nonExistentDir, err := utiltesting.MkTmpdir("atomic-write")
	if err != nil {
		t.Fatalf("unexpected error creating tmp dir: %v", err)
	}
	err = os.Remove(nonExistentDir)
	if err != nil {
		t.Fatalf("unexpected error ensuring dir %v does not exist: %v", nonExistentDir, err)
	}

	_, err = NewAtomicWriter(nonExistentDir, "-test-")
	if err == nil {
		t.Fatalf("unexpected success creating writer for nonexistent target dir: %v", err)
	}
}

func TestValidatePath(t *testing.T) {
	maxPath := strings.Repeat("a", maxPathLength+1)
	maxFile := strings.Repeat("a", maxFileNameLength+1)

	cases := []struct {
		name  string
		path  string
		valid bool
	}{
		{
			name:  "valid 1",
			path:  "i/am/well/behaved.txt",
			valid: true,
		},
		{
			name:  "valid 2",
			path:  "keepyourheaddownandfollowtherules.txt",
			valid: true,
		},
		{
			name:  "max path length",
			path:  maxPath,
			valid: false,
		},
		{
			name:  "max file length",
			path:  maxFile,
			valid: false,
		},
		{
			name:  "absolute failure",
			path:  "/dev/null",
			valid: false,
		},
		{
			name:  "reserved path",
			path:  "..sneaky.txt",
			valid: false,
		},
		{
			name:  "contains doubledot 1",
			path:  "hello/there/../../../../../../etc/passwd",
			valid: false,
		},
		{
			name:  "contains doubledot 2",
			path:  "hello/../etc/somethingbad",
			valid: false,
		},
		{
			name:  "empty",
			path:  "",
			valid: false,
		},
	}

	for _, tc := range cases {
		err := validatePath(tc.path)
		if tc.valid && err != nil {
			t.Errorf("%v: unexpected failure: %v", tc.name, err)
			continue
		}

		if !tc.valid && err == nil {
			t.Errorf("%v: unexpected success", tc.name)
		}
	}
}

func TestPathsToRemove(t *testing.T) {
	cases := []struct {
		name     string
		payload1 map[string][]byte
		payload2 map[string][]byte
		expected sets.String
	}{
		{
			name: "simple",
			payload1: map[string][]byte{
				"foo.txt": []byte("foo"),
				"bar.txt": []byte("bar"),
			},
			payload2: map[string][]byte{
				"foo.txt": []byte("foo"),
			},
			expected: sets.NewString("bar.txt"),
		},
		{
			name: "simple 2",
			payload1: map[string][]byte{
				"foo.txt":     []byte("foo"),
				"zip/bar.txt": []byte("zip/bar"),
			},
			payload2: map[string][]byte{
				"foo.txt": []byte("foo"),
			},
			expected: sets.NewString("zip/bar.txt", "zip"),
		},
		{
			name: "subdirs 1",
			payload1: map[string][]byte{
				"foo.txt":         []byte("foo"),
				"zip/zap/bar.txt": []byte("zip/bar"),
			},
			payload2: map[string][]byte{
				"foo.txt": []byte("foo"),
			},
			expected: sets.NewString("zip/zap/bar.txt", "zip", "zip/zap"),
		},
		{
			name: "subdirs 2",
			payload1: map[string][]byte{
				"foo.txt":             []byte("foo"),
				"zip/1/2/3/4/bar.txt": []byte("zip/bar"),
			},
			payload2: map[string][]byte{
				"foo.txt": []byte("foo"),
			},
			expected: sets.NewString("zip/1/2/3/4/bar.txt", "zip", "zip/1", "zip/1/2", "zip/1/2/3", "zip/1/2/3/4"),
		},
		{
			name: "subdirs 3",
			payload1: map[string][]byte{
				"foo.txt":             []byte("foo"),
				"zip/1/2/3/4/bar.txt": []byte("zip/bar"),
				"zap/a/b/c/bar.txt":   []byte("zap/bar"),
			},
			payload2: map[string][]byte{
				"foo.txt": []byte("foo"),
			},
			expected: sets.NewString("zip/1/2/3/4/bar.txt", "zip", "zip/1", "zip/1/2", "zip/1/2/3", "zip/1/2/3/4", "zap", "zap/a", "zap/a/b", "zap/a/b/c", "zap/a/b/c/bar.txt"),
		},
		{
			name: "subdirs 4",
			payload1: map[string][]byte{
				"foo.txt":             []byte("foo"),
				"zap/1/2/3/4/bar.txt": []byte("zip/bar"),
				"zap/1/2/c/bar.txt":   []byte("zap/bar"),
				"zap/1/2/magic.txt":   []byte("indigo"),
			},
			payload2: map[string][]byte{
				"foo.txt":           []byte("foo"),
				"zap/1/2/magic.txt": []byte("indigo"),
			},
			expected: sets.NewString("zap/1/2/3/4/bar.txt", "zap/1/2/3", "zap/1/2/3/4", "zap/1/2/3/4/bar.txt", "zap/1/2/c", "zap/1/2/c/bar.txt"),
		},
		{
			name: "subdirs 5",
			payload1: map[string][]byte{
				"foo.txt":             []byte("foo"),
				"zap/1/2/3/4/bar.txt": []byte("zip/bar"),
				"zap/1/2/c/bar.txt":   []byte("zap/bar"),
			},
			payload2: map[string][]byte{
				"foo.txt":           []byte("foo"),
				"zap/1/2/magic.txt": []byte("indigo"),
			},
			expected: sets.NewString("zap/1/2/3/4/bar.txt", "zap/1/2/3", "zap/1/2/3/4", "zap/1/2/3/4/bar.txt", "zap/1/2/c", "zap/1/2/c/bar.txt"),
		},
	}

	for _, tc := range cases {
		targetDir, err := utiltesting.MkTmpdir("atomic-write")
		if err != nil {
			t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
			continue
		}

		writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
		err = writer.Write(tc.payload1)
		if err != nil {
			t.Errorf("%v: unexpected error writing: %v", tc.name, err)
			continue
		}

		actual, err := writer.pathsToRemove(tc.payload2)
		if err != nil {
			t.Errorf("%v: unexpected error determining paths to remove: %v", tc.name, err)
			continue
		}

		if e, a := tc.expected, actual; !e.Equal(a) {
			t.Errorf("%v: unexpected paths to remove:\nexpected: %v\n got: %v", tc.name, e, a)
		}
	}
}

func TestWriteOnce(t *testing.T) {
	// $1 if you can tell me what this binary is
	encodedMysteryBinary := `f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAeABAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAEAAOAAB
AAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAAAAfQAAAAAAAAB9AAAAAAAAAAAA
IAAAAAAAsDyZDwU=`

	mysteryBinaryBytes := make([]byte, base64.StdEncoding.DecodedLen(len(encodedMysteryBinary)))
	numBytes, err := base64.StdEncoding.Decode(mysteryBinaryBytes, []byte(encodedMysteryBinary))
	if err != nil {
		t.Fatalf("Unexpected error decoding binary payload: %v", err)
	}

	if numBytes != 125 {
		t.Fatalf("Unexpected decoded binary size: expected 125, got %v", numBytes)
	}

	cases := []struct {
		name    string
		payload map[string][]byte
		success bool
	}{
		{
			name: "invalid payload 1",
			payload: map[string][]byte{
				"foo":        []byte("foo"),
				"..bar":      []byte("bar"),
				"binary.bin": mysteryBinaryBytes,
			},
			success: false,
		},
		{
			name: "invalid payload 2",
			payload: map[string][]byte{
				"foo/../bar": []byte("foo"),
			},
			success: false,
		},
		{
			name: "basic 1",
			payload: map[string][]byte{
				"foo": []byte("foo"),
				"bar": []byte("bar"),
			},
			success: true,
		},
		{
			name: "basic 2",
			payload: map[string][]byte{
				"binary.bin":  mysteryBinaryBytes,
				".binary.bin": mysteryBinaryBytes,
			},
			success: true,
		},
		{
			name: "dotfiles",
			payload: map[string][]byte{
				"foo":           []byte("foo"),
				"bar":           []byte("bar"),
				".dotfile":      []byte("dotfile"),
				".dotfile.file": []byte("dotfile.file"),
			},
			success: true,
		},
		{
			name: "subdirectories 1",
			payload: map[string][]byte{
				"foo/bar.txt": []byte("foo/bar"),
				"bar/zab.txt": []byte("bar/zab.txt"),
			},
			success: true,
		},
		{
			name: "subdirectories 2",
			payload: map[string][]byte{
				"foo//bar.txt":      []byte("foo//bar"),
				"bar///bar/zab.txt": []byte("bar/../bar/zab.txt"),
			},
			success: true,
		},
		{
			name: "subdirectories 3",
			payload: map[string][]byte{
				"foo/bar.txt":      []byte("foo/bar"),
				"bar/zab.txt":      []byte("bar/zab.txt"),
				"foo/blaz/bar.txt": []byte("foo/blaz/bar"),
				"bar/zib/zab.txt":  []byte("bar/zib/zab.txt"),
			},
			success: true,
		},
		{
			name: "kitchen sink",
			payload: map[string][]byte{
				"foo.log":                           []byte("foo"),
				"bar.zap":                           []byte("bar"),
				".dotfile":                          []byte("dotfile"),
				"foo/bar.txt":                       []byte("foo/bar"),
				"bar/zab.txt":                       []byte("bar/zab.txt"),
				"foo/blaz/bar.txt":                  []byte("foo/blaz/bar"),
				"bar/zib/zab.txt":                   []byte("bar/zib/zab.txt"),
				"1/2/3/4/5/6/7/8/9/10/.dotfile.lib": []byte("1-2-3-dotfile"),
			},
			success: true,
		},
	}

	for _, tc := range cases {
		targetDir, err := utiltesting.MkTmpdir("atomic-write")
		if err != nil {
			t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
			continue
		}

		writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
		err = writer.Write(tc.payload)
		if err != nil && tc.success {
			t.Errorf("%v: unexpected error writing payload: %v", tc.name, err)
			continue
		} else if err == nil && !tc.success {
			t.Errorf("%v: unexpected success", tc.name)
			continue
		} else if err != nil {
			continue
		}

		checkVolumeContents(targetDir, tc.name, tc.payload, t)
		checkSentinelFile(targetDir, t)
	}
}

func TestUpdate(t *testing.T) {
	cases := []struct {
		name        string
		first       map[string][]byte
		next        map[string][]byte
		shouldWrite bool
	}{
		{
			name: "update",
			first: map[string][]byte{
				"foo": []byte("foo"),
				"bar": []byte("bar"),
			},
			next: map[string][]byte{
				"foo": []byte("foo2"),
				"bar": []byte("bar2"),
			},
			shouldWrite: true,
		},
		{
			name: "no update",
			first: map[string][]byte{
				"foo": []byte("foo"),
				"bar": []byte("bar"),
			},
			next: map[string][]byte{
				"foo": []byte("foo"),
				"bar": []byte("bar"),
			},
			shouldWrite: false,
		},
		{
			name: "no update 2",
			first: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
			},
			shouldWrite: false,
		},
		{
			name: "add 1",
			first: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
				"blu/zip.txt": []byte("zip"),
			},
			shouldWrite: true,
		},
		{
			name: "add 2",
			first: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt":             []byte("foo"),
				"bar/zab.txt":             []byte("bar"),
				"blu/two/2/3/4/5/zip.txt": []byte("zip"),
			},
			shouldWrite: true,
		},
		{
			name: "add 3",
			first: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt":         []byte("foo"),
				"bar/zab.txt":         []byte("bar"),
				"bar/2/3/4/5/zip.txt": []byte("zip"),
			},
			shouldWrite: true,
		},
		{
			name: "delete 1",
			first: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
				"bar/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
			},
			shouldWrite: true,
		},
		{
			name: "delete 2",
			first: map[string][]byte{
				"foo/bar.txt":       []byte("foo"),
				"bar/1/2/3/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
			},
			shouldWrite: true,
		},
		{
			name: "delete 3",
			first: map[string][]byte{
				"foo/bar.txt":       []byte("foo"),
				"bar/1/2/sip.txt":   []byte("sip"),
				"bar/1/2/3/zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt":     []byte("foo"),
				"bar/1/2/sip.txt": []byte("sip"),
			},
			shouldWrite: true,
		},
		{
			name: "delete 4",
			first: map[string][]byte{
				"foo/bar.txt":            []byte("foo"),
				"bar/1/2/sip.txt":        []byte("sip"),
				"bar/1/2/3/4/5/6zab.txt": []byte("bar"),
			},
			next: map[string][]byte{
				"foo/bar.txt":     []byte("foo"),
				"bar/1/2/sip.txt": []byte("sip"),
			},
			shouldWrite: true,
		},
		{
			name: "delete all",
			first: map[string][]byte{
				"foo/bar.txt":            []byte("foo"),
				"bar/1/2/sip.txt":        []byte("sip"),
				"bar/1/2/3/4/5/6zab.txt": []byte("bar"),
			},
			next:        map[string][]byte{},
			shouldWrite: true,
		},
		{
			name: "add and delete 1",
			first: map[string][]byte{
				"foo/bar.txt": []byte("foo"),
			},
			next: map[string][]byte{
				"bar/baz.txt": []byte("baz"),
			},
			shouldWrite: true,
		},
	}

	for _, tc := range cases {
		targetDir, err := utiltesting.MkTmpdir("atomic-write")
		if err != nil {
			t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
			continue
		}

		writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}

		err = writer.Write(tc.first)
		if err != nil {
			t.Errorf("%v: unexpected error writing: %v", tc.name, err)
			continue
		}

		checkVolumeContents(targetDir, tc.name, tc.first, t)
		if !tc.shouldWrite {
			continue
		}

		oldTs := checkSentinelFile(targetDir, t)

		err = writer.Write(tc.next)
		if err != nil {
			if tc.shouldWrite {
				t.Errorf("%v: unexpected error writing: %v", tc.name, err)
				continue
			}
		} else if !tc.shouldWrite {
			t.Errorf("%v: unexpected success", tc.name)
			continue
		}

		checkVolumeContents(targetDir, tc.name, tc.next, t)

		ts := checkSentinelFile(targetDir, t)
		if !ts.After(oldTs) {
			t.Errorf("Unexpected timestamp on sentinel file; expected %v to be after %v", ts, oldTs)
		}
	}
}

func TestMultipleUpdates(t *testing.T) {
	cases := []struct {
		name          string
		payloads      []map[string][]byte
		clearSentinel bool
	}{
		{
			name: "update 1",
			payloads: []map[string][]byte{
				{
					"foo": []byte("foo"),
					"bar": []byte("bar"),
				},
				{
					"foo": []byte("foo2"),
					"bar": []byte("bar2"),
				},
				{
					"foo": []byte("foo3"),
					"bar": []byte("bar3"),
				},
			},
		},
		{
			name: "update 2",
			payloads: []map[string][]byte{
				{
					"foo/bar.txt": []byte("foo/bar"),
					"bar/zab.txt": []byte("bar/zab.txt"),
				},
				{
					"foo/bar.txt": []byte("foo/bar2"),
					"bar/zab.txt": []byte("bar/zab.txt2"),
				},
			},
		},
		{
			name: "clear sentinel",
			payloads: []map[string][]byte{
				{
					"foo": []byte("foo"),
					"bar": []byte("bar"),
				},
				{
					"foo": []byte("foo2"),
					"bar": []byte("bar2"),
				},
				{
					"foo": []byte("foo3"),
					"bar": []byte("bar3"),
				},
				{
					"foo": []byte("foo4"),
					"bar": []byte("bar4"),
				},
			},
			clearSentinel: true,
		},
		{
			name: "subdirectories 2",
			payloads: []map[string][]byte{
				{
					"foo/bar.txt":      []byte("foo/bar"),
					"bar/zab.txt":      []byte("bar/zab.txt"),
					"foo/blaz/bar.txt": []byte("foo/blaz/bar"),
					"bar/zib/zab.txt":  []byte("bar/zib/zab.txt"),
				},
				{
					"foo/bar.txt":      []byte("foo/bar2"),
					"bar/zab.txt":      []byte("bar/zab.txt2"),
					"foo/blaz/bar.txt": []byte("foo/blaz/bar2"),
					"bar/zib/zab.txt":  []byte("bar/zib/zab.txt2"),
				},
			},
		},
		{
			name: "add 1",
			payloads: []map[string][]byte{
				{
					"foo/bar.txt":            []byte("foo/bar"),
					"bar//zab.txt":           []byte("bar/zab.txt"),
					"foo/blaz/bar.txt":       []byte("foo/blaz/bar"),
					"bar/zib////zib/zab.txt": []byte("bar/zib/zab.txt"),
				},
				{
					"foo/bar.txt":      []byte("foo/bar2"),
					"bar/zab.txt":      []byte("bar/zab.txt2"),
					"foo/blaz/bar.txt": []byte("foo/blaz/bar2"),
					"bar/zib/zab.txt":  []byte("bar/zib/zab.txt2"),
					"add/new/keys.txt": []byte("addNewKeys"),
				},
			},
		},
		{
			name: "add 2",
			payloads: []map[string][]byte{
				{
					"foo/bar.txt":      []byte("foo/bar2"),
					"bar/zab.txt":      []byte("bar/zab.txt2"),
					"foo/blaz/bar.txt": []byte("foo/blaz/bar2"),
					"bar/zib/zab.txt":  []byte("bar/zib/zab.txt2"),
					"add/new/keys.txt": []byte("addNewKeys"),
				},
				{
					"foo/bar.txt":       []byte("foo/bar2"),
					"bar/zab.txt":       []byte("bar/zab.txt2"),
					"foo/blaz/bar.txt":  []byte("foo/blaz/bar2"),
					"bar/zib/zab.txt":   []byte("bar/zib/zab.txt2"),
					"add/new/keys.txt":  []byte("addNewKeys"),
					"add/new/keys2.txt": []byte("addNewKeys2"),
					"add/new/keys3.txt": []byte("addNewKeys3"),
				},
			},
		},
		{
			name: "remove 1",
			payloads: []map[string][]byte{
				{
					"foo/bar.txt":         []byte("foo/bar"),
					"bar//zab.txt":        []byte("bar/zab.txt"),
					"foo/blaz/bar.txt":    []byte("foo/blaz/bar"),
					"zip/zap/zup/fop.txt": []byte("zip/zap/zup/fop.txt"),
				},
				{
					"foo/bar.txt": []byte("foo/bar2"),
					"bar/zab.txt": []byte("bar/zab.txt2"),
				},
				{
					"foo/bar.txt": []byte("foo/bar"),
				},
			},
		},
	}

	for _, tc := range cases {
		targetDir, err := utiltesting.MkTmpdir("atomic-write")
		if err != nil {
			t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
			continue
		}

		var oldTs *time.Time = nil
		writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}

		for ii, payload := range tc.payloads {
			writer.Write(payload)

			checkVolumeContents(targetDir, tc.name, payload, t)
			ts := checkSentinelFile(targetDir, t)

			if oldTs != nil && !ts.After(*oldTs) {
				t.Errorf("%v[%v] unexpected timestamp on sentinel file; expected %v to be after %v", tc.name, ii, ts, oldTs)
			}
			oldTs = &ts

			if tc.clearSentinel {
				clearSentinelFile(targetDir, t)
			}
		}
	}
}

func TestSentinelFileModTimeIncreasing(t *testing.T) {
	cases := []struct {
		name              string
		iterations        int
		clearSentinelFile bool
	}{
		{
			name:       "5 iters",
			iterations: 5,
		},
		{
			name:       "50 iters",
			iterations: 50,
		},
		{
			name:       "1000 iters",
			iterations: 1000,
		},
		{
			name:              "1000 clear sentinel",
			iterations:        1000,
			clearSentinelFile: true,
		},
		{
			name:              "10000 clear sentinel",
			iterations:        10000,
			clearSentinelFile: true,
		},
	}

	for _, tc := range cases {
		targetDir, err := utiltesting.MkTmpdir("atomic-write")
		if err != nil {
			t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
			continue
		}

		var oldTs *time.Time = nil
		writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}

		for i := 0; i < tc.iterations; i++ {
			err = writer.touchSentinelFile()
			if err != nil {
				t.Errorf("%v: unexpected error touching sentinel file: %v", tc.name, err)
				continue
			}

			ts := checkSentinelFile(targetDir, t)
			if oldTs != nil && !ts.After(*oldTs) {
				t.Errorf("%v: unexpected timestamp on sentinel file; expected %v to be after %v", tc.name, ts, oldTs)
				continue
			}
			oldTs = &ts

			if tc.clearSentinelFile {
				clearSentinelFile(targetDir, t)
			}
		}
	}
}

func checkVolumeContents(targetDir, tcName string, payload map[string][]byte, t *testing.T) {
	// use filepath.Walk to reconstruct the payload, then deep equal
	observedPayload := map[string][]byte{}
	visitor := func(path string, info os.FileInfo, err error) error {
		// filepath.Walk lstats each entry, so the user-visible projections show
		// up here as symlinks (neither regular files nor directories); the real
		// files under the hidden data directory are regular and are skipped.
		if info.Mode().IsRegular() || info.IsDir() {
			return nil
		}

		relativePath := strings.TrimPrefix(path, targetDir)
		relativePath = strings.TrimPrefix(relativePath, "/")
		if strings.HasPrefix(relativePath, "..") {
			return nil
		}

		content, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		observedPayload[relativePath] = content

		return nil
	}

	err := filepath.Walk(targetDir, visitor)
	if err != nil {
		t.Errorf("%v: unexpected error walking directory: %v", tcName, err)
	}

	cleanPathPayload := make(map[string][]byte, len(payload))
	for k, v := range payload {
		cleanPathPayload[path.Clean(k)] = v
	}

	if !reflect.DeepEqual(cleanPathPayload, observedPayload) {
		t.Errorf("%v: payload and observed payload do not match.", tcName)
	}
}

func checkSentinelFile(targetDir string, t *testing.T) time.Time {
	sentinelFilePath := filepath.Join(targetDir, sentinelFileName)
	info, err := os.Stat(sentinelFilePath)
	if err != nil {
		t.Errorf("Couldn't stat sentinel file for dir %v: %v", targetDir, err)
		return time.Now()
	}

	return info.ModTime()
}

func clearSentinelFile(targetDir string, t *testing.T) {
	sentinelFilePath := filepath.Join(targetDir, sentinelFileName)
	_, err := os.Stat(sentinelFilePath)
	if err != nil {
		t.Errorf("Couldn't stat sentinel file for dir %v: %v", targetDir, err)
	}
	err = os.Remove(sentinelFilePath)
	if err != nil {
		t.Errorf("Error removing sentinel file: %v", err)
	}
}