Mirror of https://github.com/containers/skopeo.git (synced 2025-06-28 15:47:34 +00:00)
Bump github.com/containers/storage from 1.24.5 to 1.25.0
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.24.5 to 1.25.0.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.24.5...v1.25.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
This commit is contained in:
  parent aff1b6215b
  commit ac5241482c
go.mod (2 changes)

@@ -6,7 +6,7 @@ require (
 	github.com/containers/common v0.34.0
 	github.com/containers/image/v5 v5.10.1
 	github.com/containers/ocicrypt v1.1.0
-	github.com/containers/storage v1.24.5
+	github.com/containers/storage v1.25.0
 	github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible
 	github.com/dsnet/compress v0.0.1 // indirect
 	github.com/go-check/check v0.0.0-20180628173108-788fd7840127
go.sum (4 changes)

@@ -74,6 +74,8 @@ github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2D
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk=
 github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
+github.com/containers/storage v1.25.0 h1:p0PLlQcWmtE+7XLfOCR0WuYyMTby1yozpI4DaKOtWTA=
+github.com/containers/storage v1.25.0/go.mod h1:UxTYd5F4mPVqmDRcRL0PBS8+HP74aBn96eahnhEvPtk=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=

@@ -245,6 +247,8 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
 github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
+github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
vendor/github.com/containers/storage/.cirrus.yml (2 changes, generated, vendored)

@@ -25,7 +25,7 @@ env:
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    _BUILT_IMAGE_SUFFIX: "c6233039174893568"
+    _BUILT_IMAGE_SUFFIX: "c5744859501821952"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"
vendor/github.com/containers/storage/VERSION (2 changes, generated, vendored)

@@ -1 +1 @@
-1.24.5
+1.25.0
vendor/github.com/containers/storage/drivers/aufs/aufs.go (15 changes, generated, vendored)

@@ -482,6 +482,21 @@ func (a *Driver) Put(id string) error {
 	return err
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For AUFS, it queries the mountpoint for this ID.
+func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	if !exists {
+		m = a.getMountpoint(id)
+		a.pathCache[id] = m
+	}
+	a.pathCacheLock.Unlock()
+	return directory.Usage(m)
+}
+
 // isParent returns if the passed in parent is the direct parent of the passed in layer
 func (a *Driver) isParent(id, parent string) bool {
 	parents, _ := getParentIDs(a.rootPath(), id)
vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (7 changes, generated, vendored)

@@ -27,6 +27,7 @@ import (
 	"unsafe"
 
 	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"

@@ -687,6 +688,12 @@ func (d *Driver) Put(id string) error {
 	return nil
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For BTRFS, it queries the subvolumes path for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.subvolumesDirID(id))
+}
+
 // Exists checks if the id exists in the filesystem.
 func (d *Driver) Exists(id string) bool {
 	dir := d.subvolumesDirID(id)
vendor/github.com/containers/storage/drivers/devmapper/driver.go (9 changes, generated, vendored)

@@ -11,6 +11,7 @@ import (
 
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/devicemapper"
+	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/locker"
 	"github.com/containers/storage/pkg/mount"

@@ -251,6 +252,14 @@ func (d *Driver) Put(id string) error {
 	return err
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For devmapper, it queries the mnt path for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	return directory.Usage(path.Join(d.home, "mnt", id))
+}
+
 // Exists checks to see if the device exists.
 func (d *Driver) Exists(id string) bool {
 	return d.DeviceSet.HasDevice(id)
vendor/github.com/containers/storage/drivers/driver.go (3 changes, generated, vendored)

@@ -12,6 +12,7 @@ import (
 	"github.com/vbatts/tar-split/tar/storage"
 
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 )
 

@@ -105,6 +106,8 @@ type ProtoDriver interface {
 	// Returns a set of key-value pairs which give low level information
 	// about the image/container driver is managing.
 	Metadata(id string) (map[string]string, error)
+	// ReadWriteDiskUsage returns the disk usage of the writable directory for the specified ID.
+	ReadWriteDiskUsage(id string) (*directory.DiskUsage, error)
 	// Cleanup performs necessary tasks to release resources
 	// held by the driver, e.g., unmounting all layered filesystems
 	// known to this driver.
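The interesting part of this file is the new ProtoDriver method: every graph driver now reports the disk usage of a layer's writable directory through one signature. A minimal sketch of a caller, assuming an already-initialized driver and a known layer ID (both hypothetical, not part of this commit):

```go
// Sketch only: report the writable-layer footprint via the new
// ReadWriteDiskUsage method. The driver value and layer ID are assumed
// to come from the caller (for example via graphdriver.New).
package usagereport

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func reportLayerUsage(d graphdriver.Driver, layerID string) error {
	usage, err := d.ReadWriteDiskUsage(layerID)
	if err != nil {
		return err
	}
	// DiskUsage carries both byte size and inode count, per pkg/directory.
	fmt.Printf("layer %s: %d bytes in %d inodes\n", layerID, usage.Size, usage.InodeCount)
	return nil
}
```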
vendor/github.com/containers/storage/drivers/driver_freebsd.go (4 changes, generated, vendored)

@@ -1,8 +1,6 @@
 package graphdriver
 
 import (
-	"syscall"
-
 	"golang.org/x/sys/unix"
 )
 

@@ -16,7 +14,7 @@ var (
 // Mounted checks if the given path is mounted as the fs type
 func Mounted(fsType FsMagic, mountPath string) (bool, error) {
 	var buf unix.Statfs_t
-	if err := syscall.Statfs(mountPath, &buf); err != nil {
+	if err := unix.Statfs(mountPath, &buf); err != nil {
 		return false, err
 	}
 	return FsMagic(buf.Type) == fsType, nil
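The fix replaces syscall.Statfs with unix.Statfs so the FreeBSD build depends only on golang.org/x/sys/unix and the buffer type matches the call. A small standalone sketch of the same probe pattern, with a locally defined FsMagic type standing in for the one in the graphdriver package (Unix-like targets only):

```go
// Sketch of the Mounted pattern after the change: probe a path's filesystem
// magic with unix.Statfs. FsMagic here is a local stand-in for illustration.
package fscheck

import "golang.org/x/sys/unix"

type FsMagic uint32

// isFsType reports whether mountPath sits on a filesystem whose f_type
// matches want, mirroring graphdriver.Mounted from the diff above.
func isFsType(want FsMagic, mountPath string) (bool, error) {
	var buf unix.Statfs_t
	if err := unix.Statfs(mountPath, &buf); err != nil {
		return false, err
	}
	return FsMagic(buf.Type) == want, nil
}
```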
vendor/github.com/containers/storage/drivers/overlay/overlay.go (26 changes, generated, vendored)

@@ -521,6 +521,18 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
 	return metadata, nil
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For Overlay, it attempts to check the XFS quota for size, and falls back to
+// finding the size of the "diff" directory.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	usage := &directory.DiskUsage{}
+	if d.quotaCtl != nil {
+		err := d.quotaCtl.GetDiskUsage(d.dir(id), usage)
+		return usage, err
+	}
+	return directory.Usage(path.Join(d.dir(id), "diff"))
+}
+
 // Cleanup any state created by overlay which should be cleaned when daemon
 // is being shutdown. For now, we just have to unmount the bind mounted
 // we had created.

@@ -612,17 +624,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
 	}
 	}()
 
+	if d.quotaCtl != nil {
+		quota := quota.Quota{}
 	if opts != nil && len(opts.StorageOpt) > 0 {
 		driver := &Driver{}
 		if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil {
 			return err
 		}
 
 		if driver.options.quota.Size > 0 {
-			// Set container disk quota limit
-			if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil {
-				return err
+			quota.Size = driver.options.quota.Size
 			}
+		}
+		// Set container disk quota limit
+		// If it is set to 0, we will track the disk usage, but not enforce a limit
+		if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
+			return err
+		}
 	}
 	}
 

@@ -1221,6 +1238,7 @@ func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent stri
 	if d.useNaiveDiff() || !d.isParent(id, parent) {
 		return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
 	}
+
 	return directory.Size(d.getDiffPath(id))
 }
 
vendor/github.com/containers/storage/drivers/quota/projectquota.go (32 changes, generated, vendored)

@@ -56,6 +56,7 @@ import (
 	"path/filepath"
 	"unsafe"
 
+	"github.com/containers/storage/pkg/directory"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )

@@ -196,17 +197,37 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
 
 // GetQuota - get the quota limits of a directory that was configured with SetQuota
 func (q *Control) GetQuota(targetPath string, quota *Quota) error {
+	d, err := q.fsDiskQuotaFromPath(targetPath)
+	if err != nil {
+		return err
+	}
+	quota.Size = uint64(d.d_blk_hardlimit) * 512
+	return nil
+}
+
+// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
+func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
+	d, err := q.fsDiskQuotaFromPath(targetPath)
+	if err != nil {
+		return err
+	}
+	usage.Size = int64(d.d_bcount) * 512
+	usage.InodeCount = int64(d.d_icount)
+
+	return nil
+}
+
+func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
+	var d C.fs_disk_quota_t
 
 	projectID, ok := q.quotas[targetPath]
 	if !ok {
-		return fmt.Errorf("quota not found for path : %s", targetPath)
+		return d, fmt.Errorf("quota not found for path : %s", targetPath)
 	}
 
 	//
 	// get the quota limit for the container's project id
 	//
-	var d C.fs_disk_quota_t
-
 	var cs = C.CString(q.backingFsBlockDev)
 	defer C.free(unsafe.Pointer(cs))
 

@@ -214,12 +235,11 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error {
 		uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
 		uintptr(unsafe.Pointer(&d)), 0, 0)
 	if errno != 0 {
-		return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
+		return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
 			projectID, q.backingFsBlockDev, errno.Error())
 	}
-	quota.Size = uint64(d.d_blk_hardlimit) * 512
 
-	return nil
+	return d, nil
 }
 
 // getProjectID - get the project id of path on xfs
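Both quota queries now funnel through a single fsDiskQuotaFromPath lookup, and the *512 conversions reflect that d_blk_hardlimit and d_bcount are reported in 512-byte basic blocks. The following cgo-free sketch mirrors that refactoring shape purely for illustration; fsDiskQuota, the quotas map, and the field names are stand-ins, not the real C.fs_disk_quota_t plumbing:

```go
// Illustrative stand-in for the refactor above: one lookup helper feeds both
// the limit query and the usage query. Not the real cgo-backed implementation.
package quotasketch

import "fmt"

type fsDiskQuota struct {
	blkHardLimit uint64 // stand-in for d_blk_hardlimit (512-byte blocks)
	bcount       uint64 // stand-in for d_bcount (512-byte blocks)
	icount       uint64 // stand-in for d_icount (inodes)
}

type DiskUsage struct {
	Size       int64
	InodeCount int64
}

type Control struct {
	quotas map[string]fsDiskQuota // stub: the real code maps path to project ID
}

func (q *Control) fsDiskQuotaFromPath(target string) (fsDiskQuota, error) {
	d, ok := q.quotas[target]
	if !ok {
		return fsDiskQuota{}, fmt.Errorf("quota not found for path : %s", target)
	}
	return d, nil
}

// GetQuota converts the hard limit from basic blocks to bytes.
func (q *Control) GetQuota(target string, sizeLimit *uint64) error {
	d, err := q.fsDiskQuotaFromPath(target)
	if err != nil {
		return err
	}
	*sizeLimit = d.blkHardLimit * 512
	return nil
}

// GetDiskUsage converts current block and inode counts into a DiskUsage.
func (q *Control) GetDiskUsage(target string, usage *DiskUsage) error {
	d, err := q.fsDiskQuotaFromPath(target)
	if err != nil {
		return err
	}
	usage.Size = int64(d.bcount) * 512
	usage.InodeCount = int64(d.icount)
	return nil
}
```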
vendor/github.com/containers/storage/drivers/vfs/driver.go (7 changes, generated, vendored)

@@ -10,6 +10,7 @@ import (
 
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/parsers"
 	"github.com/containers/storage/pkg/system"

@@ -243,6 +244,12 @@ func (d *Driver) Put(id string) error {
 	return nil
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For VFS, it queries the directory for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.dir(id))
+}
+
 // Exists checks to see if the directory exists for the given id.
 func (d *Driver) Exists(id string) bool {
 	_, err := os.Stat(d.dir(id))
vendor/github.com/containers/storage/drivers/windows/windows.go (7 changes, generated, vendored)

@@ -26,6 +26,7 @@ import (
 	"github.com/Microsoft/hcsshim"
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/longpath"

@@ -436,6 +437,12 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 	return dir, nil
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For VFS, it queries the directory for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.dir(id))
+}
+
 // Put adds a new layer to the driver.
 func (d *Driver) Put(id string) error {
 	panicIfUsedByLcow()
vendor/github.com/containers/storage/drivers/zfs/zfs.go (7 changes, generated, vendored)

@@ -13,6 +13,7 @@ import (
 	"time"
 
 	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"

@@ -455,6 +456,12 @@ func (d *Driver) Put(id string) error {
 	return nil
 }
 
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For ZFS, it queries the full mount path for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.mountPath(id))
+}
+
 // Exists checks to see if the cache entry exists for the given id.
 func (d *Driver) Exists(id string) bool {
 	d.Lock()
vendor/github.com/containers/storage/go.mod (6 changes, generated, vendored)

@@ -8,9 +8,9 @@ require (
 	github.com/Microsoft/hcsshim v0.8.14
 	github.com/docker/go-units v0.4.0
 	github.com/hashicorp/go-multierror v1.1.0
-	github.com/klauspost/compress v1.11.5
+	github.com/klauspost/compress v1.11.7
 	github.com/klauspost/pgzip v1.2.5
-	github.com/mattn/go-shellwords v1.0.10
+	github.com/mattn/go-shellwords v1.0.11
 	github.com/mistifyio/go-zfs v2.1.1+incompatible
 	github.com/moby/sys/mountinfo v0.4.0
 	github.com/opencontainers/go-digest v1.0.0

@@ -20,7 +20,7 @@ require (
 	github.com/pkg/errors v0.9.1
 	github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
 	github.com/sirupsen/logrus v1.7.0
-	github.com/stretchr/testify v1.6.1
+	github.com/stretchr/testify v1.7.0
 	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/vbatts/tar-split v0.11.1
vendor/github.com/containers/storage/go.sum (12 changes, generated, vendored)

@@ -58,8 +58,8 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
-github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
+github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

@@ -71,8 +71,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
-github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
+github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
 github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=

@@ -118,8 +118,8 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
vendor/github.com/containers/storage/layers.go (84 changes, generated, vendored)

@@ -117,6 +117,11 @@ type Layer struct {
 
 	// ReadOnly is true if this layer resides in a read-only layer store.
 	ReadOnly bool `json:"-"`
+
+	// BigDataNames is a list of names of data items that we keep for the
+	// convenience of the caller. They can be large, and are only in
+	// memory when being read from or written to disk.
+	BigDataNames []string `json:"big-data-names,omitempty"`
 }
 
 type layerMountPoint struct {

@@ -137,6 +142,7 @@ type DiffOptions struct {
 type ROLayerStore interface {
 	ROFileBasedStore
 	ROMetadataStore
+	ROLayerBigDataStore
 
 	// Exists checks if a layer with the specified name or ID is known.
 	Exists(id string) bool

@@ -194,6 +200,7 @@ type LayerStore interface {
 	RWFileBasedStore
 	RWMetadataStore
 	FlaggableStore
+	RWLayerBigDataStore
 
 	// Create creates a new layer, optionally giving it a specified ID rather than
 	// a randomly-generated one, either inheriting data from another specified

@@ -278,6 +285,7 @@ func copyLayer(l *Layer) *Layer {
 		UncompressedSize: l.UncompressedSize,
 		CompressionType:  l.CompressionType,
 		ReadOnly:         l.ReadOnly,
+		BigDataNames:     copyStringSlice(l.BigDataNames),
 		Flags:            copyStringInterfaceMap(l.Flags),
 		UIDMap:           copyIDMap(l.UIDMap),
 		GIDMap:           copyIDMap(l.GIDMap),

@@ -702,6 +710,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 			Flags:      make(map[string]interface{}),
 			UIDMap:     copyIDMap(moreOptions.UIDMap),
 			GIDMap:     copyIDMap(moreOptions.GIDMap),
+			BigDataNames: []string{},
 		}
 		r.layers = append(r.layers, layer)
 		r.idindex.Add(id)

@@ -970,6 +979,80 @@ func (r *layerStore) SetNames(id string, names []string) error {
 	return ErrLayerUnknown
 }
 
+func (r *layerStore) datadir(id string) string {
+	return filepath.Join(r.layerdir, id)
+}
+
+func (r *layerStore) datapath(id, key string) string {
+	return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
+}
+
+func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
+	if key == "" {
+		return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name")
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+	}
+	return os.Open(r.datapath(layer.ID, key))
+}
+
+func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
+	if key == "" {
+		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item")
+	}
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath())
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id)
+	}
+	err := os.MkdirAll(r.datadir(layer.ID), 0700)
+	if err != nil {
+		return err
+	}
+
+	// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
+	// BigData() relies on this behaviour when opening the file for read
+	// so that it is either accessing the old data or the new one.
+	writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
+	if err != nil {
+		return errors.Wrapf(err, "error opening bigdata file")
+	}
+
+	if _, err := io.Copy(writer, data); err != nil {
+		writer.Close()
+		return errors.Wrapf(err, "error copying bigdata for the layer")
+
+	}
+	if err := writer.Close(); err != nil {
+		return errors.Wrapf(err, "error closing bigdata file for the layer")
+	}
+
+	addName := true
+	for _, name := range layer.BigDataNames {
+		if name == key {
+			addName = false
+			break
+		}
+	}
+	if addName {
+		layer.BigDataNames = append(layer.BigDataNames, key)
+		return r.Save()
+	}
+	return nil
+}
+
+func (r *layerStore) BigDataNames(id string) ([]string, error) {
+	layer, ok := r.lookup(id)
+	if !ok {
+		return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id)
+	}
+	return copyStringSlice(layer.BigDataNames), nil
+}
+
 func (r *layerStore) Metadata(id string) (string, error) {
 	if layer, ok := r.lookup(id); ok {
 		return layer.Metadata, nil

@@ -1004,6 +1087,7 @@ func (r *layerStore) deleteInternal(id string) error {
 	err := r.driver.Remove(id)
 	if err == nil {
 		os.Remove(r.tspath(id))
+		os.RemoveAll(r.datadir(id))
 		delete(r.byid, id)
 		for _, name := range layer.Names {
 			delete(r.byname, name)
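Layer big-data items are stored as files under a per-layer data directory and their names are tracked in BigDataNames, which the copy, Put, and delete paths now carry along. A hedged sketch of exercising the feature through the Store-level wrappers that appear later in this diff; the storage.Store value and layer ID are assumed to come from elsewhere (for example storage.GetStore):

```go
// Sketch: round-trip a named big-data item for a layer through the
// Store-level API added in this bump. Store handle and layerID are assumed.
package bigdatasketch

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containers/storage"
)

func roundTripLayerBigData(store storage.Store, layerID string) error {
	// Write a named blob; the layer store streams it to disk atomically.
	if err := store.SetLayerBigData(layerID, "example-key", bytes.NewBufferString("hello")); err != nil {
		return err
	}

	// List the names recorded in the layer's BigDataNames field.
	names, err := store.ListLayerBigData(layerID)
	if err != nil {
		return err
	}
	fmt.Println("big-data items:", names)

	// Read the blob back; the caller owns closing the ReadCloser.
	rc, err := store.LayerBigData(layerID, "example-key")
	if err != nil {
		return err
	}
	defer rc.Close()
	data, err := ioutil.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Printf("read back %d bytes\n", len(data))
	return nil
}
```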
vendor/github.com/containers/storage/layers_ffjson.go (111 changes, generated, vendored)

@@ -396,6 +396,22 @@ func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
 		}
 		buf.WriteByte(',')
 	}
+	if len(j.BigDataNames) != 0 {
+		buf.WriteString(`"big-data-names":`)
+		if j.BigDataNames != nil {
+			buf.WriteString(`[`)
+			for i, v := range j.BigDataNames {
+				if i != 0 {
+					buf.WriteString(`,`)
+				}
+				fflib.WriteJsonString(buf, string(v))
+			}
+			buf.WriteString(`]`)
+		} else {
+			buf.WriteString(`null`)
+		}
+		buf.WriteByte(',')
+	}
 	buf.Rewind(1)
 	buf.WriteByte('}')
 	return nil

@@ -436,6 +452,8 @@ const (
 	ffjtLayerUIDMap
 
 	ffjtLayerGIDMap
+
+	ffjtLayerBigDataNames
 )
 
 var ffjKeyLayerID = []byte("id")

@@ -470,6 +488,8 @@ var ffjKeyLayerUIDMap = []byte("uidmap")
 
 var ffjKeyLayerGIDMap = []byte("gidmap")
 
+var ffjKeyLayerBigDataNames = []byte("big-data-names")
+
 // UnmarshalJSON umarshall json - template of ffjson
 func (j *Layer) UnmarshalJSON(input []byte) error {
 	fs := fflib.NewFFLexer(input)

@@ -531,6 +551,14 @@ mainparse:
 		} else {
 			switch kn[0] {
 
+			case 'b':
+
+				if bytes.Equal(ffjKeyLayerBigDataNames, kn) {
+					currentKey = ffjtLayerBigDataNames
+					state = fflib.FFParse_want_colon
+					goto mainparse
+				}
+
 			case 'c':
 
 				if bytes.Equal(ffjKeyLayerCreated, kn) {

@@ -640,6 +668,12 @@ mainparse:
 
 		}
 
+		if fflib.EqualFoldRight(ffjKeyLayerBigDataNames, kn) {
+			currentKey = ffjtLayerBigDataNames
+			state = fflib.FFParse_want_colon
+			goto mainparse
+		}
+
 		if fflib.SimpleLetterEqualFold(ffjKeyLayerGIDMap, kn) {
 			currentKey = ffjtLayerGIDMap
 			state = fflib.FFParse_want_colon

@@ -801,6 +835,9 @@ mainparse:
 				case ffjtLayerGIDMap:
 					goto handle_GIDMap
 
+				case ffjtLayerBigDataNames:
+					goto handle_BigDataNames
+
 				case ffjtLayernosuchkey:
 					err = fs.SkipField(tok)
 					if err != nil {

@@ -1551,6 +1588,80 @@ handle_GIDMap:
 	state = fflib.FFParse_after_value
 	goto mainparse
 
+handle_BigDataNames:
+
+	/* handler: j.BigDataNames type=[]string kind=slice quoted=false*/
+
+	{
+
+		{
+			if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+				return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+			}
+		}
+
+		if tok == fflib.FFTok_null {
+			j.BigDataNames = nil
+		} else {
+
+			j.BigDataNames = []string{}
+
+			wantVal := true
+
+			for {
+
+				var tmpJBigDataNames string
+
+				tok = fs.Scan()
+				if tok == fflib.FFTok_error {
+					goto tokerror
+				}
+				if tok == fflib.FFTok_right_brace {
+					break
+				}
+
+				if tok == fflib.FFTok_comma {
+					if wantVal == true {
+						// TODO(pquerna): this isn't an ideal error message, this handles
+						// things like [,,,] as an array value.
+						return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+					}
+					continue
+				} else {
+					wantVal = true
+				}
+
+				/* handler: tmpJBigDataNames type=string kind=string quoted=false*/
+
+				{
+
+					{
+						if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+							return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+						}
+					}
+
+					if tok == fflib.FFTok_null {
+
+					} else {
+
+						outBuf := fs.Output.Bytes()
+
+						tmpJBigDataNames = string(string(outBuf))
+
+					}
+				}
+
+				j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames)
+
+				wantVal = false
+			}
+		}
+	}
+
+	state = fflib.FFParse_after_value
+	goto mainparse
+
 wantedvalue:
 	return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
 wrongtokenerror:
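layers_ffjson.go is ffjson-generated, so this whole hunk is mechanical marshalling for the one new field. For orientation, the same `json:"big-data-names,omitempty"` behaviour with plain encoding/json looks like this (standalone illustration, not the generated code):

```go
// Standalone illustration of what the generated marshalling above encodes:
// the `json:"big-data-names,omitempty"` tag on Layer.BigDataNames.
package main

import (
	"encoding/json"
	"fmt"
)

type layer struct {
	ID           string   `json:"id"`
	BigDataNames []string `json:"big-data-names,omitempty"`
}

func main() {
	empty, _ := json.Marshal(layer{ID: "a"})
	withNames, _ := json.Marshal(layer{ID: "b", BigDataNames: []string{"manifest"}})
	fmt.Println(string(empty))     // {"id":"a"}  (field omitted when empty)
	fmt.Println(string(withNames)) // {"id":"b","big-data-names":["manifest"]}
}
```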
vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go (125 changes, generated, vendored, new file)

@@ -0,0 +1,125 @@
+// +build freebsd
+
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"golang.org/x/sys/unix"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+	return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+	return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+	return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if ok {
+		// Currently go does not fill in the major/minors
+		if s.Mode&unix.S_IFBLK != 0 ||
+			s.Mode&unix.S_IFCHR != 0 {
+			hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
+			hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
+		}
+	}
+
+	return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if ok {
+		inode = s.Ino
+	}
+
+	return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if !ok {
+		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
+	}
+	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+func major(device uint64) uint64 {
+	return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+	return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	if rsystem.RunningInUserNS() {
+		// cannot create a device if running in user namespace
+		return nil
+	}
+
+	mode := uint32(hdr.Mode & 07777)
+	switch hdr.Typeflag {
+	case tar.TypeBlock:
+		mode |= unix.S_IFBLK
+	case tar.TypeChar:
+		mode |= unix.S_IFCHR
+	case tar.TypeFifo:
+		mode |= unix.S_IFIFO
+	}
+
+	return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+	permissionsMask := hdrInfo.Mode()
+	if forceMask != nil {
+		permissionsMask = *forceMask
+	}
+	if hdr.Typeflag == tar.TypeLink {
+		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+			if err := os.Chmod(path, permissionsMask); err != nil {
+				return err
+			}
+		}
+	} else if hdr.Typeflag != tar.TypeSymlink {
+		if err := os.Chmod(path, permissionsMask); err != nil {
+			return err
+		}
+	}
+	return nil
+}
vendor/github.com/containers/storage/pkg/archive/archive_unix.go (2 changes, generated, vendored)

@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!freebsd
 
 package archive
 
vendor/github.com/containers/storage/pkg/directory/directory.go (8 changes, generated, vendored)

@@ -6,9 +6,15 @@ import (
 	"path/filepath"
 )
 
+// DiskUsage is a structure that describes the disk usage (size and inode count)
+// of a particular directory.
+type DiskUsage struct {
+	Size       int64
+	InodeCount int64
+}
 // MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
 func MoveToSubdir(oldpath, subdir string) error {
 
 	infos, err := ioutil.ReadDir(oldpath)
 	if err != nil {
 		return err
vendor/github.com/containers/storage/pkg/directory/directory_unix.go (31 changes, generated, vendored)

@@ -10,37 +10,50 @@ import (
 
 // Size walks a directory tree and returns its total size in bytes.
 func Size(dir string) (size int64, err error) {
+	usage, err := Usage(dir)
+	if err != nil {
+		return 0, err
+	}
+	return usage.Size, nil
+}
+
+// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
+func Usage(dir string) (usage *DiskUsage, err error) {
+	usage = &DiskUsage{}
 	data := make(map[uint64]struct{})
 	err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
 		if err != nil {
-			// if dir does not exist, Size() returns the error.
-			// if dir/x disappeared while walking, Size() ignores dir/x.
+			// if dir does not exist, Usage() returns the error.
+			// if dir/x disappeared while walking, Usage() ignores dir/x.
 			if os.IsNotExist(err) && d != dir {
 				return nil
 			}
 			return err
 		}
 
-		// Ignore directory sizes
 		if fileInfo == nil {
 			return nil
 		}
 
-		s := fileInfo.Size()
-		if fileInfo.IsDir() || s == 0 {
-			return nil
-		}
-
 		// Check inode to handle hard links correctly
 		inode := fileInfo.Sys().(*syscall.Stat_t).Ino
 		// inode is not a uint64 on all platforms. Cast it to avoid issues.
 		if _, exists := data[uint64(inode)]; exists {
 			return nil
 		}
 		// inode is not a uint64 on all platforms. Cast it to avoid issues.
 		data[uint64(inode)] = struct{}{}
 
-		size += s
+		// Count the unique inode
+		usage.InodeCount++
+
+		// Ignore directory sizes
+		if fileInfo.IsDir() {
+			return nil
+		}
+
+		usage.Size += fileInfo.Size()
+
 		return nil
 	})
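Usage walks the tree once, de-duplicates hard links by inode, counts every unique inode, and adds sizes only for non-directories; Size is now a thin wrapper that discards the inode count. A simplified, Unix-only sketch of the same walk (not the vendored implementation; child-not-found errors are not special-cased here):

```go
// Simplified, Unix-only sketch of the Usage walk above: dedupe by inode,
// count every unique inode, sum sizes of non-directories.
package dusketch

import (
	"os"
	"path/filepath"
	"syscall"
)

type DiskUsage struct {
	Size       int64
	InodeCount int64
}

func usage(dir string) (*DiskUsage, error) {
	u := &DiskUsage{}
	seen := make(map[uint64]struct{})
	err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
		if err != nil || info == nil {
			return err
		}
		ino := info.Sys().(*syscall.Stat_t).Ino
		if _, ok := seen[uint64(ino)]; ok {
			return nil // hard link to something already counted
		}
		seen[uint64(ino)] = struct{}{}
		u.InodeCount++
		if !info.IsDir() {
			u.Size += info.Size()
		}
		return nil
	})
	return u, err
}
```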
vendor/github.com/containers/storage/pkg/directory/directory_windows.go (15 changes, generated, vendored)

@@ -7,8 +7,18 @@ import (
 	"path/filepath"
 )
 
-// Size walks a directory tree and returns its total size in bytes.
+// Size walks a directory tree and returns its total size in bytes
 func Size(dir string) (size int64, err error) {
+	usage, err := Usage(dir)
+	if err != nil {
+		return 0, nil
+	}
+	return usage.Size, nil
+}
+
+// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
+func Usage(dir string) (usage *DiskUsage, err error) {
+	usage = &DiskUsage{}
 	err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
 		if err != nil {
 			// if dir does not exist, Size() returns the error.

@@ -29,7 +39,8 @@ func Size(dir string) (size int64, err error) {
 			return nil
 		}
 
-		size += s
+		usage.Size += s
+		usage.InodeCount++
 
 		return nil
 	})
vendor/github.com/containers/storage/pkg/homedir/homedir_others.go (2 changes, generated, vendored)

@@ -1,4 +1,4 @@
-// +build !linux,!darwin
+// +build !linux,!darwin,!freebsd
 
 package homedir
 
vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go (2 changes, generated, vendored)

@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!freebsd
 
 package mount
 
vendor/github.com/containers/storage/pkg/system/mknod.go (2 changes, generated, vendored)

@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!freebsd
 
 package system
 
vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go (22 changes, generated, vendored, new file)

@@ -0,0 +1,22 @@
+// +build freebsd
+
+package system
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev uint64) error {
+	return unix.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
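Mkdev packs the device number exactly as the comments describe (low 8 bits of the minor, 12 bits of the major, then the top 12 bits of the minor), and the major/minor helpers added in archive_freebsd.go above invert it. A small round-trip check with illustrative values:

```go
// Round-trip check for the device-number layout used by Mkdev above and the
// major()/minor() helpers in archive_freebsd.go. Values are illustrative.
package main

import "fmt"

func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func majorOf(device uint64) uint64 { return (device >> 8) & 0xfff }

func minorOf(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

func main() {
	dev := mkdev(8, 0x123ab) // hypothetical major 8, minor 0x123ab
	// Prints dev=0x123008ab major=8 minor=0x123ab, so the packing round-trips.
	fmt.Printf("dev=%#x major=%d minor=%#x\n", dev, majorOf(uint64(dev)), minorOf(uint64(dev)))
}
```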
vendor/github.com/containers/storage/store.go (generated, vendored): 125 changes
@@ -122,6 +122,30 @@ type ContainerBigDataStore interface {
 	SetBigData(id, key string, data []byte) error
 }
 
+// A ROLayerBigDataStore wraps up how we store RO big-data associated with layers.
+type ROLayerBigDataStore interface {
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	BigData(id, key string) (io.ReadCloser, error)
+
+	// BigDataNames() returns a list of the names of previously-stored pieces of
+	// data.
+	BigDataNames(id string) ([]string, error)
+}
+
+// A RWLayerBigDataStore wraps up how we store big-data associated with layers.
+type RWLayerBigDataStore interface {
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	SetBigData(id, key string, data io.Reader) error
+}
+
+// A LayerBigDataStore wraps up how we store big-data associated with layers.
+type LayerBigDataStore interface {
+	ROLayerBigDataStore
+	RWLayerBigDataStore
+}
+
 // A FlaggableStore can have flags set and cleared on items which it manages.
 type FlaggableStore interface {
 	// ClearFlag removes a named flag from an item in the store.
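A side note on the new interfaces (illustration only, not part of the diff): ROLayerBigDataStore covers reads, RWLayerBigDataStore covers writes, and LayerBigDataStore is simply their composition, so a single concrete type can satisfy all three. A minimal in-memory sketch under that assumption, with locally renamed interfaces:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// Interface shapes copied from the hunk above, renamed for this sketch.
type roBigData interface {
	BigData(id, key string) (io.ReadCloser, error)
	BigDataNames(id string) ([]string, error)
}
type rwBigData interface {
	SetBigData(id, key string, data io.Reader) error
}
type bigData interface {
	roBigData
	rwBigData
}

// memStore is a toy implementation keyed by layer ID and data name.
type memStore struct{ m map[string]map[string][]byte }

func (s *memStore) SetBigData(id, key string, data io.Reader) error {
	b, err := ioutil.ReadAll(data)
	if err != nil {
		return err
	}
	if s.m[id] == nil {
		s.m[id] = map[string][]byte{}
	}
	s.m[id][key] = b
	return nil
}

func (s *memStore) BigData(id, key string) (io.ReadCloser, error) {
	b, ok := s.m[id][key]
	if !ok {
		return nil, fmt.Errorf("no big data %q for layer %q", key, id)
	}
	return ioutil.NopCloser(bytes.NewReader(b)), nil
}

func (s *memStore) BigDataNames(id string) ([]string, error) {
	var names []string
	for k := range s.m[id] {
		names = append(names, k)
	}
	return names, nil
}

func main() {
	var store bigData = &memStore{m: map[string]map[string][]byte{}}
	_ = store.SetBigData("layer1", "manifest", bytes.NewReader([]byte("{}")))
	names, _ := store.BigDataNames("layer1")
	fmt.Println(names) // [manifest]
}
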
@@ -385,6 +409,18 @@ type Store interface {
 	// allow ImagesByDigest to find images by their correct digests.
 	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
 
+	// ListLayerBigData retrieves a list of the (possibly large) chunks of
+	// named data associated with an layer.
+	ListLayerBigData(id string) ([]string, error)
+
+	// LayerBigData retrieves a (possibly large) chunk of named data
+	// associated with a layer.
+	LayerBigData(id, key string) (io.ReadCloser, error)
+
+	// SetLayerBigData stores a (possibly large) chunk of named data
+	// associated with a layer.
+	SetLayerBigData(id, key string, data io.Reader) error
+
 	// ImageSize computes the size of the image's layers and ancillary data.
 	ImageSize(id string) (int64, error)
 
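For illustration (not part of the commit), a rough sketch of how a caller could exercise the three new Store methods; the store roots, driver name, and layer ID below are placeholders, and real code would normally take its options from storage.conf rather than hard-coding them:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containers/storage"
)

func main() {
	// Placeholder options purely for the sketch.
	store, err := storage.GetStore(storage.StoreOptions{
		RunRoot:         "/tmp/demo-run",
		GraphRoot:       "/tmp/demo-graph",
		GraphDriverName: "vfs",
	})
	if err != nil {
		panic(err)
	}

	layerID := "0123abcd" // placeholder: the ID of an existing layer

	// Attach a named blob to the layer, then list and read it back.
	if err := store.SetLayerBigData(layerID, "annotations", bytes.NewReader([]byte(`{"a":"b"}`))); err != nil {
		panic(err)
	}
	names, _ := store.ListLayerBigData(layerID)
	rc, err := store.LayerBigData(layerID, "annotations")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	payload, _ := ioutil.ReadAll(rc)
	fmt.Println(names, string(payload))
}
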
@@ -1627,6 +1663,95 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
 	return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
 }
 
+// ListLayerBigData retrieves a list of the (possibly large) chunks of
+// named data associated with an layer.
+func (s *store) ListLayerBigData(id string) ([]string, error) {
+	lstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	lstores, err := s.ROLayerStores()
+	if err != nil {
+		return nil, err
+	}
+	foundLayer := false
+	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return nil, err
+			}
+		}
+		data, err := store.BigDataNames(id)
+		if err == nil {
+			return data, nil
+		}
+		if store.Exists(id) {
+			foundLayer = true
+		}
+	}
+	if foundLayer {
+		return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id)
+	}
+	return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+}
+
+// LayerBigData retrieves a (possibly large) chunk of named data
+// associated with a layer.
+func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
+	lstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	lstores, err := s.ROLayerStores()
+	if err != nil {
+		return nil, err
+	}
+	foundLayer := false
+	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return nil, err
+			}
+		}
+		data, err := store.BigData(id, key)
+		if err == nil {
+			return data, nil
+		}
+		if store.Exists(id) {
+			foundLayer = true
+		}
+	}
+	if foundLayer {
+		return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id)
+	}
+	return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+}
+
+// SetLayerBigData stores a (possibly large) chunk of named data
+// associated with a layer.
+func (s *store) SetLayerBigData(id, key string, data io.Reader) error {
+	store, err := s.LayerStore()
+	if err != nil {
+		return err
+	}
+
+	store.Lock()
+	defer store.Unlock()
+	if modified, err := store.Modified(); modified || err != nil {
+		if err = store.Load(); err != nil {
+			return nil
+		}
+	}
+
+	return store.SetBigData(id, key, data)
+}
+
 func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
 	ristore, err := s.ImageStore()
 	if err != nil {
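As background (not part of the diff), the two read paths above follow the package's usual pattern: walk the writable layer store first, then each read-only store, reload any store another process has modified, and only report an unknown layer if no store recognises the ID. A simplified, self-contained sketch of that walk, with invented types, and unlocking per store rather than deferring as the vendored code does:

package main

import (
	"fmt"
	"sync"
)

// memLayerStore is an invented stand-in for a layer store that can be locked,
// checked for external modification, and reloaded.
type memLayerStore struct {
	sync.Mutex
	stale bool
	items map[string]string
}

func (m *memLayerStore) Modified() (bool, error) { return m.stale, nil }
func (m *memLayerStore) Load() error             { m.stale = false; return nil }
func (m *memLayerStore) Lookup(id string) (string, bool) {
	v, ok := m.items[id]
	return v, ok
}

// findInStores mirrors the walk in ListLayerBigData/LayerBigData: primary
// store first, then read-only stores, reloading any store that went stale.
func findInStores(id string, primary *memLayerStore, readOnly ...*memLayerStore) (string, error) {
	for _, s := range append([]*memLayerStore{primary}, readOnly...) {
		s.Lock()
		if modified, err := s.Modified(); modified || err != nil {
			if err := s.Load(); err != nil {
				s.Unlock()
				return "", err
			}
		}
		v, ok := s.Lookup(id)
		s.Unlock()
		if ok {
			return v, nil
		}
	}
	return "", fmt.Errorf("layer %q not known to any store", id)
}

func main() {
	rw := &memLayerStore{items: map[string]string{}}
	ro := &memLayerStore{stale: true, items: map[string]string{"abc": "read-only data"}}
	v, err := findInStores("abc", rw, ro)
	fmt.Println(v, err) // read-only data <nil>
}
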
vendor/github.com/mattn/go-shellwords/.travis.yml (generated, vendored): 3 changes
@@ -1,3 +1,6 @@
+arch:
+- amd64
+- ppc64le
 language: go
 sudo: false
 go:
vendor/github.com/mattn/go-shellwords/README.md (generated, vendored): 9 changes
@@ -2,7 +2,8 @@
 
 [](https://codecov.io/gh/mattn/go-shellwords)
 [](https://travis-ci.org/mattn/go-shellwords)
-[](http://godoc.org/github.com/mattn/go-shellwords)
+[](https://pkg.go.dev/github.com/mattn/go-shellwords)
+[](https://github.com/mattn/go-shellwords/actions)
 
 Parse line as shell words.
 
@@ -13,6 +14,12 @@ args, err := shellwords.Parse("./foo --bar=baz")
 // args should be ["./foo", "--bar=baz"]
 ```
 
+```go
+envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz")
+// envs should be ["FOO=foo", "BAR=baz"]
+// args should be ["./foo", "--bar=baz"]
+```
+
 ```go
 os.Setenv("FOO", "bar")
 p := shellwords.NewParser()
vendor/github.com/mattn/go-shellwords/shellwords.go (generated, vendored): 146 changes
@@ -1,10 +1,11 @@
 package shellwords
 
 import (
+	"bytes"
 	"errors"
 	"os"
-	"regexp"
 	"strings"
+	"unicode"
 )
 
 var (
@@ -12,8 +13,6 @@ var (
 	ParseBacktick bool = false
 )
 
-var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`)
-
 func isSpace(r rune) bool {
 	switch r {
 	case ' ', '\t', '\r', '\n':
@@ -27,13 +26,72 @@ func replaceEnv(getenv func(string) string, s string) string {
 		getenv = os.Getenv
 	}
 
-	return envRe.ReplaceAllStringFunc(s, func(s string) string {
-		s = s[1:]
-		if s[0] == '{' {
-			s = s[1 : len(s)-1]
-		}
-		return getenv(s)
-	})
+	var buf bytes.Buffer
+	rs := []rune(s)
+	for i := 0; i < len(rs); i++ {
+		r := rs[i]
+		if r == '\\' {
+			i++
+			if i == len(rs) {
+				break
+			}
+			buf.WriteRune(rs[i])
+			continue
+		} else if r == '$' {
+			i++
+			if i == len(rs) {
+				buf.WriteRune(r)
+				break
+			}
+			if rs[i] == 0x7b {
+				i++
+				p := i
+				for ; i < len(rs); i++ {
+					r = rs[i]
+					if r == '\\' {
+						i++
+						if i == len(rs) {
+							return s
+						}
+						continue
+					}
+					if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) {
+						break
+					}
+				}
+				if r != 0x7d {
+					return s
+				}
+				if i > p {
+					buf.WriteString(getenv(s[p:i]))
+				}
+			} else {
+				p := i
+				for ; i < len(rs); i++ {
+					r := rs[i]
+					if r == '\\' {
+						i++
+						if i == len(rs) {
+							return s
+						}
+						continue
+					}
+					if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) {
+						break
+					}
+				}
+				if i > p {
+					buf.WriteString(getenv(s[p:i]))
+					i--
+				} else {
+					buf.WriteString(s[p:])
+				}
+			}
+		} else {
+			buf.WriteRune(r)
+		}
+	}
+	return buf.String()
 }
 
 type Parser struct {
@@ -56,6 +114,14 @@ func NewParser() *Parser {
 	}
 }
 
+type argType int
+
+const (
+	argNo argType = iota
+	argSingle
+	argQuoted
+)
+
 func (p *Parser) Parse(line string) ([]string, error) {
 	args := []string{}
 	buf := ""
@@ -63,13 +129,16 @@ func (p *Parser) Parse(line string) ([]string, error) {
 	backtick := ""
 
 	pos := -1
-	got := false
+	got := argNo
 
+	i := -1
 loop:
-	for i, r := range line {
+	for _, r := range line {
+		i++
 		if escaped {
 			buf += string(r)
 			escaped = false
+			got = argSingle
 			continue
 		}
 
@@ -86,21 +155,23 @@ loop:
 		if singleQuoted || doubleQuoted || backQuote || dollarQuote {
 			buf += string(r)
 			backtick += string(r)
-		} else if got {
+		} else if got != argNo {
 			if p.ParseEnv {
+				if got == argSingle {
 				parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir}
 				strs, err := parser.Parse(replaceEnv(p.Getenv, buf))
 				if err != nil {
 					return nil, err
 				}
-				for _, str := range strs {
-					args = append(args, str)
+					args = append(args, strs...)
+				} else {
+					args = append(args, replaceEnv(p.Getenv, buf))
 				}
 			} else {
 				args = append(args, buf)
 			}
 			buf = ""
-			got = false
+			got = argNo
 		}
 		continue
 	}
@@ -153,7 +224,7 @@ loop:
 		case '"':
 			if !singleQuoted && !dollarQuote {
 				if doubleQuoted {
-					got = true
+					got = argQuoted
 				}
 				doubleQuoted = !doubleQuoted
 				continue
@@ -161,7 +232,7 @@ loop:
 		case '\'':
 			if !doubleQuoted && !dollarQuote {
 				if singleQuoted {
-					got = true
+					got = argSingle
 				}
 				singleQuoted = !singleQuoted
 				continue
@@ -171,7 +242,7 @@ loop:
 			if r == '>' && len(buf) > 0 {
 				if c := buf[0]; '0' <= c && c <= '9' {
 					i -= 1
-					got = false
+					got = argNo
 				}
 			}
 			pos = i
@@ -179,22 +250,24 @@ loop:
 			}
 		}
 
-		got = true
+		got = argSingle
 		buf += string(r)
 		if backQuote || dollarQuote {
 			backtick += string(r)
 		}
 	}
 
-	if got {
+	if got != argNo {
 		if p.ParseEnv {
+			if got == argSingle {
 			parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir}
 			strs, err := parser.Parse(replaceEnv(p.Getenv, buf))
 			if err != nil {
 				return nil, err
 			}
-			for _, str := range strs {
-				args = append(args, str)
+				args = append(args, strs...)
+			} else {
+				args = append(args, replaceEnv(p.Getenv, buf))
 			}
 		} else {
 			args = append(args, buf)
@@ -210,6 +283,35 @@ loop:
 	return args, nil
 }
 
+func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) {
+	_args, err := p.Parse(line)
+	if err != nil {
+		return nil, nil, err
+	}
+	envs = []string{}
+	args = []string{}
+	parsingEnv := true
+	for _, arg := range _args {
+		if parsingEnv && isEnv(arg) {
+			envs = append(envs, arg)
+		} else {
+			if parsingEnv {
+				parsingEnv = false
+			}
+			args = append(args, arg)
+		}
+	}
+	return envs, args, nil
+}
+
+func isEnv(arg string) bool {
+	return len(strings.Split(arg, "=")) == 2
+}
+
 func Parse(line string) ([]string, error) {
 	return NewParser().Parse(line)
 }
+
+func ParseWithEnvs(line string) (envs []string, args []string, err error) {
+	return NewParser().ParseWithEnvs(line)
+}
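To close out the go-shellwords changes, a small usage sketch (illustrative only; the expected values in the comments mirror the README examples above rather than a captured run). ParseWithEnvs splits leading NAME=value words from the arguments, and environment expansion now goes through the hand-written scanner in replaceEnv instead of a regular expression, handling both $NAME and ${NAME} forms:

package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-shellwords"
)

func main() {
	// New in this version: leading NAME=value words are returned separately.
	envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz")
	if err != nil {
		panic(err)
	}
	fmt.Println(envs) // [FOO=foo BAR=baz]
	fmt.Println(args) // [./foo --bar=baz]

	// ${NAME} expansion with ParseEnv enabled.
	os.Setenv("FOO", "bar")
	p := shellwords.NewParser()
	p.ParseEnv = true
	expanded, err := p.Parse("echo ${FOO}")
	if err != nil {
		panic(err)
	}
	fmt.Println(expanded) // [echo bar]
}
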
vendor/github.com/mattn/go-shellwords/util_posix.go (generated, vendored): 4 changes
@@ -3,7 +3,7 @@
 package shellwords
 
 import (
-	"errors"
+	"fmt"
 	"os"
 	"os/exec"
 	"strings"
@@ -23,7 +23,7 @@ func shellRun(line, dir string) (string, error) {
 		if eerr, ok := err.(*exec.ExitError); ok {
 			b = eerr.Stderr
 		}
-		return "", errors.New(err.Error() + ":" + string(b))
+		return "", fmt.Errorf("%s: %w", string(b), err)
 	}
 	return strings.TrimSpace(string(b)), nil
 }
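The switch from errors.New to fmt.Errorf above is not just cosmetic: the %w verb wraps the original error so callers can inspect it with errors.Is or errors.As. A standalone illustration of that difference (standard-library behaviour, not code from this repository; it assumes a POSIX sh on PATH):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	_, err := exec.Command("sh", "-c", "exit 3").Output()

	// Pre-change style: the exit error is flattened into a string and lost.
	flat := errors.New(err.Error() + ": extra context")
	// Post-change style: %w keeps the original error in the chain.
	wrapped := fmt.Errorf("extra context: %w", err)

	var ee *exec.ExitError
	fmt.Println(errors.As(flat, &ee))    // false
	fmt.Println(errors.As(wrapped, &ee)) // true
}
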
vendor/github.com/mattn/go-shellwords/util_windows.go (generated, vendored): 4 changes
@@ -3,7 +3,7 @@
 package shellwords
 
 import (
-	"errors"
+	"fmt"
 	"os"
 	"os/exec"
 	"strings"
@@ -23,7 +23,7 @@ func shellRun(line, dir string) (string, error) {
 		if eerr, ok := err.(*exec.ExitError); ok {
 			b = eerr.Stderr
 		}
-		return "", errors.New(err.Error() + ":" + string(b))
+		return "", fmt.Errorf("%s: %w", string(b), err)
 	}
 	return strings.TrimSpace(string(b)), nil
 }
vendor/modules.txt (vendored): 4 changes
@@ -108,7 +108,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7
 github.com/containers/ocicrypt/spec
 github.com/containers/ocicrypt/utils
 github.com/containers/ocicrypt/utils/keyprovider
-# github.com/containers/storage v1.24.5
+# github.com/containers/storage v1.25.0
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -235,7 +235,7 @@ github.com/kr/pretty
 github.com/kr/text
 # github.com/mattn/go-runewidth v0.0.9
 github.com/mattn/go-runewidth
-# github.com/mattn/go-shellwords v1.0.10
+# github.com/mattn/go-shellwords v1.0.11
 github.com/mattn/go-shellwords
 # github.com/matttproud/golang_protobuf_extensions v1.0.1
 github.com/matttproud/golang_protobuf_extensions/pbutil