bump containers/image to 2541165

Signed-off-by: Paul Fisher <pfisher@lyft.com>
Paul Fisher
2021-10-21 13:07:02 -07:00
parent 8f64c0412f
commit c8777f3bf7
31 changed files with 174 additions and 1595 deletions

go.mod
View File

@@ -3,16 +3,12 @@ module github.com/containers/skopeo
 go 1.12
 
 require (
-	github.com/bits-and-blooms/bitset v1.2.1 // indirect
-	github.com/containerd/containerd v1.5.7 // indirect
 	github.com/containers/common v0.46.0
-	github.com/containers/image/v5 v5.16.1
+	github.com/containers/image/v5 v5.16.2-0.20211021181114-25411654075f
 	github.com/containers/ocicrypt v1.1.2
 	github.com/containers/storage v1.37.0
 	github.com/docker/docker v20.10.9+incompatible
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/uuid v1.3.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
 	github.com/opencontainers/image-tools v1.0.0-rc3
@@ -23,11 +19,6 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.7.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-	golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b // indirect
-	golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef // indirect
-	golang.org/x/text v0.3.7 // indirect
-	google.golang.org/genproto v0.0.0-20211005153810-c76a74d43a8e // indirect
-	google.golang.org/grpc v1.41.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 )
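The new containers/image requirement is a Go module pseudo-version rather than a tag: it encodes the upstream commit time (20211021181114 UTC) and the short commit hash (25411654075f) referenced by the commit title. A minimal, illustrative sketch of how such a string is built, using golang.org/x/mod (not a dependency of this repository; shown only to explain the version format):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/mod/module"
)

func main() {
	// Pseudo-version for a commit made after the v5.16.1 tag:
	// next patch version, "-0." separator, UTC commit time, 12-character hash prefix.
	v := module.PseudoVersion("v5", "v5.16.1",
		time.Date(2021, 10, 21, 18, 11, 14, 0, time.UTC), "25411654075f")
	fmt.Println(v) // v5.16.2-0.20211021181114-25411654075f
}
```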

go.sum
View File

@@ -74,7 +74,6 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.22 h1:CulZ3GW8sNJExknToo+RWD+U+6ZM5kkNfuxywSDPd08=
@@ -110,8 +109,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bits-and-blooms/bitset v1.2.1 h1:M+/hrU9xlMp7t4TyTDQW97d3tRPVuKFC6zBEK16QnXY=
-github.com/bits-and-blooms/bitset v1.2.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
@@ -177,7 +174,6 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw=
 github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -232,8 +228,8 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
 github.com/containers/common v0.46.0 h1:95zB7kYBQJW+aK5xxZnaobCwoPyYOf85Y0yUx0E5aRg=
 github.com/containers/common v0.46.0/go.mod h1:zxv7KjdYddSGoWuLUVp6eSb++Ow1zmSMB2jwxuNB4cU=
 github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4=
-github.com/containers/image/v5 v5.16.1 h1:4786k48/af3dOkVf9EM+xB880ArkXalICsGC4AXC6to=
-github.com/containers/image/v5 v5.16.1/go.mod h1:mCvIFdzyyP1B0NBcZ80OIuaYqFn/OpFpaOMOMn1kU2M=
+github.com/containers/image/v5 v5.16.2-0.20211021181114-25411654075f h1:gqeQG8jumo9u6TmyUPrSCuflSjkQX+zFa5bRJAZYz2g=
+github.com/containers/image/v5 v5.16.2-0.20211021181114-25411654075f/go.mod h1:ERFkrXC5lLXov65ia/NiKBKfEeHgiItB9bQz6fReQ7g=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -652,8 +648,9 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi
 github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
 github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
-github.com/opencontainers/selinux v1.8.5 h1:OkT6bMHOQ1JQQO4ihjQ49sj0+wciDcjziSVTRn8VeTA=
 github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
+github.com/opencontainers/selinux v1.9.1 h1:b4VPEF3O5JLZgdTDBmGepaaIbAo0GqoF6EBRq5f/g3Y=
+github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
 github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
 github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=

View File

@@ -1,26 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
target

View File

@@ -1,37 +0,0 @@
language: go
sudo: false
branches:
except:
- release
branches:
only:
- master
- travis
go:
- "1.11.x"
- tip
matrix:
allow_failures:
- go: tip
before_install:
- if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
- if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
- go get github.com/mattn/goveralls
before_script:
- make deps
script:
- make qa
after_failure:
- cat ./target/test/report.xml
after_success:
- if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;

View File

@@ -1,27 +0,0 @@
Copyright (c) 2014 Will Fitzgerald. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,93 +0,0 @@
# bitset
*Go language library to map between non-negative integers and boolean values*
[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest)
[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
## Description
Package bitset implements bitsets, a mapping between non-negative integers and boolean values.
It should be more efficient than map[uint] bool.
It provides methods for setting, clearing, flipping, and testing individual integers.
But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits.
BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used.
Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining.
### Example use:
```go
package main
import (
"fmt"
"math/rand"
"github.com/bits-and-blooms/bitset"
)
func main() {
fmt.Printf("Hello from BitSet!\n")
var b bitset.BitSet
// play some Go Fish
for i := 0; i < 100; i++ {
card1 := uint(rand.Intn(52))
card2 := uint(rand.Intn(52))
b.Set(card1)
if b.Test(card2) {
fmt.Println("Go Fish!")
}
b.Clear(card1)
}
// Chaining
b.Set(10).Set(11)
for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) {
fmt.Println("The following bit is set:", i)
}
if b.Intersection(bitset.New(100).Set(10)).Count() == 1 {
fmt.Println("Intersection works.")
} else {
fmt.Println("Intersection doesn't work???")
}
}
```
As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets.
Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
## Memory Usage
The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring).
## Implementation Note
Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed.
It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped.
## Installation
```bash
go get github.com/bits-and-blooms/bitset
```
## Contributing
If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)")
## Running all tests
Before committing the code, please check if it passes tests, has adequate coverage, etc.
```bash
go test
go test -cover
```

View File

@@ -1,39 +0,0 @@
# Go
# Build your Go project.
# Add steps that test, save build artifacts, deploy, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/go
trigger:
- master
pool:
vmImage: 'Ubuntu-16.04'
variables:
GOBIN: '$(GOPATH)/bin' # Go binaries path
GOROOT: '/usr/local/go1.11' # Go installation path
GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code
steps:
- script: |
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
displayName: 'Set up the Go workspace'
- script: |
go version
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
go build -v .
workingDirectory: '$(modulePath)'
displayName: 'Get dependencies, then build'

View File

@@ -1,954 +0,0 @@
/*
Package bitset implements bitsets, a mapping
between non-negative integers and boolean values. It should be more
efficient than map[uint] bool.
It provides methods for setting, clearing, flipping, and testing
individual integers.
But it also provides set intersection, union, difference,
complement, and symmetric operations, as well as tests to
check whether any, all, or no bits are set, and querying a
bitset's current length and number of positive bits.
BitSets are expanded to the size of the largest set bit; the
memory allocation is approximately Max bits, where Max is
the largest set bit. BitSets are never shrunk. On creation,
a hint can be given for the number of bits that will be used.
Many of the methods, including Set,Clear, and Flip, return
a BitSet pointer, which allows for chaining.
Example use:
import "bitset"
var b BitSet
b.Set(10).Set(11)
if b.Test(1000) {
b.Clear(1000)
}
if B.Intersection(bitset.New(100).Set(10)).Count() > 1 {
fmt.Println("Intersection works.")
}
As an alternative to BitSets, one should check out the 'big' package,
which provides a (less set-theoretical) view of bitsets.
*/
package bitset
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"strconv"
)
// the wordSize of a bit set
const wordSize = uint(64)
// log2WordSize is lg(wordSize)
const log2WordSize = uint(6)
// allBits has every bit set
const allBits uint64 = 0xffffffffffffffff
// default binary BigEndian
var binaryOrder binary.ByteOrder = binary.BigEndian
// default json encoding base64.URLEncoding
var base64Encoding = base64.URLEncoding
// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding)
func Base64StdEncoding() { base64Encoding = base64.StdEncoding }
// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian)
func LittleEndian() { binaryOrder = binary.LittleEndian }
// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0.
type BitSet struct {
length uint
set []uint64
}
// Error is used to distinguish errors (panics) generated in this package.
type Error string
// safeSet will fixup b.set to be non-nil and return the field value
func (b *BitSet) safeSet() []uint64 {
if b.set == nil {
b.set = make([]uint64, wordsNeeded(0))
}
return b.set
}
// From is a constructor used to create a BitSet from an array of integers
func From(buf []uint64) *BitSet {
return &BitSet{uint(len(buf)) * 64, buf}
}
// Bytes returns the bitset as array of integers
func (b *BitSet) Bytes() []uint64 {
return b.set
}
// wordsNeeded calculates the number of words needed for i bits
func wordsNeeded(i uint) int {
if i > (Cap() - wordSize + 1) {
return int(Cap() >> log2WordSize)
}
return int((i + (wordSize - 1)) >> log2WordSize)
}
// New creates a new BitSet with a hint that length bits will be required
func New(length uint) (bset *BitSet) {
defer func() {
if r := recover(); r != nil {
bset = &BitSet{
0,
make([]uint64, 0),
}
}
}()
bset = &BitSet{
length,
make([]uint64, wordsNeeded(length)),
}
return bset
}
// Cap returns the total possible capacity, or number of bits
func Cap() uint {
return ^uint(0)
}
// Len returns the number of bits in the BitSet.
// Note the difference to method Count, see example.
func (b *BitSet) Len() uint {
return b.length
}
// extendSetMaybe adds additional words to incorporate new bits if needed
func (b *BitSet) extendSetMaybe(i uint) {
if i >= b.length { // if we need more bits, make 'em
if i >= Cap() {
panic("You are exceeding the capacity")
}
nsize := wordsNeeded(i + 1)
if b.set == nil {
b.set = make([]uint64, nsize)
} else if cap(b.set) >= nsize {
b.set = b.set[:nsize] // fast resize
} else if len(b.set) < nsize {
newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x
copy(newset, b.set)
b.set = newset
}
b.length = i + 1
}
}
// Test whether bit i is set.
func (b *BitSet) Test(i uint) bool {
if i >= b.length {
return false
}
return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0
}
// Set bit i to 1, the capacity of the bitset is automatically
// increased accordingly.
// If i>= Cap(), this function will panic.
// Warning: using a very large value for 'i'
// may lead to a memory shortage and a panic: the caller is responsible
// for providing sensible parameters in line with their memory capacity.
func (b *BitSet) Set(i uint) *BitSet {
b.extendSetMaybe(i)
b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1))
return b
}
// Clear bit i to 0
func (b *BitSet) Clear(i uint) *BitSet {
if i >= b.length {
return b
}
b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1))
return b
}
// SetTo sets bit i to value.
// If i>= Cap(), this function will panic.
// Warning: using a very large value for 'i'
// may lead to a memory shortage and a panic: the caller is responsible
// for providing sensible parameters in line with their memory capacity.
func (b *BitSet) SetTo(i uint, value bool) *BitSet {
if value {
return b.Set(i)
}
return b.Clear(i)
}
// Flip bit at i.
// If i>= Cap(), this function will panic.
// Warning: using a very large value for 'i'
// may lead to a memory shortage and a panic: the caller is responsible
// for providing sensible parameters in line with their memory capacity.
func (b *BitSet) Flip(i uint) *BitSet {
if i >= b.length {
return b.Set(i)
}
b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1))
return b
}
// FlipRange bit in [start, end).
// If end>= Cap(), this function will panic.
// Warning: using a very large value for 'end'
// may lead to a memory shortage and a panic: the caller is responsible
// for providing sensible parameters in line with their memory capacity.
func (b *BitSet) FlipRange(start, end uint) *BitSet {
if start >= end {
return b
}
b.extendSetMaybe(end - 1)
var startWord uint = start >> log2WordSize
var endWord uint = end >> log2WordSize
b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
for i := startWord; i < endWord; i++ {
b.set[i] = ^b.set[i]
}
b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
return b
}
// Shrink shrinks BitSet so that the provided value is the last possible
// set value. It clears all bits > the provided index and reduces the size
// and length of the set.
//
// Note that the parameter value is not the new length in bits: it is the
// maximal value that can be stored in the bitset after the function call.
// The new length in bits is the parameter value + 1. Thus it is not possible
// to use this function to set the length to 0, the minimal value of the length
// after this function call is 1.
//
// A new slice is allocated to store the new bits, so you may see an increase in
// memory usage until the GC runs. Normally this should not be a problem, but if you
// have an extremely large BitSet its important to understand that the old BitSet will
// remain in memory until the GC frees it.
func (b *BitSet) Shrink(lastbitindex uint) *BitSet {
length := lastbitindex + 1
idx := wordsNeeded(length)
if idx > len(b.set) {
return b
}
shrunk := make([]uint64, idx)
copy(shrunk, b.set[:idx])
b.set = shrunk
b.length = length
if length < 64 {
b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1))))
}
return b
}
// Compact shrinks BitSet to so that we preserve all set bits, while minimizing
// memory usage. Compact calls Shrink.
func (b *BitSet) Compact() *BitSet {
idx := len(b.set) - 1
for ; idx >= 0 && b.set[idx] == 0; idx-- {
}
newlength := uint((idx + 1) << log2WordSize)
if newlength >= b.length {
return b // nothing to do
}
if newlength > 0 {
return b.Shrink(newlength - 1)
}
// We preserve one word
return b.Shrink(63)
}
// InsertAt takes an index which indicates where a bit should be
// inserted. Then it shifts all the bits in the set to the left by 1, starting
// from the given index position, and sets the index position to 0.
//
// Depending on the size of your BitSet, and where you are inserting the new entry,
// this method could be extremely slow and in some cases might cause the entire BitSet
// to be recopied.
func (b *BitSet) InsertAt(idx uint) *BitSet {
insertAtElement := (idx >> log2WordSize)
// if length of set is a multiple of wordSize we need to allocate more space first
if b.isLenExactMultiple() {
b.set = append(b.set, uint64(0))
}
var i uint
for i = uint(len(b.set) - 1); i > insertAtElement; i-- {
// all elements above the position where we want to insert can simply by shifted
b.set[i] <<= 1
// we take the most significant bit of the previous element and set it as
// the least significant bit of the current element
b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63
}
// generate a mask to extract the data that we need to shift left
// within the element where we insert a bit
dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1)
// extract that data that we'll shift
data := b.set[i] & dataMask
// set the positions of the data mask to 0 in the element where we insert
b.set[i] &= ^dataMask
// shift data mask to the left and insert its data to the slice element
b.set[i] |= data << 1
// add 1 to length of BitSet
b.length++
return b
}
// String creates a string representation of the Bitmap
func (b *BitSet) String() string {
// follows code from https://github.com/RoaringBitmap/roaring
var buffer bytes.Buffer
start := []byte("{")
buffer.Write(start)
counter := 0
i, e := b.NextSet(0)
for e {
counter = counter + 1
// to avoid exhausting the memory
if counter > 0x40000 {
buffer.WriteString("...")
break
}
buffer.WriteString(strconv.FormatInt(int64(i), 10))
i, e = b.NextSet(i + 1)
if e {
buffer.WriteString(",")
}
}
buffer.WriteString("}")
return buffer.String()
}
// DeleteAt deletes the bit at the given index position from
// within the bitset
// All the bits residing on the left of the deleted bit get
// shifted right by 1
// The running time of this operation may potentially be
// relatively slow, O(length)
func (b *BitSet) DeleteAt(i uint) *BitSet {
// the index of the slice element where we'll delete a bit
deleteAtElement := i >> log2WordSize
// generate a mask for the data that needs to be shifted right
// within that slice element that gets modified
dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1)
// extract the data that we'll shift right from the slice element
data := b.set[deleteAtElement] & dataMask
// set the masked area to 0 while leaving the rest as it is
b.set[deleteAtElement] &= ^dataMask
// shift the previously extracted data to the right and then
// set it in the previously masked area
b.set[deleteAtElement] |= (data >> 1) & dataMask
// loop over all the consecutive slice elements to copy each
// lowest bit into the highest position of the previous element,
// then shift the entire content to the right by 1
for i := int(deleteAtElement) + 1; i < len(b.set); i++ {
b.set[i-1] |= (b.set[i] & 1) << 63
b.set[i] >>= 1
}
b.length = b.length - 1
return b
}
// NextSet returns the next bit set from the specified index,
// including possibly the current index
// along with an error code (true = valid, false = no set bit found)
// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...}
//
// Users concerned with performance may want to use NextSetMany to
// retrieve several values at once.
func (b *BitSet) NextSet(i uint) (uint, bool) {
x := int(i >> log2WordSize)
if x >= len(b.set) {
return 0, false
}
w := b.set[x]
w = w >> (i & (wordSize - 1))
if w != 0 {
return i + trailingZeroes64(w), true
}
x = x + 1
for x < len(b.set) {
if b.set[x] != 0 {
return uint(x)*wordSize + trailingZeroes64(b.set[x]), true
}
x = x + 1
}
return 0, false
}
// NextSetMany returns many next bit sets from the specified index,
// including possibly the current index and up to cap(buffer).
// If the returned slice has len zero, then no more set bits were found
//
// buffer := make([]uint, 256) // this should be reused
// j := uint(0)
// j, buffer = bitmap.NextSetMany(j, buffer)
// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) {
// for k := range buffer {
// do something with buffer[k]
// }
// j += 1
// }
//
//
// It is possible to retrieve all set bits as follow:
//
// indices := make([]uint, bitmap.Count())
// bitmap.NextSetMany(0, indices)
//
// However if bitmap.Count() is large, it might be preferable to
// use several calls to NextSetMany, for performance reasons.
func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) {
myanswer := buffer
capacity := cap(buffer)
x := int(i >> log2WordSize)
if x >= len(b.set) || capacity == 0 {
return 0, myanswer[:0]
}
skip := i & (wordSize - 1)
word := b.set[x] >> skip
myanswer = myanswer[:capacity]
size := int(0)
for word != 0 {
r := trailingZeroes64(word)
t := word & ((^word) + 1)
myanswer[size] = r + i
size++
if size == capacity {
goto End
}
word = word ^ t
}
x++
for idx, word := range b.set[x:] {
for word != 0 {
r := trailingZeroes64(word)
t := word & ((^word) + 1)
myanswer[size] = r + (uint(x+idx) << 6)
size++
if size == capacity {
goto End
}
word = word ^ t
}
}
End:
if size > 0 {
return myanswer[size-1], myanswer[:size]
}
return 0, myanswer[:0]
}
// NextClear returns the next clear bit from the specified index,
// including possibly the current index
// along with an error code (true = valid, false = no bit found i.e. all bits are set)
func (b *BitSet) NextClear(i uint) (uint, bool) {
x := int(i >> log2WordSize)
if x >= len(b.set) {
return 0, false
}
w := b.set[x]
w = w >> (i & (wordSize - 1))
wA := allBits >> (i & (wordSize - 1))
index := i + trailingZeroes64(^w)
if w != wA && index < b.length {
return index, true
}
x++
for x < len(b.set) {
index = uint(x)*wordSize + trailingZeroes64(^b.set[x])
if b.set[x] != allBits && index < b.length {
return index, true
}
x++
}
return 0, false
}
// ClearAll clears the entire BitSet
func (b *BitSet) ClearAll() *BitSet {
if b != nil && b.set != nil {
for i := range b.set {
b.set[i] = 0
}
}
return b
}
// wordCount returns the number of words used in a bit set
func (b *BitSet) wordCount() int {
return len(b.set)
}
// Clone this BitSet
func (b *BitSet) Clone() *BitSet {
c := New(b.length)
if b.set != nil { // Clone should not modify current object
copy(c.set, b.set)
}
return c
}
// Copy into a destination BitSet
// Returning the size of the destination BitSet
// like array copy
func (b *BitSet) Copy(c *BitSet) (count uint) {
if c == nil {
return
}
if b.set != nil { // Copy should not modify current object
copy(c.set, b.set)
}
count = c.length
if b.length < c.length {
count = b.length
}
return
}
// Count (number of set bits).
// Also known as "popcount" or "population count".
func (b *BitSet) Count() uint {
if b != nil && b.set != nil {
return uint(popcntSlice(b.set))
}
return 0
}
// Equal tests the equivalence of two BitSets.
// False if they are of different sizes, otherwise true
// only if all the same bits are set
func (b *BitSet) Equal(c *BitSet) bool {
if c == nil || b == nil {
return c == b
}
if b.length != c.length {
return false
}
if b.length == 0 { // if they have both length == 0, then could have nil set
return true
}
// testing for equality shoud not transform the bitset (no call to safeSet)
for p, v := range b.set {
if c.set[p] != v {
return false
}
}
return true
}
func panicIfNull(b *BitSet) {
if b == nil {
panic(Error("BitSet must not be null"))
}
}
// Difference of base set and other set
// This is the BitSet equivalent of &^ (and not)
func (b *BitSet) Difference(compare *BitSet) (result *BitSet) {
panicIfNull(b)
panicIfNull(compare)
result = b.Clone() // clone b (in case b is bigger than compare)
l := int(compare.wordCount())
if l > int(b.wordCount()) {
l = int(b.wordCount())
}
for i := 0; i < l; i++ {
result.set[i] = b.set[i] &^ compare.set[i]
}
return
}
// DifferenceCardinality computes the cardinality of the differnce
func (b *BitSet) DifferenceCardinality(compare *BitSet) uint {
panicIfNull(b)
panicIfNull(compare)
l := int(compare.wordCount())
if l > int(b.wordCount()) {
l = int(b.wordCount())
}
cnt := uint64(0)
cnt += popcntMaskSlice(b.set[:l], compare.set[:l])
cnt += popcntSlice(b.set[l:])
return uint(cnt)
}
// InPlaceDifference computes the difference of base set and other set
// This is the BitSet equivalent of &^ (and not)
func (b *BitSet) InPlaceDifference(compare *BitSet) {
panicIfNull(b)
panicIfNull(compare)
l := int(compare.wordCount())
if l > int(b.wordCount()) {
l = int(b.wordCount())
}
for i := 0; i < l; i++ {
b.set[i] &^= compare.set[i]
}
}
// Convenience function: return two bitsets ordered by
// increasing length. Note: neither can be nil
func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) {
if a.length <= b.length {
ap, bp = a, b
} else {
ap, bp = b, a
}
return
}
// Intersection of base set and other set
// This is the BitSet equivalent of & (and)
func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) {
panicIfNull(b)
panicIfNull(compare)
b, compare = sortByLength(b, compare)
result = New(b.length)
for i, word := range b.set {
result.set[i] = word & compare.set[i]
}
return
}
// IntersectionCardinality computes the cardinality of the union
func (b *BitSet) IntersectionCardinality(compare *BitSet) uint {
panicIfNull(b)
panicIfNull(compare)
b, compare = sortByLength(b, compare)
cnt := popcntAndSlice(b.set, compare.set)
return uint(cnt)
}
// InPlaceIntersection destructively computes the intersection of
// base set and the compare set.
// This is the BitSet equivalent of & (and)
func (b *BitSet) InPlaceIntersection(compare *BitSet) {
panicIfNull(b)
panicIfNull(compare)
l := int(compare.wordCount())
if l > int(b.wordCount()) {
l = int(b.wordCount())
}
for i := 0; i < l; i++ {
b.set[i] &= compare.set[i]
}
for i := l; i < len(b.set); i++ {
b.set[i] = 0
}
if compare.length > 0 {
b.extendSetMaybe(compare.length - 1)
}
}
// Union of base set and other set
// This is the BitSet equivalent of | (or)
func (b *BitSet) Union(compare *BitSet) (result *BitSet) {
panicIfNull(b)
panicIfNull(compare)
b, compare = sortByLength(b, compare)
result = compare.Clone()
for i, word := range b.set {
result.set[i] = word | compare.set[i]
}
return
}
// UnionCardinality computes the cardinality of the uniton of the base set
// and the compare set.
func (b *BitSet) UnionCardinality(compare *BitSet) uint {
panicIfNull(b)
panicIfNull(compare)
b, compare = sortByLength(b, compare)
cnt := popcntOrSlice(b.set, compare.set)
if len(compare.set) > len(b.set) {
cnt += popcntSlice(compare.set[len(b.set):])
}
return uint(cnt)
}
// InPlaceUnion creates the destructive union of base set and compare set.
// This is the BitSet equivalent of | (or).
func (b *BitSet) InPlaceUnion(compare *BitSet) {
panicIfNull(b)
panicIfNull(compare)
l := int(compare.wordCount())
if l > int(b.wordCount()) {
l = int(b.wordCount())
}
if compare.length > 0 {
b.extendSetMaybe(compare.length - 1)
}
for i := 0; i < l; i++ {
b.set[i] |= compare.set[i]
}
if len(compare.set) > l {
for i := l; i < len(compare.set); i++ {
b.set[i] = compare.set[i]
}
}
}
// SymmetricDifference of base set and other set
// This is the BitSet equivalent of ^ (xor)
func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) {
panicIfNull(b)
panicIfNull(compare)
b, compare = sortByLength(b, compare)
// compare is bigger, so clone it
result = compare.Clone()
for i, word := range b.set {
result.set[i] = word ^ compare.set[i]
}
return
}
// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference
func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint {
panicIfNull(b)
panicIfNull(compare)
b, compare = sortByLength(b, compare)
cnt := popcntXorSlice(b.set, compare.set)
if len(compare.set) > len(b.set) {
cnt += popcntSlice(compare.set[len(b.set):])
}
return uint(cnt)
}
// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set
// This is the BitSet equivalent of ^ (xor)
func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) {
panicIfNull(b)
panicIfNull(compare)
l := int(compare.wordCount())
if l > int(b.wordCount()) {
l = int(b.wordCount())
}
if compare.length > 0 {
b.extendSetMaybe(compare.length - 1)
}
for i := 0; i < l; i++ {
b.set[i] ^= compare.set[i]
}
if len(compare.set) > l {
for i := l; i < len(compare.set); i++ {
b.set[i] = compare.set[i]
}
}
}
// Is the length an exact multiple of word sizes?
func (b *BitSet) isLenExactMultiple() bool {
return b.length%wordSize == 0
}
// Clean last word by setting unused bits to 0
func (b *BitSet) cleanLastWord() {
if !b.isLenExactMultiple() {
b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize)
}
}
// Complement computes the (local) complement of a biset (up to length bits)
func (b *BitSet) Complement() (result *BitSet) {
panicIfNull(b)
result = New(b.length)
for i, word := range b.set {
result.set[i] = ^word
}
result.cleanLastWord()
return
}
// All returns true if all bits are set, false otherwise. Returns true for
// empty sets.
func (b *BitSet) All() bool {
panicIfNull(b)
return b.Count() == b.length
}
// None returns true if no bit is set, false otherwise. Returns true for
// empty sets.
func (b *BitSet) None() bool {
panicIfNull(b)
if b != nil && b.set != nil {
for _, word := range b.set {
if word > 0 {
return false
}
}
return true
}
return true
}
// Any returns true if any bit is set, false otherwise
func (b *BitSet) Any() bool {
panicIfNull(b)
return !b.None()
}
// IsSuperSet returns true if this is a superset of the other set
func (b *BitSet) IsSuperSet(other *BitSet) bool {
for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) {
if !b.Test(i) {
return false
}
}
return true
}
// IsStrictSuperSet returns true if this is a strict superset of the other set
func (b *BitSet) IsStrictSuperSet(other *BitSet) bool {
return b.Count() > other.Count() && b.IsSuperSet(other)
}
// DumpAsBits dumps a bit set as a string of bits
func (b *BitSet) DumpAsBits() string {
if b.set == nil {
return "."
}
buffer := bytes.NewBufferString("")
i := len(b.set) - 1
for ; i >= 0; i-- {
fmt.Fprintf(buffer, "%064b.", b.set[i])
}
return buffer.String()
}
// BinaryStorageSize returns the binary storage requirements
func (b *BitSet) BinaryStorageSize() int {
return binary.Size(uint64(0)) + binary.Size(b.set)
}
// WriteTo writes a BitSet to a stream
func (b *BitSet) WriteTo(stream io.Writer) (int64, error) {
length := uint64(b.length)
// Write length
err := binary.Write(stream, binaryOrder, length)
if err != nil {
return 0, err
}
// Write set
err = binary.Write(stream, binaryOrder, b.set)
return int64(b.BinaryStorageSize()), err
}
// ReadFrom reads a BitSet from a stream written using WriteTo
func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) {
var length uint64
// Read length first
err := binary.Read(stream, binaryOrder, &length)
if err != nil {
return 0, err
}
newset := New(uint(length))
if uint64(newset.length) != length {
return 0, errors.New("unmarshalling error: type mismatch")
}
// Read remaining bytes as set
err = binary.Read(stream, binaryOrder, newset.set)
if err != nil {
return 0, err
}
*b = *newset
return int64(b.BinaryStorageSize()), nil
}
// MarshalBinary encodes a BitSet into a binary form and returns the result.
func (b *BitSet) MarshalBinary() ([]byte, error) {
var buf bytes.Buffer
writer := bufio.NewWriter(&buf)
_, err := b.WriteTo(writer)
if err != nil {
return []byte{}, err
}
err = writer.Flush()
return buf.Bytes(), err
}
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
func (b *BitSet) UnmarshalBinary(data []byte) error {
buf := bytes.NewReader(data)
reader := bufio.NewReader(buf)
_, err := b.ReadFrom(reader)
return err
}
// MarshalJSON marshals a BitSet as a JSON structure
func (b *BitSet) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize()))
_, err := b.WriteTo(buffer)
if err != nil {
return nil, err
}
// URLEncode all bytes
return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes()))
}
// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON
func (b *BitSet) UnmarshalJSON(data []byte) error {
// Unmarshal as string
var s string
err := json.Unmarshal(data, &s)
if err != nil {
return err
}
// URLDecode string
buf, err := base64Encoding.DecodeString(s)
if err != nil {
return err
}
_, err = b.ReadFrom(bytes.NewReader(buf))
return err
}

View File

@@ -1,3 +0,0 @@
module github.com/bits-and-blooms/bitset
go 1.14

View File

View File

@@ -1,53 +0,0 @@
package bitset
// bit population count, take from
// https://code.google.com/p/go/issues/detail?id=4988#c11
// credit: https://code.google.com/u/arnehormann/
func popcount(x uint64) (n uint64) {
x -= (x >> 1) & 0x5555555555555555
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
x += x >> 4
x &= 0x0f0f0f0f0f0f0f0f
x *= 0x0101010101010101
return x >> 56
}
func popcntSliceGo(s []uint64) uint64 {
cnt := uint64(0)
for _, x := range s {
cnt += popcount(x)
}
return cnt
}
func popcntMaskSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] &^ m[i])
}
return cnt
}
func popcntAndSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] & m[i])
}
return cnt
}
func popcntOrSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] | m[i])
}
return cnt
}
func popcntXorSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] ^ m[i])
}
return cnt
}

View File

@@ -1,45 +0,0 @@
// +build go1.9
package bitset
import "math/bits"
func popcntSlice(s []uint64) uint64 {
var cnt int
for _, x := range s {
cnt += bits.OnesCount64(x)
}
return uint64(cnt)
}
func popcntMaskSlice(s, m []uint64) uint64 {
var cnt int
for i := range s {
cnt += bits.OnesCount64(s[i] &^ m[i])
}
return uint64(cnt)
}
func popcntAndSlice(s, m []uint64) uint64 {
var cnt int
for i := range s {
cnt += bits.OnesCount64(s[i] & m[i])
}
return uint64(cnt)
}
func popcntOrSlice(s, m []uint64) uint64 {
var cnt int
for i := range s {
cnt += bits.OnesCount64(s[i] | m[i])
}
return uint64(cnt)
}
func popcntXorSlice(s, m []uint64) uint64 {
var cnt int
for i := range s {
cnt += bits.OnesCount64(s[i] ^ m[i])
}
return uint64(cnt)
}

View File

@@ -1,68 +0,0 @@
// +build !go1.9
// +build amd64,!appengine
package bitset
// *** the following functions are defined in popcnt_amd64.s
//go:noescape
func hasAsm() bool
// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
var useAsm = hasAsm()
//go:noescape
func popcntSliceAsm(s []uint64) uint64
//go:noescape
func popcntMaskSliceAsm(s, m []uint64) uint64
//go:noescape
func popcntAndSliceAsm(s, m []uint64) uint64
//go:noescape
func popcntOrSliceAsm(s, m []uint64) uint64
//go:noescape
func popcntXorSliceAsm(s, m []uint64) uint64
func popcntSlice(s []uint64) uint64 {
if useAsm {
return popcntSliceAsm(s)
}
return popcntSliceGo(s)
}
func popcntMaskSlice(s, m []uint64) uint64 {
if useAsm {
return popcntMaskSliceAsm(s, m)
}
return popcntMaskSliceGo(s, m)
}
func popcntAndSlice(s, m []uint64) uint64 {
if useAsm {
return popcntAndSliceAsm(s, m)
}
return popcntAndSliceGo(s, m)
}
func popcntOrSlice(s, m []uint64) uint64 {
if useAsm {
return popcntOrSliceAsm(s, m)
}
return popcntOrSliceGo(s, m)
}
func popcntXorSlice(s, m []uint64) uint64 {
if useAsm {
return popcntXorSliceAsm(s, m)
}
return popcntXorSliceGo(s, m)
}

View File

@@ -1,104 +0,0 @@
// +build !go1.9
// +build amd64,!appengine
TEXT ·hasAsm(SB),4,$0-1
MOVQ $1, AX
CPUID
SHRQ $23, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
TEXT ·popcntSliceAsm(SB),4,$0-32
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntSliceEnd
popcntSliceLoop:
BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
ADDQ DX, AX
ADDQ $8, SI
LOOP popcntSliceLoop
popcntSliceEnd:
MOVQ AX, ret+24(FP)
RET
TEXT ·popcntMaskSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntMaskSliceEnd
MOVQ m+24(FP), DI
popcntMaskSliceLoop:
MOVQ (DI), DX
NOTQ DX
ANDQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntMaskSliceLoop
popcntMaskSliceEnd:
MOVQ AX, ret+48(FP)
RET
TEXT ·popcntAndSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntAndSliceEnd
MOVQ m+24(FP), DI
popcntAndSliceLoop:
MOVQ (DI), DX
ANDQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntAndSliceLoop
popcntAndSliceEnd:
MOVQ AX, ret+48(FP)
RET
TEXT ·popcntOrSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntOrSliceEnd
MOVQ m+24(FP), DI
popcntOrSliceLoop:
MOVQ (DI), DX
ORQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntOrSliceLoop
popcntOrSliceEnd:
MOVQ AX, ret+48(FP)
RET
TEXT ·popcntXorSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntXorSliceEnd
MOVQ m+24(FP), DI
popcntXorSliceLoop:
MOVQ (DI), DX
XORQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntXorSliceLoop
popcntXorSliceEnd:
MOVQ AX, ret+48(FP)
RET

View File

@@ -1,24 +0,0 @@
// +build !go1.9
// +build !amd64 appengine
package bitset
func popcntSlice(s []uint64) uint64 {
return popcntSliceGo(s)
}
func popcntMaskSlice(s, m []uint64) uint64 {
return popcntMaskSliceGo(s, m)
}
func popcntAndSlice(s, m []uint64) uint64 {
return popcntAndSliceGo(s, m)
}
func popcntOrSlice(s, m []uint64) uint64 {
return popcntOrSliceGo(s, m)
}
func popcntXorSlice(s, m []uint64) uint64 {
return popcntXorSliceGo(s, m)
}

View File

@@ -1,14 +0,0 @@
// +build !go1.9
package bitset
var deBruijn = [...]byte{
0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}
func trailingZeroes64(v uint64) uint {
return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
}

View File

@@ -1,9 +0,0 @@
// +build go1.9
package bitset
import "math/bits"
func trailingZeroes64(v uint64) uint {
return uint(bits.TrailingZeros64(v))
}

View File

@@ -17,9 +17,9 @@ import (
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/internal/streamdigest"
 	"github.com/containers/image/v5/internal/uploadreader"
 	"github.com/containers/image/v5/manifest"
-	"github.com/containers/image/v5/pkg/blobinfocache/none"
 	"github.com/containers/image/v5/types"
 	"github.com/docker/distribution/registry/api/errcode"
 	v2 "github.com/docker/distribution/registry/api/v2"
@@ -131,11 +131,23 @@ func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
+	// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
+	// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
+	if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
+		logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
+		streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer cleanup()
+		stream = streamCopy
+	}
+
 	if inputInfo.Digest != "" {
 		// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
 		// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
-		// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
-		haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
+		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, cache)
 		if err != nil {
 			return types.BlobInfo{}, err
 		}
@@ -282,6 +294,21 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 	}
 }
 
+// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
+// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
+// The caller must ensure info.Digest is set.
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (bool, types.BlobInfo, error) {
+	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	if exists {
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+		return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+	}
+	return false, types.BlobInfo{}, nil
+}
+
 // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
@@ -297,13 +324,12 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
 	}
 
 	// First, check whether the blob happens to already exist at the destination.
-	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+	haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, cache)
 	if err != nil {
 		return false, types.BlobInfo{}, err
 	}
-	if exists {
-		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+	if haveBlob {
+		return true, reusedInfo, nil
 	}
 
 	// Then try reusing blobs from other locations.
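The hunks above add an opt-in digest-precompute path to PutBlob plus the tryReusingExactBlob helper. A minimal caller-side sketch of how the opt-in is expressed; the field name is taken from the diff, while the surrounding package and function are hypothetical:

```go
package example // hypothetical illustration, not part of the diff

import "github.com/containers/image/v5/types"

// pushContext returns a SystemContext that opts in to digest precomputation;
// DockerRegistryPushPrecomputeDigests is the field read by PutBlob in the hunk above.
func pushContext() *types.SystemContext {
	return &types.SystemContext{
		DockerRegistryPushPrecomputeDigests: true,
	}
}
```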

View File

@@ -5,13 +5,10 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
-	"os"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/iolimits"
-	"github.com/containers/image/v5/internal/putblobdigest"
-	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/internal/streamdigest"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
@@ -98,25 +95,11 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
 	if inputInfo.Size == -1 || inputInfo.Digest == "" {
 		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
-		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
+		streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
 		if err != nil {
 			return types.BlobInfo{}, err
 		}
-		defer os.Remove(streamCopy.Name())
-		defer streamCopy.Close()
-
-		digester, stream2 := putblobdigest.DigestIfUnknown(stream, inputInfo)
-		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-		size, err := io.Copy(streamCopy, stream2)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		_, err = streamCopy.Seek(0, io.SeekStart)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
-		inputInfo.Digest = digester.Digest()
+		defer cleanup()
 		stream = streamCopy
 		logrus.Debugf("... streaming done")
 	}

View File

@@ -0,0 +1,41 @@
package streamdigest
import (
"fmt"
"io"
"io/ioutil"
"os"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/types"
)
// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo.
// The temporary file is returned as an io.Reader along with a cleanup function.
// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
// If an error occurs, inputInfo is not modified.
func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
if err != nil {
return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
}
cleanup := func() {
diskBlob.Close()
os.Remove(diskBlob.Name())
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
written, err := io.Copy(diskBlob, stream)
if err != nil {
cleanup()
return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
}
_, err = diskBlob.Seek(0, io.SeekStart)
if err != nil {
cleanup()
return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
}
inputInfo.Digest = digester.Digest()
inputInfo.Size = written
return diskBlob, cleanup, nil
}
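The file above is new in this bump; ComputeBlobInfo is what both call sites shown earlier now delegate to. A minimal usage sketch follows. The caller is hypothetical (streamdigest is an internal package, so real callers must live inside the containers/image module); only the ComputeBlobInfo signature is taken from the diff:

```go
package example // hypothetical; shown only to illustrate the ComputeBlobInfo contract

import (
	"io"
	"os"

	"github.com/containers/image/v5/internal/streamdigest"
	"github.com/containers/image/v5/types"
)

// digestFromFile fills in Digest and Size for a blob whose metadata is unknown up front.
func digestFromFile(path string) (io.Reader, types.BlobInfo, func(), error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, types.BlobInfo{}, nil, err
	}
	info := types.BlobInfo{Size: -1} // digest and size not known yet
	stream, cleanup, err := streamdigest.ComputeBlobInfo(&types.SystemContext{}, f, &info)
	if err != nil {
		f.Close()
		return nil, types.BlobInfo{}, nil, err
	}
	// info.Digest and info.Size are now populated; stream re-reads the same bytes from disk.
	// cleanup() closes and removes the temporary copy; the source file is closed separately.
	return stream, info, func() { cleanup(); f.Close() }, nil
}
```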

View File

@@ -268,18 +268,18 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re
 	}
 
 	// Anonymous function to query credentials from auth files.
-	getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) {
+	getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
 		for _, path := range getAuthFilePaths(sys, homeDir) {
 			authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat)
 			if err != nil {
-				return types.DockerAuthConfig{}, err
+				return types.DockerAuthConfig{}, "", err
 			}
 
 			if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
-				return authConfig, nil
+				return authConfig, path.path, nil
 			}
 		}
-		return types.DockerAuthConfig{}, nil
+		return types.DockerAuthConfig{}, "", nil
 	}
 
 	helpers, err := sysregistriesv2.CredentialHelpers(sys)
@@ -289,12 +289,15 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re
 
 	var multiErr error
 	for _, helper := range helpers {
-		var creds types.DockerAuthConfig
-		var err error
+		var (
+			creds          types.DockerAuthConfig
+			credHelperPath string
+			err            error
+		)
 		switch helper {
 		// Special-case the built-in helper for auth files.
 		case sysregistriesv2.AuthenticationFileHelper:
-			creds, err = getCredentialsFromAuthFiles()
+			creds, credHelperPath, err = getCredentialsFromAuthFiles()
 		// External helpers.
 		default:
 			creds, err = getAuthFromCredHelper(helper, registry)
@@ -307,7 +310,11 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re
 		if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 {
 			continue
 		}
-		logrus.Debugf("Found credentials for %s in credential helper %s", registry, helper)
+		msg := fmt.Sprintf("Found credentials for %s in credential helper %s", registry, helper)
+		if credHelperPath != "" {
+			msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
+		}
+		logrus.Debug(msg)
 		return creds, nil
 	}
 	if multiErr != nil {

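The practical effect is that the debug log can now name the auth file that supplied the credentials, not just the helper. A simplified, hypothetical rendition of the pattern follows; the types, paths, and helper name here are made up for illustration and are not the library's API.

package main

import "fmt"

// credentialsFromFiles mirrors the shape of the change above: the auth-file lookup
// also returns the path of the file that supplied the credentials.
type authConfig struct{ Username, Password, IdentityToken string }

func credentialsFromFiles(paths []string, find func(path string) authConfig) (authConfig, string) {
	for _, path := range paths {
		if creds := find(path); (creds.Username != "" && creds.Password != "") || creds.IdentityToken != "" {
			return creds, path
		}
	}
	return authConfig{}, ""
}

func main() {
	paths := []string{"/run/containers/0/auth.json", "/home/user/.docker/config.json"}
	creds, from := credentialsFromFiles(paths, func(path string) authConfig {
		if path == "/home/user/.docker/config.json" {
			return authConfig{Username: "alice", Password: "secret"}
		}
		return authConfig{}
	})

	msg := fmt.Sprintf("Found credentials for %s in credential helper %s", "registry.example.com", "containers-auth.json")
	if from != "" {
		msg = fmt.Sprintf("%s in file %s", msg, from)
	}
	fmt.Println(msg, "(user:", creds.Username+")")
}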

@@ -80,7 +80,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
 	// be dropped.
 	// https://github.com/containers/image/pull/1191#discussion_r610621608
 	if e.Location == "" {
-		if prefix[:2] != "*." {
+		if !strings.HasPrefix(prefix, "*.") {
 			return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
 		}
 		return ref, nil
@@ -369,7 +369,7 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
 		}
 		// FIXME: allow config authors to always use Prefix.
 		// https://github.com/containers/image/pull/1191#discussion_r610622495
-		if reg.Prefix[:2] != "*." && reg.Location == "" {
+		if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
 			return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
 		}
 	}
@@ -804,7 +804,7 @@ func refMatchingSubdomainPrefix(ref, prefix string) int {
 // (This is split from the caller primarily to make testing easier.)
 func refMatchingPrefix(ref, prefix string) int {
 	switch {
-	case prefix[0:2] == "*.":
+	case strings.HasPrefix(prefix, "*."):
 		return refMatchingSubdomainPrefix(ref, prefix)
 	case len(ref) < len(prefix):
 		return -1
@@ -924,7 +924,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
 	// https://github.com/containers/image/pull/1191#discussion_r610623829
 	for i := range res.partialV2.Registries {
 		prefix := res.partialV2.Registries[i].Prefix
-		if prefix[:2] == "*." && strings.ContainsAny(prefix, "/@:") {
+		if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
 			msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
 			return nil, &InvalidRegistries{s: msg}
 		}

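The reason for replacing the slice expressions: taking prefix[:2] of a string shorter than two bytes panics at runtime, while strings.HasPrefix simply reports false. A tiny standalone demonstration, with made-up sample prefixes:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "q" and "" are shorter than two bytes: the old prefix[:2] comparison would
	// panic with "slice bounds out of range", while strings.HasPrefix returns false.
	for _, prefix := range []string{"*.example.com", "example.com", "q", ""} {
		fmt.Printf("%q -> wildcard prefix? %v\n", prefix, strings.HasPrefix(prefix, "*."))
	}
}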

@@ -225,7 +225,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
 	// needs to match a store that was previously initialized using
 	// storage.GetStore(), or be enough to let the storage library fill out
 	// the rest using knowledge that it has from elsewhere.
-	if reference[0] == '[' {
+	if len(reference) > 0 && reference[0] == '[' {
 		closeIndex := strings.IndexRune(reference, ']')
 		if closeIndex < 1 {
 			return nil, ErrInvalidReference


@@ -622,6 +622,10 @@ type SystemContext struct {
 	DockerLogMirrorChoice bool
 	// Directory to use for OSTree temporary files
 	OSTreeTmpDirPath string
+	// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
+	// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
+	// when the digest for a blob is unknown.
+	DockerRegistryPushPrecomputeDigests bool
 	// === docker/daemon.Transport overrides ===
 	// A directory containing a CA certificate (ending with ".crt"),

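A hedged sketch of how a caller might opt into the new field by setting it on the SystemContext used for the destination of a copy. The image references, policy JSON, and option wiring below are illustrative only and are not mandated by this commit.

package main

import (
	"context"
	"os"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

func main() {
	srcRef, _ := docker.ParseReference("//registry.example.com/app:v1")
	destRef, _ := docker.ParseReference("//registry.example.com/app:v1-copy")

	// Opting in trades extra time and temporary disk space for skipping uploads of
	// layers the destination registry already has.
	destCtx := &types.SystemContext{DockerRegistryPushPrecomputeDigests: true}

	policy, _ := signature.NewPolicyFromBytes([]byte(`{"default":[{"type":"insecureAcceptAnything"}]}`))
	policyCtx, _ := signature.NewPolicyContext(policy)

	if _, err := copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
		DestinationCtx: destCtx,
		ReportWriter:   os.Stdout,
	}); err != nil {
		panic(err)
	}
}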

@@ -8,10 +8,10 @@ const (
 	// VersionMinor is for functionality in a backwards-compatible manner
 	VersionMinor = 16
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 1
+	VersionPatch = 2
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
+	VersionDev = "-dev"
 )
 // Version is the specification version that the package types support.


@@ -103,9 +103,11 @@ func SetFileCreateLabel(fileLabel string) error {
 	return selinux.SetFSCreateLabel(fileLabel)
 }
-// Relabel changes the label of path to the filelabel string.
+// Relabel changes the label of path and all the entries beneath the path.
 // It changes the MCS label to s0 if shared is true.
 // This will allow all containers to share the content.
+//
+// The path itself is guaranteed to be relabeled last.
 func Relabel(path string, fileLabel string, shared bool) error {
 	if !selinux.GetEnabled() || fileLabel == "" {
 		return nil

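The newly documented ordering guarantee is easiest to read next to a usage sketch. The mount point and SELinux label below are placeholders, not values taken from this commit.

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// Relabel a directory that will be bind-mounted into containers. With shared=true
	// the MCS categories are rewritten to s0 so multiple containers can use the content;
	// per the updated doc comment, the path itself is relabeled only after everything
	// beneath it. On hosts without SELinux enabled this is a no-op.
	mountPoint := "/var/lib/myapp/data"                        // placeholder path
	fileLabel := "system_u:object_r:container_file_t:s0:c1,c2" // placeholder label

	if err := label.Relabel(mountPoint, fileLabel, true); err != nil {
		fmt.Println("relabel failed:", err)
	}
}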

@@ -255,6 +255,8 @@ func CopyLevel(src, dest string) (string, error) {
 // Chcon changes the fpath file object to the SELinux label label.
 // If fpath is a directory and recurse is true, then Chcon walks the
 // directory tree setting the label.
+//
+// The fpath itself is guaranteed to be relabeled last.
 func Chcon(fpath string, label string, recurse bool) error {
 	return chcon(fpath, label, recurse)
 }


@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math/big"
 	"os"
 	"path"
 	"path/filepath"
@@ -16,7 +17,6 @@ import (
 	"strings"
 	"sync"
-	"github.com/bits-and-blooms/bitset"
 	"golang.org/x/sys/unix"
 )
@@ -44,7 +44,7 @@ type selinuxState struct {
 type level struct {
 	sens uint
-	cats *bitset.BitSet
+	cats *big.Int
 }
 type mlsRange struct {
@@ -455,8 +455,8 @@ func computeCreateContext(source string, target string, class string) (string, e
 }
 // catsToBitset stores categories in a bitset.
-func catsToBitset(cats string) (*bitset.BitSet, error) {
-	bitset := &bitset.BitSet{}
+func catsToBitset(cats string) (*big.Int, error) {
+	bitset := new(big.Int)
 	catlist := strings.Split(cats, ",")
 	for _, r := range catlist {
@@ -471,14 +471,14 @@ func catsToBitset(cats string) (*bitset.BitSet, error) {
 				return nil, err
 			}
 			for i := catstart; i <= catend; i++ {
-				bitset.Set(i)
+				bitset.SetBit(bitset, int(i), 1)
 			}
 		} else {
 			cat, err := parseLevelItem(ranges[0], category)
 			if err != nil {
 				return nil, err
 			}
-			bitset.Set(cat)
+			bitset.SetBit(bitset, int(cat), 1)
 		}
 	}
@@ -548,37 +548,30 @@ func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
 // bitsetToStr takes a category bitset and returns it in the
 // canonical selinux syntax
-func bitsetToStr(c *bitset.BitSet) string {
+func bitsetToStr(c *big.Int) string {
 	var str string
-	i, e := c.NextSet(0)
-	len := 0
-	for e {
-		if len == 0 {
+	length := 0
+	for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ {
+		if c.Bit(i) == 0 {
+			continue
+		}
+		if length == 0 {
 			if str != "" {
 				str += ","
 			}
-			str += "c" + strconv.Itoa(int(i))
+			str += "c" + strconv.Itoa(i)
 		}
+		if c.Bit(i+1) == 1 {
-		next, e := c.NextSet(i + 1)
+			length++
-		if e {
-			// consecutive cats
-			if next == i+1 {
-				len++
-				i = next
 			continue
 		}
+		if length == 1 {
+			str += ",c" + strconv.Itoa(i)
+		} else if length > 1 {
+			str += ".c" + strconv.Itoa(i)
 		}
-		if len == 1 {
+		length = 0
-			str += ",c" + strconv.Itoa(int(i))
-		} else if len > 1 {
-			str += ".c" + strconv.Itoa(int(i))
-		}
-		if !e {
-			break
-		}
-		len = 0
-		i = next
 	}
 	return str
@@ -591,13 +584,16 @@ func (l1 *level) equal(l2 *level) bool {
 	if l1.sens != l2.sens {
 		return false
 	}
-	return l1.cats.Equal(l2.cats)
+	if l2.cats == nil || l1.cats == nil {
+		return l2.cats == l1.cats
+	}
+	return l1.cats.Cmp(l2.cats) == 0
 }
 // String returns an mlsRange as a string.
 func (m mlsRange) String() string {
 	low := "s" + strconv.Itoa(int(m.low.sens))
-	if m.low.cats != nil && m.low.cats.Count() > 0 {
+	if m.low.cats != nil && m.low.cats.BitLen() > 0 {
 		low += ":" + bitsetToStr(m.low.cats)
 	}
@@ -606,7 +602,7 @@ func (m mlsRange) String() string {
 	}
 	high := "s" + strconv.Itoa(int(m.high.sens))
-	if m.high.cats != nil && m.high.cats.Count() > 0 {
+	if m.high.cats != nil && m.high.cats.BitLen() > 0 {
 		high += ":" + bitsetToStr(m.high.cats)
 	}
@@ -656,10 +652,12 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) {
 	/* find the intersecting categories */
 	if s.low.cats != nil && t.low.cats != nil {
-		outrange.low.cats = s.low.cats.Intersection(t.low.cats)
+		outrange.low.cats = new(big.Int)
+		outrange.low.cats.And(s.low.cats, t.low.cats)
 	}
 	if s.high.cats != nil && t.high.cats != nil {
-		outrange.high.cats = s.high.cats.Intersection(t.high.cats)
+		outrange.high.cats = new(big.Int)
+		outrange.high.cats.And(s.high.cats, t.high.cats)
 	}
 	return outrange.String(), nil

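To see what the math/big-backed representation renders, here is a standalone sketch that mirrors the category-to-string logic above. It is a simplified reimplementation for illustration, not the vendored code itself: bit i of the integer represents category ci, consecutive categories collapse into a cX.cY range, and gaps are comma-separated.

package main

import (
	"fmt"
	"math/big"
	"strconv"
)

// catsToString renders a set of category bits in the canonical SELinux syntax,
// e.g. bits {0,1,2,3,5} become "c0.c3,c5".
func catsToString(c *big.Int) string {
	var str string
	length := 0
	for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ {
		if c.Bit(i) == 0 {
			continue
		}
		if length == 0 {
			if str != "" {
				str += ","
			}
			str += "c" + strconv.Itoa(i)
		}
		if c.Bit(i+1) == 1 {
			length++ // still inside a consecutive run
			continue
		}
		if length == 1 {
			str += ",c" + strconv.Itoa(i)
		} else if length > 1 {
			str += ".c" + strconv.Itoa(i)
		}
		length = 0
	}
	return str
}

func main() {
	cats := new(big.Int)
	for _, c := range []int{0, 1, 2, 3, 5} { // categories c0..c3 and c5
		cats.SetBit(cats, c, 1)
	}
	fmt.Println(catsToString(cats)) // prints: c0.c3,c5
}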

@@ -51,6 +51,9 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
 	var (
 		err error
 		wg  sync.WaitGroup
+		rootLen   = len(root)
+		rootEntry *walkArgs
 	)
 	wg.Add(1)
 	go func() {
@@ -59,6 +62,11 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
 				close(files)
 				return err
 			}
+			if len(p) == rootLen {
+				// Root entry is processed separately below.
+				rootEntry = &walkArgs{path: p, info: &info}
+				return nil
+			}
 			// add a file to the queue unless a callback sent an error
 			select {
 			case e := <-errCh:
@@ -92,6 +100,10 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
 	wg.Wait()
+	if err == nil {
+		err = walkFn(rootEntry.path, *rootEntry.info, nil)
+	}
 	return err
 }

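A hedged sketch of the observable behavior change: the walk callback now sees the root directory itself only after all of its children have been handled. The temporary directory and file names below are just for the demonstration.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"github.com/opencontainers/selinux/pkg/pwalk"
)

func main() {
	root, _ := ioutil.TempDir("", "pwalk-demo")
	defer os.RemoveAll(root)
	_ = ioutil.WriteFile(filepath.Join(root, "a.txt"), []byte("x"), 0o600)
	_ = ioutil.WriteFile(filepath.Join(root, "b.txt"), []byte("y"), 0o600)

	var mu sync.Mutex
	var order []string

	// WalkN runs the callback concurrently (here with 2 workers); with this change
	// the root path itself is always passed to the callback last.
	err := pwalk.WalkN(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		mu.Lock()
		order = append(order, path)
		mu.Unlock()
		return nil
	}, 2)
	if err != nil {
		panic(err)
	}
	fmt.Println("root visited last:", order[len(order)-1] == root) // true
}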

@@ -1,3 +1,4 @@
+//go:build go1.16
 // +build go1.16
 package pwalkdir
@@ -51,6 +52,9 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
 	var (
 		err error
 		wg  sync.WaitGroup
+		rootLen   = len(root)
+		rootEntry *walkArgs
 	)
 	wg.Add(1)
 	go func() {
@@ -59,6 +63,11 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
 				close(files)
 				return err
 			}
+			if len(p) == rootLen {
+				// Root entry is processed separately below.
+				rootEntry = &walkArgs{path: p, entry: entry}
+				return nil
+			}
 			// Add a file to the queue unless a callback sent an error.
 			select {
 			case e := <-errCh:
@@ -92,6 +101,10 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
 	wg.Wait()
+	if err == nil {
+		err = walkFn(rootEntry.path, rootEntry.entry, nil)
+	}
 	return err
 }

7
vendor/modules.txt vendored

@@ -34,8 +34,6 @@ github.com/VividCortex/ewma
 github.com/acarl005/stripansi
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
-# github.com/bits-and-blooms/bitset v1.2.1
-github.com/bits-and-blooms/bitset
 # github.com/cespare/xxhash/v2 v2.1.1
 github.com/cespare/xxhash/v2
 # github.com/containerd/cgroups v1.0.1
@@ -54,7 +52,7 @@ github.com/containers/common/pkg/completion
 github.com/containers/common/pkg/report
 github.com/containers/common/pkg/report/camelcase
 github.com/containers/common/pkg/retry
-# github.com/containers/image/v5 v5.16.1
+# github.com/containers/image/v5 v5.16.2-0.20211021181114-25411654075f
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
@@ -71,6 +69,7 @@ github.com/containers/image/v5/internal/pkg/keyctl
 github.com/containers/image/v5/internal/pkg/platform
 github.com/containers/image/v5/internal/putblobdigest
 github.com/containers/image/v5/internal/rootless
+github.com/containers/image/v5/internal/streamdigest
 github.com/containers/image/v5/internal/tmpdir
 github.com/containers/image/v5/internal/types
 github.com/containers/image/v5/internal/uploadreader
@@ -283,7 +282,7 @@ github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
 # github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/selinux v1.8.5
+# github.com/opencontainers/selinux v1.9.1
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalk