Mirror of https://github.com/k8snetworkplumbingwg/multus-cni.git (synced 2025-09-02 17:35:49 +00:00).
Compare view: 49 commits.
Commits (SHA1):
9b45d4b211, f77c591d69, 8c9f409032, 0724c2b9b0, 085870c4b5, 26ef3c3eb4, 1eeab9b589, 7f5620db76, 1e29a6b50c, d09cc8e581, cd23938191, 3b8aa66765, 37adefba51, ec30b99534, a53233028b, 492ffec8c8, c25a39c5a3, 3f1031e7b4, 2339c11a15, 2ba3d3cda2, 6abe8ee06b, a03b3e8c41, 753d18740a, 1a0fa953f9, bd57d31780, 91a7d22e2b, 8d555dba24, e566760a45, cd7492f79b, 9e3ad2ea1e, a52680b3bf, 78cf9fced8, 905b5d0e42, 0ef8c27a67, e603d8dbb0, 9e96c38ebe, ad6f57f14d, 479e2bd78e, ae56437297, dc4d6e6764, 75acc10312, 8d7308e6bb, c4b9534529, c8e63996c8, 5d98a0e0eb, 343a4e3687, feb52328d2, 118cc629cf, 784fecfa02
.github/workflows/image-build.yml (vendored, 18 lines changed)
@@ -37,6 +37,24 @@ jobs:
          tags: ghcr.io/${{ github.repository }}:latest-arm64
          file: deployments/Dockerfile.arm64

  build-arm32:
    name: Image build/arm32
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Build container image
        uses: docker/build-push-action@v2
        with:
          context: .
          push: false
          tags: ghcr.io/${{ github.repository }}:latest-arm32
          file: deployments/Dockerfile.arm32

  build-ppc64le:
    name: Image build/ppc64le
    runs-on: ubuntu-latest
.github/workflows/image-push-master.yml (vendored, 37 lines changed)
@@ -62,6 +62,35 @@ jobs:
            ghcr.io/${{ github.repository }}:snapshot-arm64
          file: deployments/Dockerfile.arm64

  push-arm32:
    name: Image push/arm32
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to GitHub Container Registry
        if: github.repository_owner == 'k8snetworkplumbingwg'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Push container image
        if: github.repository_owner == 'k8snetworkplumbingwg'
        uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          tags: |
            ghcr.io/${{ github.repository }}:latest-arm32
            ghcr.io/${{ github.repository }}:snapshot-arm32
          file: deployments/Dockerfile.arm32

  push-ppc64le:
    name: Image push/ppc64le
    runs-on: ubuntu-latest

@@ -172,21 +201,25 @@ jobs:
          # get artifacts from previous steps
          docker pull ${{ env.REPOSITORY }}:snapshot-amd64
          docker pull ${{ env.REPOSITORY }}:snapshot-arm64
          docker pull ${{ env.REPOSITORY }}:snapshot-arm32
          docker pull ${{ env.REPOSITORY }}:snapshot-ppc64le
          docker pull ${{ env.REPOSITORY }}:snapshot-s390x
          docker pull ${{ env.REPOSITORY }}:latest-amd64
          docker pull ${{ env.REPOSITORY }}:latest-arm64
          docker pull ${{ env.REPOSITORY }}:latest-arm32
          docker pull ${{ env.REPOSITORY }}:latest-ppc64le
          docker pull ${{ env.REPOSITORY }}:latest-s390x
          docker manifest create ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-amd64 ${{ env.REPOSITORY }}:snapshot-arm64 ${{ env.REPOSITORY }}:snapshot-ppc64le ${{ env.REPOSITORY }}:snapshot-s390x
          docker manifest create ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-amd64 ${{ env.REPOSITORY }}:snapshot-arm64 ${{ env.REPOSITORY }}:snapshot-arm32 ${{ env.REPOSITORY }}:snapshot-ppc64le ${{ env.REPOSITORY }}:snapshot-s390x
          docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-amd64 --arch amd64
          docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-arm64 --arch arm64
          docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-arm32 --arch arm
          docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-ppc64le --arch ppc64le
          docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-s390x --arch s390x
          docker manifest push ${{ env.REPOSITORY }}:snapshot
          docker manifest create ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-amd64 ${{ env.REPOSITORY }}:latest-arm64 ${{ env.REPOSITORY }}:latest-ppc64le ${{ env.REPOSITORY }}:latest-s390x
          docker manifest create ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-amd64 ${{ env.REPOSITORY }}:latest-arm64 ${{ env.REPOSITORY }}:latest-arm32 ${{ env.REPOSITORY }}:latest-ppc64le ${{ env.REPOSITORY }}:latest-s390x
          docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-amd64 --arch amd64
          docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-arm64 --arch arm64
          docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-arm32 --arch arm
          docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-ppc64le --arch ppc64le
          docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-s390x --arch s390x
          docker manifest push ${{ env.REPOSITORY }}:latest
.github/workflows/image-push-release.yml (vendored, 44 lines changed)
@@ -76,6 +76,42 @@ jobs:
            ${{ steps.docker_meta.outputs.tags }}-arm64
          file: deployments/Dockerfile.arm64

  push-arm32:
    name: Image push/arm32
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to GitHub Container Registry
        if: github.repository_owner == 'k8snetworkplumbingwg'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Docker meta
        id: docker_meta
        uses: crazy-max/ghaction-docker-meta@v1
        with:
          images: ghcr.io/${{ github.repository }}
          tag-latest: false

      - name: Push container image
        if: github.repository_owner == 'k8snetworkplumbingwg'
        uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          tags: |
            ghcr.io/${{ github.repository }}:stable-arm32
            ${{ steps.docker_meta.outputs.tags }}-arm32
          file: deployments/Dockerfile.arm32

  push-ppc64le:
    name: Image push/ppc64le
    runs-on: ubuntu-latest

@@ -214,21 +250,25 @@ jobs:
          # get artifacts from previous steps
          docker pull ${{ steps.docker_meta.outputs.tags }}-amd64
          docker pull ${{ steps.docker_meta.outputs.tags }}-arm64
          docker pull ${{ steps.docker_meta.outputs.tags }}-arm32
          docker pull ${{ steps.docker_meta.outputs.tags }}-ppc64le
          docker pull ${{ steps.docker_meta.outputs.tags }}-s390x
          docker manifest create ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-amd64 ${{ steps.docker_meta.outputs.tags }}-arm64 ${{ steps.docker_meta.outputs.tags }}-ppc64le ${{ steps.docker_meta.outputs.tags }}-s390x
          docker manifest create ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-amd64 ${{ steps.docker_meta.outputs.tags }}-arm64 ${{ steps.docker_meta.outputs.tags }}-arm32 ${{ steps.docker_meta.outputs.tags }}-ppc64le ${{ steps.docker_meta.outputs.tags }}-s390x
          docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-amd64 --arch amd64
          docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-arm64 --arch arm64
          docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-arm32 --arch arm
          docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-ppc64le --arch ppc64le
          docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-s390x --arch s390x
          docker manifest push ${{ steps.docker_meta.outputs.tags }}
          docker pull ${{ env.REPOSITORY }}:stable-amd64
          docker pull ${{ env.REPOSITORY }}:stable-arm64
          docker pull ${{ env.REPOSITORY }}:stable-arm32
          docker pull ${{ env.REPOSITORY }}:stable-ppc64le
          docker pull ${{ env.REPOSITORY }}:stable-s390x
          docker manifest create ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-amd64 ${{ env.REPOSITORY }}:stable-arm64 ${{ env.REPOSITORY }}:stable-ppc64le ${{ env.REPOSITORY }}:stable-s390x
          docker manifest create ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-amd64 ${{ env.REPOSITORY }}:stable-arm64 ${{ env.REPOSITORY }}:stable-arm32 ${{ env.REPOSITORY }}:stable-ppc64le ${{ env.REPOSITORY }}:stable-s390x
          docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-amd64 --arch amd64
          docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-arm64 --arch arm64
          docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-arm32 --arch arm
          docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-ppc64le --arch ppc64le
          docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-s390x --arch s390x
          docker manifest push ${{ env.REPOSITORY }}:stable
.github/workflows/kind-e2e.yml (vendored, 4 lines changed)
@@ -27,6 +27,10 @@ jobs:
        working-directory: ./e2e
        run: ./setup_cluster.sh

      - name: Test simple pod
        working-directory: ./e2e
        run: ./test-simple-pod.sh

      - name: Test macvlan1
        working-directory: ./e2e
        run: ./test-simple-macvlan1.sh
.github/workflows/stale-issues-prs.yml (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
name: 'Close stale issues and PRs'
on:
  schedule:
    - cron: '30 1 * * *'

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v3
        with:
          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
          stale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
          days-before-stale: 90
          days-before-close: 7
@@ -1,8 +1,8 @@
# Multus-CNI




[](https://github.com/intel/multus-cni/actions/workflows/build.yml)[](https://github.com/intel/multus-cni/actions/workflows/test.yml)[](https://goreportcard.com/report/github.com/intel/multus-cni)[](https://coveralls.io/github/intel/multus-cni)
[](https://github.com/k8snetworkplumbingwg/multus-cni/actions/workflows/build.yml)[](https://github.com/k8snetworkplumbingwg/multus-cni/actions/workflows/test.yml)[](https://goreportcard.com/report/github.com/k8snetworkplumbingwg/multus-cni)[](https://coveralls.io/github/k8snetworkplumbingwg/multus-cni)

Multus CNI enables attaching multiple network interfaces to pods in Kubernetes.

@@ -35,7 +35,7 @@ This will configure your systems to be ready to use Multus CNI, but, to get star
## Additional installation Options

- Install via daemonset using the quick-start guide, above.
- Download binaries from [release page](https://github.com/intel/multus-cni/releases)
- Download binaries from [release page](https://github.com/k8snetworkplumbingwg/multus-cni/releases)
- By Docker image from [Docker Hub](https://hub.docker.com/r/nfvpe/multus/tags/)
- Or, roll-your-own and build from source
  - See [Development](docs/development.md)
@@ -25,7 +25,7 @@ import (

	"github.com/containernetworking/cni/pkg/skel"
	cniversion "github.com/containernetworking/cni/pkg/version"
	"gopkg.in/intel/multus-cni.v3/pkg/multus"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/multus"
)

func main() {
@@ -4,7 +4,7 @@ FROM centos:centos7 as build
# Add everything
ADD . /usr/src/multus-cni

ENV INSTALL_PKGS "git golang-1.13.10-0.el7.x86_64"
ENV INSTALL_PKGS "git golang-1.15.13-0.el7.x86_64"
RUN rpm --import https://mirror.go-repo.io/centos/RPM-GPG-KEY-GO-REPO && \
    curl -s https://mirror.go-repo.io/centos/go-repo.repo | tee /etc/yum.repos.d/go-repo.repo && \
    yum install -y $INSTALL_PKGS && \
deployments/Dockerfile.arm32 (new file, 21 lines)
@@ -0,0 +1,21 @@
# This Dockerfile is used to build the image available on DockerHub
FROM golang:1.13.4 as build

# Add everything
ADD . /usr/src/multus-cni

ENV GOARCH "arm"
ENV GOOS "linux"

RUN cd /usr/src/multus-cni && \
    ./hack/build-go.sh

# build arm container
FROM arm32v7/centos:7
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni

WORKDIR /
ADD ./images/entrypoint.sh /

ENTRYPOINT ["/entrypoint.sh"]
@@ -5,6 +5,7 @@ Following is the example of multus config file, in `/etc/cni/net.d/`.

```
{
    "cniVersion": "0.3.1",
    "name": "node-cni-network",
    "type": "multus",
    "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
@@ -39,12 +40,12 @@ Following is the example of multus config file, in `/etc/cni/net.d/`.
* `confDir` (string, optional): directory for CNI config file that multus reads. default `/etc/cni/multus/net.d`
* `cniDir` (string, optional): Multus CNI data directory, default `/var/lib/cni/multus`
* `binDir` (string, optional): additional directory for CNI plugins which multus calls, in addition to the default (the default is typically set to `/opt/cni/bin`)
* `kubeconfig` (string, optional): kubeconfig file for the out of cluster communication with kube-apiserver. See the example [kubeconfig](https://github.com/intel/multus-cni/blob/master/doc/node-kubeconfig.yaml). If you would like to use CRD (i.e. network attachment definition), this is required
* `kubeconfig` (string, optional): kubeconfig file for the out of cluster communication with kube-apiserver. See the example [kubeconfig](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/node-kubeconfig.yaml). If you would like to use CRD (i.e. network attachment definition), this is required
* `logToStderr` (bool, optional): Enable or disable logging to `STDERR`. Defaults to true.
* `logFile` (string, optional): file path for log file. multus puts log in given file
* `logLevel` (string, optional): logging level ("debug", "error", "verbose", or "panic")
* `namespaceIsolation` (boolean, optional): Enables a security feature where pods are only allowed to access `NetworkAttachmentDefinitions` in the namespace where the pod resides. Defaults to false.
* `capabilities` ({}list, optional): [capabilities](https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md#dynamic-plugin-specific-fields-capabilities--runtime-configuration) supported by at least one of the delegates. (NOTE: Multus only supports portMappings capability for now). See the [example](https://github.com/intel/multus-cni/blob/master/examples/multus-ptp-portmap.conf).
* `capabilities` ({}list, optional): [capabilities](https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md#dynamic-plugin-specific-fields-capabilities--runtime-configuration) supported by at least one of the delegates. (NOTE: Multus only supports portMappings/Bandwidth capability for cluster networks).
* `readinessindicatorfile`: The path to a file whose existance denotes that the default network is ready

User should chose following parameters combination (`clusterNetwork`+`defaultNetworks` or `delegates`):
@@ -60,7 +61,7 @@ User should chose following parameters combination (`clusterNetwork`+`defaultNet
Multus will find network for clusterNetwork/defaultNetworks as following sequences:

1. CRD object for given network name, in 'kube-system' namespace
1. CNI json config file in `confDir`. Given name should be without extention, like .conf/.conflist. (e.g. "test" for "test.conf")
1. CNI json config file in `confDir`. Given name should be without extention, like .conf/.conflist. (e.g. "test" for "test.conf"). The given name for `clusterNetwork` should match the value for `name` key in the config file (e.g. `"name": "test"` in "test.conf" when `"clusterNetwork": "test"`)
1. Directory for CNI json config file. Multus will find alphabetically first file for the network
1. Multus failed to find network. Multus raise error message
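To make the option list and lookup order above concrete, here is a small self-contained Go sketch that parses a minimal Multus configuration of the `clusterNetwork`+`defaultNetworks` form. The struct below is a hypothetical local stand-in for illustration, not Multus's actual `types.NetConf` definition.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// netConf mirrors a handful of the options documented above; it is a
// local illustration only, not the real Multus configuration type.
type netConf struct {
	CNIVersion      string   `json:"cniVersion"`
	Name            string   `json:"name"`
	Type            string   `json:"type"`
	Kubeconfig      string   `json:"kubeconfig"`
	ClusterNetwork  string   `json:"clusterNetwork"`
	DefaultNetworks []string `json:"defaultNetworks"`
	LogLevel        string   `json:"logLevel"`
}

func main() {
	raw := []byte(`{
		"cniVersion": "0.3.1",
		"name": "node-cni-network",
		"type": "multus",
		"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
		"clusterNetwork": "test",
		"defaultNetworks": [],
		"logLevel": "debug"
	}`)

	var conf netConf
	if err := json.Unmarshal(raw, &conf); err != nil {
		panic(err)
	}
	// Per the lookup order above, "test" would resolve to a CRD in
	// kube-system first, then to test.conf/test.conflist in confDir.
	fmt.Printf("cluster network %q, type %q\n", conf.ClusterNetwork, conf.Type)
}
```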
@@ -6,7 +6,7 @@ Multus now uses [gopkg.in](http://gopkg.in/) to expose its code as library.
You can use following command to import our code into your go code.

```
go get gopkg.in/intel/multus-cni.v3
go get gopkg.in/k8snetworkplumbingwg/multus-cni.v3
```
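For orientation, a minimal consumer of the renamed module might import it as in the sketch below. This is illustrative only; it assumes just that the pkg/logging package and its Debugf helper exist, both of which appear in the diffs further down this page.

```go
// Sketch: importing Multus packages under the new module path
// gopkg.in/k8snetworkplumbingwg/multus-cni.v3 (formerly gopkg.in/intel).
package main

import "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"

func main() {
	// Debugf is the logging helper used throughout the Multus sources
	// shown later in this compare view.
	logging.Debugf("multus imported via the k8snetworkplumbingwg module path")
}
```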
@@ -25,7 +25,7 @@ If an issue is closed that you don't feel is sufficiently resolved, please feel
You can use the built in `./hack/build-go.sh` script!

```
git clone https://github.com/intel/multus-cni.git
git clone https://github.com/k8snetworkplumbingwg/multus-cni.git
cd multus-cni
./hack/build-go.sh
```
@@ -47,6 +47,6 @@ The following are the best practices for multus logging:
* `logging.Panicf()` only be used for critical errors (it should NOT normally be used)


## CI Introduction
## Multus release schedule

TBD
On the first maintainer's meeting, twice yearly, after January 1st and July 1st, if a new version has not been tagged, a new version will tagged.
@@ -13,7 +13,7 @@ Generally we recommend two options: Manually place a Multus binary in your `/opt

*Copy Multus Binary into place*

You may acquire the Multus binary via compilation (see the [developer guide](development.md)) or download the a binary from the [GitHub releases](https://github.com/intel/multus-cni/releases) page. Copy multus binary into CNI binary directory, usually `/opt/cni/bin`. Perform this on all nodes in your cluster (master and nodes).
You may acquire the Multus binary via compilation (see the [developer guide](development.md)) or download the a binary from the [GitHub releases](https://github.com/k8snetworkplumbingwg/multus-cni/releases) page. Copy multus binary into CNI binary directory, usually `/opt/cni/bin`. Perform this on all nodes in your cluster (master and nodes).

    $ cp multus /opt/cni/bin

@@ -35,7 +35,7 @@ Execute following commands at all Kubernetes nodes (i.e. master and minions)

```
$ mkdir -p /etc/cni/net.d
$ cat >/etc/cni/net.d/30-multus.conf <<EOF
$ cat >/etc/cni/net.d/00-multus.conf <<EOF
{
  "name": "multus-cni-network",
  "type": "multus",
@@ -264,7 +264,7 @@ $ cat <<EOF > /etc/cni/multus/net.d/macvlan2.conf

### Run pod with network annotation

#### Lauch pod with text annotation
#### Launch pod with text annotation

```
# Execute following command at Kubernetes master
@@ -284,7 +284,7 @@ spec:
EOF
```

#### Lauch pod with text annotation for NetworkAttachmentDefinition in different namespace
#### Launch pod with text annotation for NetworkAttachmentDefinition in different namespace

You can also specify NetworkAttachmentDefinition with its namespace as adding `<namespace>/`

@@ -330,7 +330,7 @@ spec:
EOF
```

#### Lauch pod with text annotation with interface name
#### Launch pod with text annotation with interface name

You can also specify interface name as adding `@<ifname>`.

@@ -352,7 +352,7 @@ spec:
EOF
```

#### Lauch pod with json annotation
#### Launch pod with json annotation

```
# Execute following command at Kubernetes master
@@ -375,7 +375,7 @@ spec:
EOF
```

#### Lauch pod with json annotation for NetworkAttachmentDefinition in different namespace
#### Launch pod with json annotation for NetworkAttachmentDefinition in different namespace

You can also specify NetworkAttachmentDefinition with its namespace as adding `"namespace": "<namespace>"`.

@@ -400,7 +400,7 @@ spec:
EOF
```

#### Lauch pod with json annotation with interface
#### Launch pod with json annotation with interface

You can also specify interface name as adding `"interface": "<ifname>"`.

@@ -595,6 +595,10 @@ This can be used if you have your CNI configuration stored in an alternate locat

Used only with `--multus-conf-file=auto`. Allows you to specify an alternate path to the Kubeconfig.

    --multus-master-cni-file-name=

The `--multus-master-cni-file-name` can be used to select the cni file as the master cni, rather than the first file in cni-conf-dir. For example, `--multus-master-cni-file-name=10-calico.conflist`.

    --multus-log-level=
    --multus-log-file=
@@ -45,7 +45,7 @@ Our recommended quickstart method to deploy Multus is to deploy using a Daemonse
Firstly, clone this GitHub repository.

```
git clone https://github.com/intel/multus-cni.git && cd multus-cni
git clone https://github.com/k8snetworkplumbingwg/multus-cni.git && cd multus-cni
```

We'll apply a YAML file with `kubectl` from this repo.
@@ -4,7 +4,7 @@


```
$ git clone https://github.com/intel/multus-cni.git
$ git clone https://github.com/k8snetworkplumbingwg/multus-cni.git
$ cd multus-cni/e2e
$ ./get_tools.sh
$ ./setup_cluster.sh
@@ -10,7 +10,7 @@ running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || t
if [ "${running}" != 'true' ]; then
  # run registry and push the multus image
  docker run -d --restart=always -p "${reg_port}:5000" --name "${reg_name}" registry:2
  docker build -t localhost:5000/multus:e2e ..
  docker build -t localhost:5000/multus:e2e -f ../deployments/Dockerfile ..
  docker push localhost:5000/multus:e2e
fi
reg_host="${reg_name}"
e2e/simple-pod.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: simple-centos1
  annotations:
  labels:
    app: simple
spec:
  containers:
  - name: simple-centos1
    image: centos:8
    command: ["/bin/sleep", "10000"]
    securityContext:
      privileged: true
e2e/test-simple-pod.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/sh
set -o errexit

export PATH=${PATH}:./bin

kubectl create -f simple-pod.yml
kubectl wait --for=condition=ready -l app=simple --timeout=300s pod

echo "cleanup resources"
kubectl delete -f simple-pod.yml
go.mod (61 lines changed)
@@ -1,26 +1,51 @@
module gopkg.in/intel/multus-cni.v3
module gopkg.in/k8snetworkplumbingwg/multus-cni.v3

go 1.12
go 1.16

require (
	github.com/Microsoft/go-winio v0.4.14 // indirect
	github.com/containernetworking/cni v0.8.1
	github.com/containernetworking/plugins v0.8.2
	github.com/json-iterator/go v1.1.9 // indirect
	github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.1-0.20201119153432-9d213757d22d
	github.com/onsi/ginkgo v1.11.0
	github.com/onsi/gomega v1.7.0
	github.com/pkg/errors v0.8.1
	github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf
	github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
	golang.org/x/net v0.0.0-20201021035429-f5854403a974
	google.golang.org/grpc v1.23.0
	github.com/containernetworking/plugins v0.9.1
	github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.1-0.20210510153419-66a699ae3b05
	github.com/onsi/ginkgo v1.12.1
	github.com/onsi/gomega v1.10.3
	github.com/pkg/errors v0.9.1
	github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
	golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
	google.golang.org/grpc v1.27.1
	gopkg.in/natefinch/lumberjack.v2 v2.0.0
	k8s.io/api v0.18.3
	k8s.io/apimachinery v0.18.3
	k8s.io/client-go v0.18.3
	k8s.io/api v0.21.1
	k8s.io/apimachinery v0.21.1
	k8s.io/client-go v0.21.1
	k8s.io/klog v1.0.0
	k8s.io/kubernetes v1.13.0
	k8s.io/kubelet v0.0.0
	k8s.io/kubernetes v1.21.1
)

replace github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
replace (
	github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
	k8s.io/api => k8s.io/api v0.21.1
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.1
	k8s.io/apimachinery => k8s.io/apimachinery v0.21.1
	k8s.io/apiserver => k8s.io/apiserver v0.21.1
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.1
	k8s.io/client-go => k8s.io/client-go v0.21.1
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.1
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.1
	k8s.io/code-generator => k8s.io/code-generator v0.21.1
	k8s.io/component-base => k8s.io/component-base v0.21.1
	k8s.io/component-helpers => k8s.io/component-helpers v0.21.1
	k8s.io/controller-manager => k8s.io/controller-manager v0.21.1
	k8s.io/cri-api => k8s.io/cri-api v0.21.1
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.1
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.1
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.1
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.1
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.1
	k8s.io/kubectl => k8s.io/kubectl v0.21.1
	k8s.io/kubelet => k8s.io/kubelet v0.21.1
	k8s.io/kubernetes => k8s.io/kubernetes v1.21.1
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.1
	k8s.io/metrics => k8s.io/metrics v0.21.1
	k8s.io/mount-utils => k8s.io/mount-utils v0.21.1
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.1
)
@@ -29,7 +29,7 @@ if [ "$GO111MODULE" == "off" ]; then
	echo "Building plugin without go module"
	echo "Warning: this will be deprecated in near future so please use go modules!"

	ORG_PATH="gopkg.in/intel"
	ORG_PATH="gopkg.in/k8snetworkplumbingwg"
	REPO_PATH="${ORG_PATH}/multus-cni.v3"

	if [ ! -h gopath/src/${REPO_PATH} ]; then
@@ -5,7 +5,7 @@ set -e
if [ "$GO111MODULE" == "off" ]; then
	echo "Warning: this will be deprecated in near future so please use go modules!"

	ORG_PATH="github.com/intel"
	ORG_PATH="gopkg.in/k8snetworkplumbingwg"
	REPO_PATH="${ORG_PATH}/multus-cni"

	if [ ! -h gopath/src/${REPO_PATH} ]; then
@@ -65,4 +65,4 @@ Example docker run command:
$ docker run -it -v /opt/cni/bin/:/host/opt/cni/bin/ -v /etc/cni/net.d/:/host/etc/cni/net.d/ --entrypoint=/bin/bash dougbtv/multus
```

Originally inspired by and is a portmanteau of the [Flannel daemonset](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml), the [Calico Daemonset](https://github.com/projectcalico/calico/blob/master/v2.0/getting-started/kubernetes/installation/hosted/k8s-backend-addon-manager/calico-daemonset.yaml), and the [Calico CNI install bash script](https://github.com/projectcalico/cni-plugin/blob/be4df4db2e47aa7378b1bdf6933724bac1f348d0/k8s-install/scripts/install-cni.sh#L104-L153).
Originally inspired by and is a portmanteau of the [Flannel daemonset](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml), the [Calico Daemonset](https://docs.projectcalico.org/manifests/calico.yaml), and the [Calico CNI install bash script](https://github.com/projectcalico/cni-plugin/blob/be4df4db2e47aa7378b1bdf6933724bac1f348d0/k8s-install/scripts/install-cni.sh#L104-L153).
@@ -18,6 +18,8 @@ MULTUS_CONF_FILE="/usr/src/multus-cni/images/70-multus.conf"
MULTUS_AUTOCONF_DIR="/host/etc/cni/net.d"
MULTUS_BIN_FILE="/usr/src/multus-cni/bin/multus"
MULTUS_KUBECONFIG_FILE_HOST="/etc/cni/net.d/multus.d/multus.kubeconfig"
MULTUS_TEMP_KUBECONFIG="/tmp/multus.kubeconfig"
MULTUS_MASTER_CNI_FILE_NAME=""
MULTUS_NAMESPACE_ISOLATION=false
MULTUS_GLOBAL_NAMESPACES=""
MULTUS_LOG_TO_STDERR=true
@@ -39,7 +41,9 @@ function usage()
    echo -e "will be copied to the corresponding configuration directory. When "
    echo -e "'--multus-conf-file=auto' is used, 00-multus.conf will be automatically "
    echo -e "generated from the CNI configuration file of the master plugin (the first file "
    echo -e "in lexicographical order in cni-conf-dir)."
    echo -e "in lexicographical order in cni-conf-dir). When '--multus-master-cni-file-name'"
    echo -e "is used, 00-multus.conf will only be automatically generated from the specific"
    echo -e "file rather than the first file."
    echo -e ""
    echo -e "./entrypoint.sh"
    echo -e "\t-h --help"
@@ -50,6 +54,7 @@ function usage()
    echo -e "\t--multus-bin-file=$MULTUS_BIN_FILE"
    echo -e "\t--skip-multus-binary-copy=$SKIP_BINARY_COPY"
    echo -e "\t--multus-kubeconfig-file-host=$MULTUS_KUBECONFIG_FILE_HOST"
    echo -e "\t--multus-master-cni-file-name=$MULTUS_MASTER_CNI_FILE_NAME (empty by default, example: 10-calico.conflist)"
    echo -e "\t--namespace-isolation=$MULTUS_NAMESPACE_ISOLATION"
    echo -e "\t--global-namespaces=$MULTUS_GLOBAL_NAMESPACES (used only with --namespace-isolation=true)"
    echo -e "\t--multus-autoconfig-dir=$MULTUS_AUTOCONF_DIR (used only with --multus-conf-file=auto)"
@@ -110,6 +115,9 @@ while [ "$1" != "" ]; do
        --multus-kubeconfig-file-host)
            MULTUS_KUBECONFIG_FILE_HOST=$VALUE
            ;;
        --multus-master-cni-file-name)
            MULTUS_MASTER_CNI_FILE_NAME=$VALUE
            ;;
        --namespace-isolation)
            MULTUS_NAMESPACE_ISOLATION=$VALUE
            ;;
@@ -218,9 +226,10 @@ if [ -f "$SERVICE_ACCOUNT_PATH/token" ]; then
  # to skip TLS verification for now. We should eventually support
  # writing more complete kubeconfig files. This is only used
  # if the provided CNI network config references it.
  touch $MULTUS_KUBECONFIG
  chmod ${KUBECONFIG_MODE:-600} $MULTUS_KUBECONFIG
  cat > $MULTUS_KUBECONFIG <<EOF
  touch $MULTUS_TEMP_KUBECONFIG
  chmod ${KUBECONFIG_MODE:-600} $MULTUS_TEMP_KUBECONFIG
  # Write the kubeconfig to a temp file first.
  cat > $MULTUS_TEMP_KUBECONFIG <<EOF
# Kubeconfig file for Multus CNI plugin.
apiVersion: v1
kind: Config
@@ -241,6 +250,9 @@ contexts:
current-context: multus-context
EOF

  # Atomically move the temp kubeconfig to its permanent home.
  mv -f $MULTUS_TEMP_KUBECONFIG $MULTUS_KUBECONFIG

else
  warn "Doesn't look like we're running in a kubernetes environment (no serviceaccount token)"
fi
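The kubeconfig hunk above replaces a direct write of $MULTUS_KUBECONFIG with a write to $MULTUS_TEMP_KUBECONFIG followed by `mv -f`, so a reader never observes a partially written kubeconfig. A minimal Go sketch of the same write-to-temp-then-rename idea (illustrative only, not code from this repository; it writes the temp file next to the target so the final rename stays on one filesystem):

```go
package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temporary file in the target's
// directory and then renames it over the target, mirroring the
// touch/chmod/cat-to-temp plus mv -f sequence in entrypoint.sh.
func writeFileAtomic(path string, data []byte, mode os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup; harmless after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(mode); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}
```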
@@ -255,11 +267,15 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
  found_master=false
  tries=0
  while [ $found_master == false ]; do
    MASTER_PLUGIN="$(ls $MULTUS_AUTOCONF_DIR | grep -E '\.conf(list)?$' | grep -Ev '00-multus\.conf' | head -1)"
    if [ "$MULTUS_MASTER_CNI_FILE_NAME" != "" ]; then
      MASTER_PLUGIN="$(ls $MULTUS_AUTOCONF_DIR/$MULTUS_MASTER_CNI_FILE_NAME)" || true
    else
      MASTER_PLUGIN="$(ls $MULTUS_AUTOCONF_DIR | grep -E '\.conf(list)?$' | grep -Ev '00-multus\.conf' | head -1)"
    fi
    if [ "$MASTER_PLUGIN" == "" ]; then
      if [ $tries -lt 600 ]; then
        if ! (($tries % 5)); then
          log "Attemping to find master plugin configuration, attempt $tries"
          log "Attempting to find master plugin configuration, attempt $tries"
        fi
        let "tries+=1"
        sleep 1;
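The hunk above changes how the "master" CNI configuration is picked when `--multus-conf-file=auto` is used: an explicitly named file (`--multus-master-cni-file-name`) wins, otherwise the first `.conf`/`.conflist` entry other than `00-multus.conf` is taken. A Go sketch of that selection logic, for illustration only; the authoritative implementation is the shell in entrypoint.sh:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// findMasterPlugin prefers an explicitly requested file name and
// otherwise takes the lexicographically first .conf/.conflist entry
// that is not 00-multus.conf, mirroring the shell loop above.
func findMasterPlugin(confDir, explicitName string) (string, error) {
	if explicitName != "" {
		p := filepath.Join(confDir, explicitName)
		if _, err := os.Stat(p); err != nil {
			return "", err
		}
		return p, nil
	}
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return "", err
	}
	var candidates []string
	for _, e := range entries {
		name := e.Name()
		if name == "00-multus.conf" {
			continue
		}
		if strings.HasSuffix(name, ".conf") || strings.HasSuffix(name, ".conflist") {
			candidates = append(candidates, name)
		}
	}
	if len(candidates) == 0 {
		return "", fmt.Errorf("no master plugin configuration found in %s", confDir)
	}
	sort.Strings(candidates)
	return filepath.Join(confDir, candidates[0]), nil
}
```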
@@ -143,7 +143,7 @@ data:
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-multus-ds-amd64
  name: kube-multus-ds
  namespace: kube-system
  labels:
    tier: node
@@ -163,8 +163,6 @@ spec:
        name: multus
    spec:
      hostNetwork: true
      nodeSelector:
        kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
@@ -172,7 +170,7 @@ spec:
      containers:
      - name: kube-multus
        # crio support requires multus:latest for now. support 3.3 or later.
        image: docker.io/nfvpe/multus:stable
        image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable
        command: ["/entrypoint.sh"]
        args:
        - "--cni-version=0.3.1"
@@ -216,213 +214,3 @@ spec:
          items:
          - key: cni-conf.json
            path: 70-multus.conf
[removed by this hunk: the trailing per-architecture DaemonSets kube-multus-ds-ppc64le, kube-multus-ds-arm64v8 and kube-multus-ds-s390x, copies of the DaemonSet above that differ only in their nodeSelector arch (ppc64le/arm64/s390x) and image tags (docker.io/nfvpe/multus:stable-ppc64le, stable-arm64v8, stable-s390x), all using the crio paths (--cni-bin-dir=/host/usr/libexec/cni, --restart-crio=true, /usr/libexec/cni host mount)]
@@ -116,7 +116,7 @@ data:
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-multus-ds-amd64
  name: kube-multus-ds
  namespace: kube-system
  labels:
    tier: node
@@ -136,15 +136,13 @@ spec:
        name: multus
    spec:
      hostNetwork: true
      nodeSelector:
        kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: multus
      containers:
      - name: kube-multus
        image: docker.io/nfvpe/multus:stable
        image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable
        command: ["/entrypoint.sh"]
        args:
        - "--multus-conf-file=auto"
@@ -179,71 +177,3 @@ spec:
          items:
          - key: cni-conf.json
            path: 70-multus.conf
[removed by this hunk: the trailing kube-multus-ds-ppc64le DaemonSet, a copy of the remaining DaemonSet with nodeSelector kubernetes.io/arch: ppc64le, image docker.io/nfvpe/multus:stable-ppc64le, "--cni-bin-dir=/host/home/kubernetes/bin" and the /home/kubernetes/bin host path]
@@ -143,7 +143,7 @@ data:
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-multus-ds-amd64
  name: kube-multus-ds
  namespace: kube-system
  labels:
    tier: node
@@ -163,15 +163,13 @@ spec:
        name: multus
    spec:
      hostNetwork: true
      nodeSelector:
        kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: multus
      containers:
      - name: kube-multus
        image: docker.io/nfvpe/multus:stable
        image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable
        command: ["/entrypoint.sh"]
        args:
        - "--multus-conf-file=auto"
@@ -206,205 +204,3 @@ spec:
          items:
          - key: cni-conf.json
            path: 70-multus.conf
[removed by this hunk: the trailing per-architecture DaemonSets kube-multus-ds-ppc64le, kube-multus-ds-arm64v8 and kube-multus-ds-s390x, copies of the remaining DaemonSet that differ only in their nodeSelector arch and docker.io/nfvpe/multus:stable-ppc64le/stable-arm64v8/stable-s390x images, all mounting /opt/cni/bin]
@@ -19,8 +19,8 @@ import (
	"encoding/json"
	"io/ioutil"

	"gopkg.in/intel/multus-cni.v3/pkg/logging"
	"gopkg.in/intel/multus-cni.v3/pkg/types"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
	v1 "k8s.io/api/core/v1"
)
@@ -10,7 +10,7 @@ import (
	"io/ioutil"
	"testing"

	"gopkg.in/intel/multus-cni.v3/pkg/types"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sTypes "k8s.io/apimachinery/pkg/types"
@@ -41,9 +41,9 @@ import (
	nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
	netclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1"
	netutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
	"gopkg.in/intel/multus-cni.v3/pkg/kubeletclient"
	"gopkg.in/intel/multus-cni.v3/pkg/logging"
	"gopkg.in/intel/multus-cni.v3/pkg/types"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/kubeletclient"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
)

const (
@@ -259,7 +259,7 @@ func getKubernetesDelegate(client *ClientInfo, net *types.NetworkSelectionElemen
	logging.Debugf("getKubernetesDelegate: found resourceName annotation : %s", resourceName)

	if resourceMap == nil {
		ck, err := kubeletclient.GetResourceClient()
		ck, err := kubeletclient.GetResourceClient("")
		if err != nil {
			return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get a ResourceClient instance: %v", err)
		}
@@ -367,7 +367,10 @@ func TryLoadPodDelegates(pod *v1.Pod, conf *types.NetConf, clientInfo *ClientInf
		return len(delegates), clientInfo, nil
	}

	return 0, clientInfo, nil
	if _, ok := err.(*NoK8sNetworkError); ok {
		return 0, clientInfo, nil
	}
	return 0, clientInfo, err
}

// GetK8sClient gets client info from kubeconfig
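The TryLoadPodDelegates hunk stops collapsing every failure into "zero delegates, no error": only a *NoK8sNetworkError (no secondary networks requested) is treated as the benign case, and any other error is now returned to the caller. A self-contained sketch of that pattern; the NoK8sNetworkError declaration below is a stand-in for the type defined in the Multus k8sclient package:

```go
package main

import "fmt"

// NoK8sNetworkError is a local stand-in for the sentinel error type
// used in the Multus k8sclient package (re-declared here only so the
// sketch compiles on its own).
type NoK8sNetworkError struct{ message string }

func (e *NoK8sNetworkError) Error() string { return e.message }

// delegateCount shows the error handling introduced above: the
// sentinel type means "no networks requested", anything else is a
// genuine failure and is propagated instead of being swallowed.
func delegateCount(err error) (int, error) {
	if err == nil {
		return 0, nil
	}
	if _, ok := err.(*NoK8sNetworkError); ok {
		return 0, nil
	}
	return 0, err
}

func main() {
	if n, err := delegateCount(&NoK8sNetworkError{"no networks found"}); err == nil {
		fmt.Println("delegates:", n)
	}
	if _, err := delegateCount(fmt.Errorf("apiserver unreachable")); err != nil {
		fmt.Println("propagated:", err)
	}
}
```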
@@ -23,10 +23,10 @@ import (
	"testing"

	types020 "github.com/containernetworking/cni/pkg/types/020"
	testutils "gopkg.in/intel/multus-cni.v3/pkg/testing"
	testutils "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/testing"

	"github.com/containernetworking/cni/pkg/skel"
	"gopkg.in/intel/multus-cni.v3/pkg/types"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"

	nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
	netfake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake"
@@ -1,50 +1,49 @@
package kubeletclient

import (
	"net/url"
	"os"
	"path/filepath"
	"time"

	"golang.org/x/net/context"
	"gopkg.in/intel/multus-cni.v3/pkg/checkpoint"
	"gopkg.in/intel/multus-cni.v3/pkg/logging"
	"gopkg.in/intel/multus-cni.v3/pkg/types"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/checkpoint"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
	v1 "k8s.io/api/core/v1"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
	podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
	"k8s.io/kubernetes/pkg/kubelet/util"
)

const (
	defaultKubeletSocketFile   = "kubelet.sock"
	defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb
)

var (
	kubeletSocket           string
	defaultPodResourcesPath = "/var/lib/kubelet/pod-resources"
	defaultPodResourcesPath = "/var/lib/kubelet/pod-resources"
)

// GetResourceClient returns an instance of ResourceClient interface initialized with Pod resource information
func GetResourceClient() (types.ResourceClient, error) {
func GetResourceClient(kubeletSocket string) (types.ResourceClient, error) {
	if kubeletSocket == "" {
		kubeletSocket, _ = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
	}
	// If Kubelet resource API endpoint exist use that by default
	// Or else fallback with checkpoint file
	if hasKubeletAPIEndpoint() {
	if hasKubeletAPIEndpoint(kubeletSocket) {
		logging.Debugf("GetResourceClient: using Kubelet resource API endpoint")
		return getKubeletClient()
		return getKubeletClient(kubeletSocket)
	}

	logging.Debugf("GetResourceClient: using Kubelet device plugin checkpoint")
	return checkpoint.GetCheckpoint()
}

func getKubeletClient() (types.ResourceClient, error) {
func getKubeletClient(kubeletSocket string) (types.ResourceClient, error) {
	newClient := &kubeletClient{}
	if kubeletSocket == "" {
		kubeletSocket = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
		kubeletSocket, _ = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
	}

	client, conn, err := podresources.GetClient(kubeletSocket, 10*time.Second, defaultPodResourcesMaxSize)
	client, conn, err := podresources.GetV1Client(kubeletSocket, 10*time.Second, defaultPodResourcesMaxSize)
	if err != nil {
		return nil, logging.Errorf("getKubeletClient: error getting grpc client: %v\n", err)
	}
@@ -102,10 +101,13 @@ func (rc *kubeletClient) GetPodResourceMap(pod *v1.Pod) (map[string]*types.Resou
	return resourceMap, nil
}

func hasKubeletAPIEndpoint() bool {
func hasKubeletAPIEndpoint(endpoint string) bool {
	u, err := url.Parse(endpoint)
	if err != nil {
		return false
	}
	// Check for kubelet resource API socket file
	kubeletAPISocket := filepath.Join(defaultPodResourcesPath, defaultKubeletSocketFile)
	if _, err := os.Stat(kubeletAPISocket); err != nil {
	if _, err := os.Stat(u.Path); err != nil {
		logging.Debugf("hasKubeletAPIEndpoint: error looking up kubelet resource api socket file: %q", err)
		return false
	}
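With this change, GetResourceClient takes the kubelet pod-resources socket as an argument: an empty string falls back to the default endpoint under /var/lib/kubelet/pod-resources, and the v1 PodResources API (GetV1Client) replaces v1alpha1. A sketch of a caller against the new signature; the import paths, GetResourceClient, GetPodResourceMap and the DeviceIDs field all appear in this diff, while the pod literal is purely illustrative:

```go
package main

import (
	"fmt"

	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/kubeletclient"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An empty socket string selects the default kubelet pod-resources
	// endpoint; a test or non-standard setup can pass an explicit path.
	client, err := kubeletclient.GetResourceClient("")
	if err != nil {
		panic(err)
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-name", Namespace: "pod-namespace"}}
	resourceMap, err := client.GetPodResourceMap(pod)
	if err != nil {
		panic(err)
	}
	for name, info := range resourceMap {
		fmt.Println(name, info.DeviceIDs)
	}
}
```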
@@ -15,8 +15,8 @@ import (
	k8sTypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/kubelet/util"

	mtypes "gopkg.in/intel/multus-cni.v3/pkg/types"
	podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
	mtypes "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

var (
@@ -29,6 +29,11 @@ type fakeResourceServer struct {
	server *grpc.Server
}

//TODO: This is stub code for test, but we may need to change for the testing we use this API in the future...
func (m *fakeResourceServer) GetAllocatableResources(ctx context.Context, req *podresourcesapi.AllocatableResourcesRequest) (*podresourcesapi.AllocatableResourcesResponse, error) {
	return &podresourcesapi.AllocatableResourcesResponse{}, nil
}

func (m *fakeResourceServer) List(ctx context.Context, req *podresourcesapi.ListPodResourcesRequest) (*podresourcesapi.ListPodResourcesResponse, error) {
	podName := "pod-name"
	podNamespace := "pod-namespace"
@@ -63,25 +68,27 @@ func TestKubeletclient(t *testing.T) {
	RunSpecs(t, "Kubeletclient Suite")
}

var testKubeletSocket string
func setUp() error {
	tempSocketDir, err := ioutil.TempDir("", "kubelet-resource-client")
	if err != nil {
		return err
	}
	defaultPodResourcesPath = filepath.Join(tempSocketDir, defaultPodResourcesPath)
	testingPodResourcesPath := filepath.Join(tempSocketDir, defaultPodResourcesPath)

	if err := os.MkdirAll(defaultPodResourcesPath, os.ModeDir); err != nil {
	if err := os.MkdirAll(testingPodResourcesPath, os.ModeDir); err != nil {
		return err
	}

	socketDir = defaultPodResourcesPath
	socketDir = testingPodResourcesPath
	socketName = filepath.Join(socketDir, "kubelet.sock")
	testKubeletSocket = socketName

	fakeServer = &fakeResourceServer{server: grpc.NewServer()}
	podresourcesapi.RegisterPodResourcesListerServer(fakeServer.server, fakeServer)
	lis, err := util.CreateListener(socketName)
	if err != nil {
		return nil
		return err
	}
	go fakeServer.server.Serve(lis)
	return nil
@@ -109,14 +116,12 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {

	Context("GetResourceClient()", func() {
		It("should return no error", func() {
			kubeletSocket = socketName
			_, err := GetResourceClient()
			_, err := GetResourceClient(testKubeletSocket)
			Expect(err).NotTo(HaveOccurred())
		})

		It("should fail with missing file", func() {
			kubeletSocket = "sampleSocketString"
			_, err := GetResourceClient()
			_, err := GetResourceClient("sampleSocketString")
			Expect(err).To(HaveOccurred())
		})
	})
@@ -138,41 +143,11 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
				},
			},
		}
		kubeletSocket = socketName
		client, err := getKubeletClient()
		client, err := getKubeletClient(testKubeletSocket)
		Expect(err).NotTo(HaveOccurred())

		outputRMap := map[string]*mtypes.ResourceInfo{
			"resource": &mtypes.ResourceInfo{DeviceIDs: []string{"dev0", "dev1"}},
		}
		resourceMap, err := client.GetPodResourceMap(fakePod)
		Expect(err).NotTo(HaveOccurred())
		Expect(resourceMap).ShouldNot(BeNil())
		Expect(resourceMap).To(Equal(outputRMap))
	})

	It("should return no error with empty socket", func() {
		podUID := k8sTypes.UID("970a395d-bb3b-11e8-89df-408d5c537d23")
		fakePod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "pod-name",
				Namespace: "pod-namespace",
				UID:       podUID,
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container-name",
					},
				},
			},
		}
		kubeletSocket = ""
		client, err := getKubeletClient()
		Expect(err).NotTo(HaveOccurred())

		outputRMap := map[string]*mtypes.ResourceInfo{
			"resource": &mtypes.ResourceInfo{DeviceIDs: []string{"dev0", "dev1"}},
			"resource": {DeviceIDs: []string{"dev0", "dev1"}},
		}
		resourceMap, err := client.GetPodResourceMap(fakePod)
		Expect(err).NotTo(HaveOccurred())
@@ -181,8 +156,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
	})

	It("should return an error with garbage socket value", func() {
		kubeletSocket = "/badfilepath!?//"
		_, err := getKubeletClient()
		_, err := getKubeletClient("/badfilepath!?//")
		Expect(err).To(HaveOccurred())
	})
})
@@ -197,8 +171,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
			UID:       podUID,
		},
	}
	kubeletSocket = socketName
	client, err := getKubeletClient()
	client, err := getKubeletClient(testKubeletSocket)
	Expect(err).NotTo(HaveOccurred())
	_, err = client.GetPodResourceMap(fakePod)
	Expect(err).To(HaveOccurred())
@@ -215,8 +188,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
			UID:       podUID,
		},
	}
	kubeletSocket = socketName
	client, err := getKubeletClient()
	client, err := getKubeletClient(testKubeletSocket)
	Expect(err).NotTo(HaveOccurred())
	_, err = client.GetPodResourceMap(fakePod)
	Expect(err).To(HaveOccurred())
@@ -234,8 +206,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
|
||||
},
|
||||
}
|
||||
|
||||
kubeletSocket = socketName
|
||||
client, err := getKubeletClient()
|
||||
client, err := getKubeletClient(testKubeletSocket)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
emptyRMap := map[string]*mtypes.ResourceInfo{}
|
||||
|
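The hunks above change the kubelet pod-resources client so the socket endpoint is passed as an argument instead of being read from a package-level variable, and move it to the v1 podresources API. A minimal sketch of how the reworked entry points might be called; the package import path and the socket URL are assumptions for illustration, not part of the diff:

```go
package example

import (
	"fmt"

	kubeletclient "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/kubeletclient"
	v1 "k8s.io/api/core/v1"
)

// printPodDevices lists device IDs allocated to a pod, per resource name.
func printPodDevices(pod *v1.Pod) error {
	// Assumed default endpoint; the diff builds it via util.LocalEndpoint().
	client, err := kubeletclient.GetResourceClient("unix:///var/lib/kubelet/pod-resources/kubelet.sock")
	if err != nil {
		return err
	}
	resourceMap, err := client.GetPodResourceMap(pod)
	if err != nil {
		return err
	}
	for name, info := range resourceMap {
		fmt.Printf("%s -> %v\n", name, info.DeviceIDs)
	}
	return nil
}
```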
@@ -17,7 +17,6 @@ package multus
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
@@ -31,20 +30,25 @@ import (
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
cnicurrent "github.com/containernetworking/cni/pkg/types/current"
cniversion "github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ns"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"github.com/vishvananda/netlink"
k8s "gopkg.in/intel/multus-cni.v3/pkg/k8sclient"
"gopkg.in/intel/multus-cni.v3/pkg/logging"
"gopkg.in/intel/multus-cni.v3/pkg/netutils"
"gopkg.in/intel/multus-cni.v3/pkg/types"
k8s "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/k8sclient"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/netutils"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
k8snet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
)

const (
shortPollDuration = 250 * time.Millisecond
shortPollTimeout = 2500 * time.Millisecond
)

var (
version = "master@git"
commit = "unknown commit"
@@ -161,12 +165,12 @@ func validateIfName(nsname string, ifname string) error {
return err
}

func confAdd(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("confAdd: %v, %s, %s", rt, string(rawNetconf), binDir)
func confAdd(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("confAdd: %v, %s", rt, string(rawNetconf))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)

conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
@@ -181,12 +185,12 @@ func confAdd(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invo
return result, nil
}

func confCheck(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("confCheck: %v, %s, %s", rt, string(rawNetconf), binDir)
func confCheck(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("confCheck: %v, %s", rt, string(rawNetconf))

binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)

conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
@@ -201,12 +205,12 @@ func confCheck(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec in
return err
}

func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s, %s", rt, string(rawNetconf), binDir)
func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s", rt, string(rawNetconf))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)

conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
@@ -221,12 +225,12 @@ func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invo
return err
}

func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s, %s", rt, string(rawnetconflist), binDir)
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s", rt, string(rawnetconflist))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)

confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
@@ -241,12 +245,12 @@ func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, e
return result, nil
}

func conflistCheck(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistCheck: %v, %s, %s", rt, string(rawnetconflist), binDir)
func conflistCheck(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistCheck: %v, %s", rt, string(rawnetconflist))

binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)

confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
@@ -261,12 +265,12 @@ func conflistCheck(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string,
return err
}

func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s, %s", rt, string(rawnetconflist), binDir)
func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s", rt, string(rawnetconflist))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)

confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
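The conf*/conflist* helpers above now receive the Multus NetConf, so the delegate invocations pick up both the configured bin dir and a per-instance results cache directory via libcni.NewCNIConfigWithCacheDir instead of the default cache path. A rough sketch of that pattern, with the helper name and hard-coded values chosen only for illustration:

```go
package example

import (
	"os"
	"path/filepath"

	"github.com/containernetworking/cni/libcni"
	"github.com/containernetworking/cni/pkg/invoke"
)

// newCNINet mirrors the shape used in the hunks above: the Multus bin dir is
// prepended to CNI_PATH and the results cache is placed under the Multus CNI dir.
func newCNINet(binDir, cniDir string, exec invoke.Exec) *libcni.CNIConfig {
	binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
	binDirs = append([]string{binDir}, binDirs...)
	return libcni.NewCNIConfigWithCacheDir(binDirs, cniDir, exec)
}
```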
@@ -281,8 +285,8 @@ func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, e
return err
}

func delegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v, %s", exec, ifName, delegate, rt, binDir)
func delegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v", exec, ifName, delegate, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("delegateAdd: error setting envionment variable CNI_IFNAME")
}
@@ -333,12 +337,12 @@ func delegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, ifNa
var result cnitypes.Result
var err error
if delegate.ConfListPlugin {
result, err = conflistAdd(rt, delegate.Bytes, binDir, exec)
result, err = conflistAdd(rt, delegate.Bytes, multusNetconf, exec)
if err != nil {
return nil, err
}
} else {
result, err = confAdd(rt, delegate.Bytes, binDir, exec)
result, err = confAdd(rt, delegate.Bytes, multusNetconf, exec)
if err != nil {
return nil, err
}
@@ -379,15 +383,15 @@ func delegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, ifNa
kubeClient.Eventf(pod, v1.EventTypeNormal, "AddedInterface", "Add %s %v", rt.IfName, ips)
}
} else {
// for further debug https://github.com/intel/multus-cni/issues/481
// for further debug https://github.com/k8snetworkplumbingwg/multus-cni/issues/481
logging.Errorf("delegateAdd: pod nil pointer: namespace: %s, name: %s, container id: %s, pod: %v", rt.Args[1][1], rt.Args[2][1], rt.Args[3][1], pod)
}

return result, nil
}

func delegateCheck(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateCheck: %v, %s, %v, %v, %s", exec, ifName, delegateConf, rt, binDir)
func delegateCheck(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
logging.Debugf("delegateCheck: %v, %s, %v, %v", exec, ifName, delegateConf, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateCheck: error setting envionment variable CNI_IFNAME")
}
@@ -404,12 +408,12 @@ func delegateCheck(exec invoke.Exec, ifName string, delegateConf *types.Delegate

var err error
if delegateConf.ConfListPlugin {
err = conflistCheck(rt, delegateConf.Bytes, binDir, exec)
err = conflistCheck(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateCheck: error invoking ConflistCheck - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confCheck(rt, delegateConf.Bytes, binDir, exec)
err = confCheck(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateCheck: error invoking DelegateCheck - %q: %v", delegateConf.Conf.Type, err)
}
@@ -418,8 +422,8 @@ func delegateCheck(exec invoke.Exec, ifName string, delegateConf *types.Delegate
return err
}

func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateDel: %v, %v, %s, %v, %v, %s", exec, pod, ifName, delegateConf, rt, binDir)
func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
logging.Debugf("delegateDel: %v, %v, %s, %v, %v", exec, pod, ifName, delegateConf, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateDel: error setting envionment variable CNI_IFNAME")
}
@@ -440,12 +444,12 @@ func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *typ

var err error
if delegateConf.ConfListPlugin {
err = conflistDel(rt, delegateConf.Bytes, binDir, exec)
err = conflistDel(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking ConflistDel - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confDel(rt, delegateConf.Bytes, binDir, exec)
err = confDel(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking DelegateDel - %q: %v", delegateConf.Conf.Type, err)
}
@@ -457,8 +461,8 @@ func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *typ
// delPlugins deletes plugins in reverse order from lastdIdx
// Uses netRt as base RuntimeConf (coming from NetConf) but merges it
// with each of the delegates' configuration
func delPlugins(exec invoke.Exec, pod *v1.Pod, args *skel.CmdArgs, k8sArgs *types.K8sArgs, delegates []*types.DelegateNetConf, lastIdx int, netRt *types.RuntimeConfig, binDir string) error {
logging.Debugf("delPlugins: %v, %v, %v, %v, %v, %d, %v, %s", exec, pod, args, k8sArgs, delegates, lastIdx, netRt, binDir)
func delPlugins(exec invoke.Exec, pod *v1.Pod, args *skel.CmdArgs, k8sArgs *types.K8sArgs, delegates []*types.DelegateNetConf, lastIdx int, netRt *types.RuntimeConfig, multusNetconf *types.NetConf) error {
logging.Debugf("delPlugins: %v, %v, %v, %v, %v, %d, %v", exec, pod, args, k8sArgs, delegates, lastIdx, netRt)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("delPlugins: error setting environment variable CNI_COMMAND to a value of DEL")
}
@@ -468,7 +472,7 @@ func delPlugins(exec invoke.Exec, pod *v1.Pod, args *skel.CmdArgs, k8sArgs *type
ifName := getIfname(delegates[idx], args.IfName, idx)
rt, cniDeviceInfoPath := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, netRt, delegates[idx])
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, pod, ifName, delegates[idx], rt, binDir); err != nil {
if err := delegateDel(exec, pod, ifName, delegates[idx], rt, multusNetconf); err != nil {
errorstrings = append(errorstrings, err.Error())
}
if cniDeviceInfoPath != "" {
@@ -505,6 +509,18 @@ func cmdPluginErr(k8sArgs *types.K8sArgs, confName string, format string, args .
return logging.Errorf(msg+format, args...)
}

func isCriticalRequestRetriable(err error) bool {
logging.Debugf("isCriticalRequestRetriable: %v", err)
errorTypesAllowingRetry := []func(error) bool{
errors.IsServiceUnavailable, errors.IsInternalError, k8snet.IsConnectionReset, k8snet.IsConnectionRefused}
for _, f := range errorTypesAllowingRetry {
if f(err) {
return true
}
}
return false
}

//CmdAdd ...
func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (cnitypes.Result, error) {
n, err := types.LoadNetConf(args.StdinData)
@@ -529,7 +545,7 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
return err == nil, nil
})
if err != nil {
return nil, cmdErr(k8sArgs, "PollImmediate error waiting for ReadinessIndicatorFile: %v", err)
return nil, cmdErr(k8sArgs, "have you checked that your default network is ready? still waiting for readinessindicatorfile @ %v. pollimmediate error: %v", n.ReadinessIndicatorFile, err)
}
}

@@ -538,17 +554,15 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
var waitErr error
// in case of ServiceUnavailable, retry 10 times with 0.5 sec interval
if errors.IsServiceUnavailable(err) {
pollDuration := 500 * time.Millisecond
pollTimeout := 5 * time.Second
waitErr = wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
// in case of a retriable error, retry 10 times with 0.25 sec interval
if isCriticalRequestRetriable(err) {
waitErr = wait.PollImmediate(shortPollDuration, shortPollTimeout, func() (bool, error) {
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
return pod != nil, err
})
// retry failed, then return error with retry out
if waitErr != nil {
return nil, cmdErr(k8sArgs, "error getting pod by service unavailable: %v", err)
return nil, cmdErr(k8sArgs, "error getting pod with error: %v", err)
}
} else {
// Other case, return error
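The CmdAdd (and, further below, CmdDel) hunks replace the ServiceUnavailable-only retry with isCriticalRequestRetriable and the shorter shortPollDuration/shortPollTimeout constants. A standalone sketch of the same retry shape; the getPod callback is a hypothetical stand-in, not the Multus API:

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	k8snet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/wait"
)

// retriable mirrors isCriticalRequestRetriable from the diff above.
func retriable(err error) bool {
	for _, f := range []func(error) bool{
		errors.IsServiceUnavailable, errors.IsInternalError,
		k8snet.IsConnectionReset, k8snet.IsConnectionRefused,
	} {
		if f(err) {
			return true
		}
	}
	return false
}

// getPodWithRetry retries a transient API failure on a 250ms/2.5s budget,
// matching shortPollDuration and shortPollTimeout in the hunk above.
func getPodWithRetry(getPod func() (*v1.Pod, error)) (*v1.Pod, error) {
	pod, err := getPod()
	if err == nil || !retriable(err) {
		return pod, err
	}
	waitErr := wait.PollImmediate(250*time.Millisecond, 2500*time.Millisecond, func() (bool, error) {
		pod, err = getPod()
		return pod != nil, err
	})
	if waitErr != nil {
		return nil, waitErr
	}
	return pod, nil
}
```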
@@ -586,7 +600,7 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
for idx, delegate := range n.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
rt, cniDeviceInfoPath := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, n.RuntimeConfig, delegate)
if cniDeviceInfoPath != "" {
if cniDeviceInfoPath != "" && delegate.ResourceName != "" && delegate.DeviceID != "" {
err = nadutils.CopyDeviceInfoForCNIFromDP(cniDeviceInfoPath, delegate.ResourceName, delegate.DeviceID)
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
@@ -595,7 +609,7 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
}
}

tmpResult, err = delegateAdd(exec, kubeClient, pod, ifName, delegate, rt, n.BinDir, cniArgs)
tmpResult, err = delegateAdd(exec, kubeClient, pod, ifName, delegate, rt, n, cniArgs)
if err != nil {
// If the add failed, tear down all networks we already added
netName := delegate.Conf.Name
@@ -603,7 +617,7 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
netName = delegate.ConfList.Name
}
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, nil, args, k8sArgs, n.Delegates, idx, n.RuntimeConfig, n.BinDir)
_ = delPlugins(exec, nil, args, k8sArgs, n.Delegates, idx, n.RuntimeConfig, n)
return nil, cmdPluginErr(k8sArgs, netName, "error adding container to network %q: %v", netName, err)
}

@@ -704,7 +718,7 @@ func CmdCheck(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo)
ifName := getIfname(delegate, args.IfName, idx)

rt, _ := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, in.RuntimeConfig, delegate)
err = delegateCheck(exec, ifName, delegate, rt, in.BinDir)
err = delegateCheck(exec, ifName, delegate, rt, in)
if err != nil {
return err
}
@@ -765,17 +779,15 @@ func CmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) er
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
var waitErr error
// in case of ServiceUnavailable, retry 10 times with 0.5 sec interval
if errors.IsServiceUnavailable(err) {
pollDuration := 500 * time.Millisecond
pollTimeout := 5 * time.Second
waitErr = wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
// in case of a retriable error, retry 10 times with 0.25 sec interval
if isCriticalRequestRetriable(err) {
waitErr = wait.PollImmediate(shortPollDuration, shortPollTimeout, func() (bool, error) {
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
return pod != nil, err
})
// retry failed, then return error with retry out
if waitErr != nil {
return cmdErr(k8sArgs, "error getting pod by service unavailable: %v", err)
return cmdErr(k8sArgs, "error getting pod with error: %v", err)
}
} else if errors.IsNotFound(err) {
// If not found, proceed to remove interface with cache
@@ -855,34 +867,5 @@ func CmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) er
}
}

return delPlugins(exec, pod, args, k8sArgs, in.Delegates, len(in.Delegates)-1, in.RuntimeConfig, in.BinDir)
}

func main() {
// Init command line flags to clear vendored packages' one, especially in init()
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)

// add version flag
versionOpt := false
flag.BoolVar(&versionOpt, "version", false, "Show application version")
flag.BoolVar(&versionOpt, "v", false, "Show application version")
flag.Parse()
if versionOpt == true {
fmt.Printf("%s\n", PrintVersionString())
return
}

skel.PluginMain(
func(args *skel.CmdArgs) error {
result, err := CmdAdd(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error {
return CmdCheck(args, nil, nil)
},
func(args *skel.CmdArgs) error { return CmdDel(args, nil, nil) },
cniversion.All, "meta-plugin that delegates to other CNI plugins")
return delPlugins(exec, pod, args, k8sArgs, in.Delegates, len(in.Delegates)-1, in.RuntimeConfig, in)
}

@@ -35,10 +35,10 @@ import (
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containernetworking/plugins/pkg/testutils"
netfake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake"
"gopkg.in/intel/multus-cni.v3/pkg/k8sclient"
"gopkg.in/intel/multus-cni.v3/pkg/logging"
testhelpers "gopkg.in/intel/multus-cni.v3/pkg/testing"
"gopkg.in/intel/multus-cni.v3/pkg/types"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/k8sclient"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/testing"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
@@ -1508,7 +1508,7 @@ var _ = Describe("multus operations cniVersion 0.2.0 config", func() {
recorder := clientInfo.EventRecorder.(*record.FakeRecorder)
events := collectEvents(recorder.Events)
Expect(len(events)).To(Equal(3))
Expect(events[0]).To(Equal("Normal AddedInterface Add eth0 [1.1.1.2/24]"))
Expect(events[0]).To(Equal("Normal AddedInterface Add eth0 [1.1.1.2/24] from weave1"))
Expect(events[1]).To(Equal("Normal AddedInterface Add net1 [1.1.1.3/24] from test/net1"))
Expect(events[2]).To(Equal("Normal AddedInterface Add net2 [1.1.1.4/24] from test/net2"))
})
@@ -2033,14 +2033,16 @@ var _ = Describe("multus operations cniVersion 0.2.0 config", func() {
os.Setenv("CNI_COMMAND", "ADD")
os.Setenv("CNI_IFNAME", "eth0")

binDir := "/opt/cni/bin"
fakeMultusNetConf := types.NetConf{
BinDir: "/opt/cni/bin",
}
// use fExec for the exec param
rawnetconflist := []byte(`{"cniVersion":"0.2.0","name":"weave1","type":"weave-net"}`)
k8sargs, err := k8sclient.GetK8sArgs(args)
n, err := types.LoadNetConf(args.StdinData)
rt, _ := types.CreateCNIRuntimeConf(args, k8sargs, args.IfName, n.RuntimeConfig, nil)

err = conflistDel(rt, rawnetconflist, binDir, fExec)
err = conflistDel(rt, rawnetconflist, &fakeMultusNetConf, fExec)
Expect(err).To(HaveOccurred())
})

@@ -3279,14 +3281,16 @@ var _ = Describe("multus operations cniVersion 0.4.0 config", func() {
os.Setenv("CNI_COMMAND", "ADD")
os.Setenv("CNI_IFNAME", "eth0")

binDir := "/opt/cni/bin"
fakeMultusNetConf := types.NetConf{
BinDir: "/opt/cni/bin",
}
// use fExec for the exec param
rawnetconflist := []byte(`{"cniVersion":"0.4.0","name":"weave1","type":"weave-net"}`)
k8sargs, err := k8sclient.GetK8sArgs(args)
n, err := types.LoadNetConf(args.StdinData)
rt, _ := types.CreateCNIRuntimeConf(args, k8sargs, args.IfName, n.RuntimeConfig, nil)

err = conflistDel(rt, rawnetconflist, binDir, fExec)
err = conflistDel(rt, rawnetconflist, &fakeMultusNetConf, fExec)
Expect(err).To(HaveOccurred())
})
})

@@ -21,7 +21,7 @@ import (
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
"gopkg.in/intel/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"net"
"strings"
)

@@ -26,7 +26,7 @@ import (
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"gopkg.in/intel/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
)

const (
@@ -54,6 +54,7 @@ func LoadDelegateNetConfList(bytes []byte, delegateConf *DelegateNetConf) error
return logging.Errorf("LoadDelegateNetConfList: a plugin delegate must have the 'type' field")
}
delegateConf.ConfListPlugin = true
delegateConf.Name = delegateConf.ConfList.Name
return nil
}

@@ -66,6 +67,7 @@ func LoadDelegateNetConf(bytes []byte, net *NetworkSelectionElement, deviceID st
if err := json.Unmarshal(bytes, &delegateConf.Conf); err != nil {
return nil, logging.Errorf("LoadDelegateNetConf: error unmarshalling delegate config: %v", err)
}
delegateConf.Name = delegateConf.Conf.Name

// Do some minimal validation
if delegateConf.Conf.Type == "" {
@@ -188,13 +190,15 @@ func CreateCNIRuntimeConf(args *skel.CmdArgs, k8sArgs *K8sArgs, ifName string, r

if delegate != nil {
delegateRc = mergeCNIRuntimeConfig(rc, delegate)
if delegateRc.CNIDeviceInfoFile != "" {
logging.Debugf("Warning: Existing value of CNIDeviceInfoFile will be overwritten %s", delegateRc.CNIDeviceInfoFile)
if delegateRc.DeviceID != "" {
if delegateRc.CNIDeviceInfoFile != "" {
logging.Debugf("Warning: Existing value of CNIDeviceInfoFile will be overwritten %s", delegateRc.CNIDeviceInfoFile)
}
autoDeviceInfo := fmt.Sprintf("%s-%s_%s", delegate.Name, args.ContainerID, ifName)
delegateRc.CNIDeviceInfoFile = nadutils.GetCNIDeviceInfoPath(autoDeviceInfo)
cniDeviceInfoFile = delegateRc.CNIDeviceInfoFile
logging.Debugf("Adding auto-generated CNIDeviceInfoFile: %s", delegateRc.CNIDeviceInfoFile)
}
autoDeviceInfo := fmt.Sprintf("%s-%s_%s", delegate.Name, args.ContainerID, ifName)
delegateRc.CNIDeviceInfoFile = nadutils.GetCNIDeviceInfoPath(autoDeviceInfo)
cniDeviceInfoFile = delegateRc.CNIDeviceInfoFile
logging.Debugf("Adding auto-generated CNIDeviceInfoFile: %s", delegateRc.CNIDeviceInfoFile)
} else {
delegateRc = rc
}

@@ -27,7 +27,7 @@ import (
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containernetworking/plugins/pkg/testutils"
netutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
testhelpers "gopkg.in/intel/multus-cni.v3/pkg/testing"
testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/testing"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -725,6 +725,7 @@ var _ = Describe("config operations", func() {
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"delegates": [{
"name": "weave",
"type": "weave-net"
}],
"runtimeConfig": {
@@ -809,4 +810,39 @@ var _ = Describe("config operations", func() {
// The original RuntimeConfig must have not been overwritten
Expect(origRuntimeConfig).To(Equal(RuntimeConfig{}))
})

It("test DelegateConf Name is delivered", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"delegates": [{
"name": "weave",
"type": "weave-net"
}]
}`

n, err := LoadNetConf([]byte(conf))
Expect(err).NotTo(HaveOccurred())
Expect(len(n.Delegates)).To(BeEquivalentTo(1))
Expect(n.Delegates[0].Name).To(Equal("weave"))
})

It("test DelegateConfList Name is delivered", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"delegates": [{
"name": "weave-list",
"plugins": [ {"type" :"weave"} ]
}]
}`

n, err := LoadNetConf([]byte(conf))
Expect(err).NotTo(HaveOccurred())
Expect(len(n.Delegates)).To(BeEquivalentTo(1))
Expect(n.Delegates[0].Name).To(Equal("weave-list"))
})

})

2 vendor/github.com/Microsoft/go-winio/go.mod generated vendored
@@ -5,5 +5,5 @@ go 1.12
require (
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.1
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
)

2 vendor/github.com/Microsoft/go-winio/go.sum generated vendored
@@ -14,3 +14,5 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

17 vendor/github.com/Microsoft/go-winio/pipe.go generated vendored
@@ -182,13 +182,14 @@ func (s pipeAddress) String() string {
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
for {

select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err == nil {
return h, nil
}
@@ -197,7 +198,7 @@ func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
}
// Wait 10 msec and try again. This is a rather simplistic
// view, as we always try each 10 milliseconds.
time.Sleep(time.Millisecond * 10)
time.Sleep(10 * time.Millisecond)
}
}
}
@@ -210,7 +211,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
absTimeout = time.Now().Add(time.Second * 2)
absTimeout = time.Now().Add(2 * time.Second)
}
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
conn, err := DialPipeContext(ctx, path)
@@ -223,9 +224,15 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
}

// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
var err error
var h syscall.Handle
h, err = tryDialPipe(ctx, &path)
h, err = tryDialPipe(ctx, &path, access)
if err != nil {
return nil, err
}

20 vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file
File diff suppressed because it is too large
316 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file
@@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
"math"
"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)

ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}

type target struct {
quantile float64
epsilon float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))

for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}

return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}

func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}

func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}

type stream struct {
n float64
l []Sample
ƒ invariant
}

func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}

func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}

func (s *stream) count() int {
return int(s.n)
}

func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}

func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width

for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}

func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
21 vendor/github.com/blang/semver/.travis.yml generated vendored Normal file
@@ -0,0 +1,21 @@
language: go
matrix:
include:
- go: 1.4.3
- go: 1.5.4
- go: 1.6.3
- go: 1.7
- go: tip
allow_failures:
- go: tip
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
-repotoken $COVERALLS_TOKEN
- echo "Build examples" ; cd examples && go build
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
env:
global:
secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=

22 vendor/github.com/blang/semver/LICENSE generated vendored Normal file
@@ -0,0 +1,22 @@
The MIT License

Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

194
vendor/github.com/blang/semver/README.md
generated
vendored
Normal file
194
vendor/github.com/blang/semver/README.md
generated
vendored
Normal file
@@ -0,0 +1,194 @@
|
||||
semver for golang [](https://travis-ci.org/blang/semver) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master)
|
||||
======
|
||||
|
||||
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
|
||||
|
||||
Usage
|
||||
-----
|
||||
```bash
|
||||
$ go get github.com/blang/semver
|
||||
```
|
||||
Note: Always vendor your dependencies or fix on a specific version tag.
|
||||
|
||||
```go
|
||||
import github.com/blang/semver
|
||||
v1, err := semver.Make("1.0.0-beta")
|
||||
v2, err := semver.Make("2.0.0-beta")
|
||||
v1.Compare(v2)
|
||||
```
|
||||
|
||||
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
|
||||
|
||||
Why should I use this lib?
|
||||
-----
|
||||
|
||||
- Fully spec compatible
|
||||
- No reflection
|
||||
- No regex
|
||||
- Fully tested (Coverage >99%)
|
||||
- Readable parsing/validation errors
|
||||
- Fast (See [Benchmarks](#benchmarks))
|
||||
- Only Stdlib
|
||||
- Uses values instead of pointers
|
||||
- Many features, see below
|
||||
|
||||
|
||||
Features
|
||||
-----
|
||||
|
||||
- Parsing and validation at all levels
|
||||
- Comparator-like comparisons
|
||||
- Compare Helper Methods
|
||||
- InPlace manipulation
|
||||
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
|
||||
- Wildcards `>=1.x`, `<=2.5.x`
|
||||
- Sortable (implements sort.Interface)
|
||||
- database/sql compatible (sql.Scanner/Valuer)
|
||||
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
||||
|
||||
Ranges
|
||||
------
|
||||
|
||||
A `Range` is a set of conditions which specify which versions satisfy the range.
|
||||
|
||||
A condition is composed of an operator and a version. The supported operators are:
|
||||
|
||||
- `<1.0.0` Less than `1.0.0`
|
||||
- `<=1.0.0` Less than or equal to `1.0.0`
|
||||
- `>1.0.0` Greater than `1.0.0`
|
||||
- `>=1.0.0` Greater than or equal to `1.0.0`
|
||||
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
|
||||
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
|
||||
|
||||
Note that spaces between the operator and the version will be gracefully tolerated.
|
||||
|
||||
A `Range` can link multiple `Ranges` separated by space:
|
||||
|
||||
Ranges can be linked by logical AND:
|
||||
|
||||
- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
|
||||
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
|
||||
|
||||
Ranges can also be linked by logical OR:
|
||||
|
||||
- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
|
||||
|
||||
AND has a higher precedence than OR. It's not possible to use brackets.
|
||||
|
||||
Ranges can be combined by both AND and OR
|
||||
|
||||
- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
||||
|
||||
Range usage:
|
||||
|
||||
```
|
||||
v, err := semver.Parse("1.2.3")
|
||||
range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
|
||||
if range(v) {
|
||||
//valid
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Example
|
||||
-----
|
||||
|
||||
Have a look at full examples in [examples/main.go](examples/main.go)
|
||||
|
||||
```go
|
||||
import github.com/blang/semver
|
||||
|
||||
v, err := semver.Make("0.0.1-alpha.preview+123.github")
|
||||
fmt.Printf("Major: %d\n", v.Major)
|
||||
fmt.Printf("Minor: %d\n", v.Minor)
|
||||
fmt.Printf("Patch: %d\n", v.Patch)
|
||||
fmt.Printf("Pre: %s\n", v.Pre)
|
||||
fmt.Printf("Build: %s\n", v.Build)
|
||||
|
||||
// Prerelease versions array
|
||||
if len(v.Pre) > 0 {
|
||||
fmt.Println("Prerelease versions:")
|
||||
for i, pre := range v.Pre {
|
||||
fmt.Printf("%d: %q\n", i, pre)
|
||||
}
|
||||
}
|
||||
|
||||
// Build meta data array
|
||||
if len(v.Build) > 0 {
|
||||
fmt.Println("Build meta data:")
|
||||
for i, build := range v.Build {
|
||||
fmt.Printf("%d: %q\n", i, build)
|
||||
}
|
||||
}
|
||||
|
||||
v001, err := semver.Make("0.0.1")
|
||||
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
|
||||
v001.GT(v) == true
|
||||
v.LT(v001) == true
|
||||
v.GTE(v) == true
|
||||
v.LTE(v) == true
|
||||
|
||||
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
|
||||
v001.Compare(v) == 1
|
||||
v.Compare(v001) == -1
|
||||
v.Compare(v) == 0
|
||||
|
||||
// Manipulate Version in place:
|
||||
v.Pre[0], err = semver.NewPRVersion("beta")
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing pre release version: %q", err)
|
||||
}
|
||||
|
||||
fmt.Println("\nValidate versions:")
|
||||
v.Build[0] = "?"
|
||||
|
||||
err = v.Validate()
|
||||
if err != nil {
|
||||
fmt.Printf("Validation failed: %s\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Benchmarks
-----

    BenchmarkParseSimple-4         5000000     390 ns/op      48 B/op   1 allocs/op
    BenchmarkParseComplex-4        1000000    1813 ns/op     256 B/op   7 allocs/op
    BenchmarkParseAverage-4        1000000    1171 ns/op     163 B/op   4 allocs/op
    BenchmarkStringSimple-4       20000000     119 ns/op      16 B/op   1 allocs/op
    BenchmarkStringLarger-4       10000000     206 ns/op      32 B/op   2 allocs/op
    BenchmarkStringComplex-4       5000000     324 ns/op      80 B/op   3 allocs/op
    BenchmarkStringAverage-4       5000000     273 ns/op      53 B/op   2 allocs/op
    BenchmarkValidateSimple-4    200000000    9.33 ns/op       0 B/op   0 allocs/op
    BenchmarkValidateComplex-4     3000000     469 ns/op       0 B/op   0 allocs/op
    BenchmarkValidateAverage-4     5000000     256 ns/op       0 B/op   0 allocs/op
    BenchmarkCompareSimple-4     100000000    11.8 ns/op       0 B/op   0 allocs/op
    BenchmarkCompareComplex-4     50000000    30.8 ns/op       0 B/op   0 allocs/op
    BenchmarkCompareAverage-4     30000000    41.5 ns/op       0 B/op   0 allocs/op
    BenchmarkSort-4                3000000     419 ns/op     256 B/op   2 allocs/op
    BenchmarkRangeParseSimple-4    2000000     850 ns/op     192 B/op   5 allocs/op
    BenchmarkRangeParseAverage-4   1000000    1677 ns/op     400 B/op  10 allocs/op
    BenchmarkRangeParseComplex-4    300000    5214 ns/op    1440 B/op  30 allocs/op
    BenchmarkRangeMatchSimple-4   50000000    25.6 ns/op       0 B/op   0 allocs/op
    BenchmarkRangeMatchAverage-4  30000000    56.4 ns/op       0 B/op   0 allocs/op
    BenchmarkRangeMatchComplex-4  10000000     153 ns/op       0 B/op   0 allocs/op

See benchmark cases at [semver_test.go](semver_test.go)

Motivation
-----

I simply couldn't find any library supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.


Contribution
-----

Feel free to make a pull request. For bigger changes, create an issue first to discuss it.


License
-----

See the [LICENSE](LICENSE) file.
23
vendor/github.com/blang/semver/json.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// MarshalJSON implements the encoding/json.Marshaler interface.
|
||||
func (v Version) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(v.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
|
||||
func (v *Version) UnmarshalJSON(data []byte) (err error) {
|
||||
var versionString string
|
||||
|
||||
if err = json.Unmarshal(data, &versionString); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
*v, err = Parse(versionString)
|
||||
|
||||
return
|
||||
}
|
17
vendor/github.com/blang/semver/package.json
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"author": "blang",
|
||||
"bugs": {
|
||||
"URL": "https://github.com/blang/semver/issues",
|
||||
"url": "https://github.com/blang/semver/issues"
|
||||
},
|
||||
"gx": {
|
||||
"dvcsimport": "github.com/blang/semver"
|
||||
},
|
||||
"gxVersion": "0.10.0",
|
||||
"language": "go",
|
||||
"license": "MIT",
|
||||
"name": "semver",
|
||||
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
|
||||
"version": "3.5.1"
|
||||
}
|
||||
|
416
vendor/github.com/blang/semver/range.go
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type wildcardType int
|
||||
|
||||
const (
|
||||
noneWildcard wildcardType = iota
|
||||
majorWildcard wildcardType = 1
|
||||
minorWildcard wildcardType = 2
|
||||
patchWildcard wildcardType = 3
|
||||
)
|
||||
|
||||
func wildcardTypefromInt(i int) wildcardType {
|
||||
switch i {
|
||||
case 1:
|
||||
return majorWildcard
|
||||
case 2:
|
||||
return minorWildcard
|
||||
case 3:
|
||||
return patchWildcard
|
||||
default:
|
||||
return noneWildcard
|
||||
}
|
||||
}
|
||||
|
||||
type comparator func(Version, Version) bool
|
||||
|
||||
var (
|
||||
compEQ comparator = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == 0
|
||||
}
|
||||
compNE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) != 0
|
||||
}
|
||||
compGT = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == 1
|
||||
}
|
||||
compGE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) >= 0
|
||||
}
|
||||
compLT = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == -1
|
||||
}
|
||||
compLE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) <= 0
|
||||
}
|
||||
)
|
||||
|
||||
type versionRange struct {
|
||||
v Version
|
||||
c comparator
|
||||
}
|
||||
|
||||
// rangeFunc creates a Range from the given versionRange.
|
||||
func (vr *versionRange) rangeFunc() Range {
|
||||
return Range(func(v Version) bool {
|
||||
return vr.c(v, vr.v)
|
||||
})
|
||||
}
|
||||
|
||||
// Range represents a range of versions.
|
||||
// A Range can be used to check if a Version satisfies it:
|
||||
//
|
||||
// range, err := semver.ParseRange(">1.0.0 <2.0.0")
|
||||
// range(semver.MustParse("1.1.1") // returns true
|
||||
type Range func(Version) bool
|
||||
|
||||
// OR combines the existing Range with another Range using logical OR.
|
||||
func (rf Range) OR(f Range) Range {
|
||||
return Range(func(v Version) bool {
|
||||
return rf(v) || f(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AND combines the existing Range with another Range using logical AND.
|
||||
func (rf Range) AND(f Range) Range {
|
||||
return Range(func(v Version) bool {
|
||||
return rf(v) && f(v)
|
||||
})
|
||||
}
|
||||
|
||||
// ParseRange parses a range and returns a Range.
|
||||
// If the range could not be parsed an error is returned.
|
||||
//
|
||||
// Valid ranges are:
|
||||
// - "<1.0.0"
|
||||
// - "<=1.0.0"
|
||||
// - ">1.0.0"
|
||||
// - ">=1.0.0"
|
||||
// - "1.0.0", "=1.0.0", "==1.0.0"
|
||||
// - "!1.0.0", "!=1.0.0"
|
||||
//
|
||||
// A Range can consist of multiple ranges separated by space:
|
||||
// Ranges can be linked by logical AND:
|
||||
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
|
||||
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
|
||||
//
|
||||
// Ranges can also be linked by logical OR:
|
||||
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
|
||||
//
|
||||
// AND has a higher precedence than OR. It's not possible to use brackets.
|
||||
//
|
||||
// Ranges can be combined by both AND and OR
|
||||
//
|
||||
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
||||
func ParseRange(s string) (Range, error) {
|
||||
parts := splitAndTrim(s)
|
||||
orParts, err := splitORParts(parts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
expandedParts, err := expandWildcardVersion(orParts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var orFn Range
|
||||
for _, p := range expandedParts {
|
||||
var andFn Range
|
||||
for _, ap := range p {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vr, err := buildVersionRange(opStr, vStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
|
||||
}
|
||||
rf := vr.rangeFunc()
|
||||
|
||||
// Set function
|
||||
if andFn == nil {
|
||||
andFn = rf
|
||||
} else { // Combine with existing function
|
||||
andFn = andFn.AND(rf)
|
||||
}
|
||||
}
|
||||
if orFn == nil {
|
||||
orFn = andFn
|
||||
} else {
|
||||
orFn = orFn.OR(andFn)
|
||||
}
|
||||
|
||||
}
|
||||
return orFn, nil
|
||||
}
|
||||
|
||||
// splitORParts splits the already cleaned parts by '||'.
|
||||
// Checks for invalid positions of the operator and returns an
|
||||
// error if found.
|
||||
func splitORParts(parts []string) ([][]string, error) {
|
||||
var ORparts [][]string
|
||||
last := 0
|
||||
for i, p := range parts {
|
||||
if p == "||" {
|
||||
if i == 0 {
|
||||
return nil, fmt.Errorf("First element in range is '||'")
|
||||
}
|
||||
ORparts = append(ORparts, parts[last:i])
|
||||
last = i + 1
|
||||
}
|
||||
}
|
||||
if last == len(parts) {
|
||||
return nil, fmt.Errorf("Last element in range is '||'")
|
||||
}
|
||||
ORparts = append(ORparts, parts[last:])
|
||||
return ORparts, nil
|
||||
}
|
||||
|
||||
// buildVersionRange takes a slice of 2: operator and version
|
||||
// and builds a versionRange, otherwise an error.
|
||||
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
|
||||
c := parseComparator(opStr)
|
||||
if c == nil {
|
||||
return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
|
||||
}
|
||||
v, err := Parse(vStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
|
||||
}
|
||||
|
||||
return &versionRange{
|
||||
v: v,
|
||||
c: c,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
// inArray checks if a byte is contained in an array of bytes
|
||||
func inArray(s byte, list []byte) bool {
|
||||
for _, el := range list {
|
||||
if el == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// splitAndTrim splits a range string by spaces and cleans whitespaces
|
||||
func splitAndTrim(s string) (result []string) {
|
||||
last := 0
|
||||
var lastChar byte
|
||||
excludeFromSplit := []byte{'>', '<', '='}
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
|
||||
if last < i-1 {
|
||||
result = append(result, s[last:i])
|
||||
}
|
||||
last = i + 1
|
||||
} else if s[i] != ' ' {
|
||||
lastChar = s[i]
|
||||
}
|
||||
}
|
||||
if last < len(s)-1 {
|
||||
result = append(result, s[last:])
|
||||
}
|
||||
|
||||
for i, v := range result {
|
||||
result[i] = strings.Replace(v, " ", "", -1)
|
||||
}
|
||||
|
||||
// parts := strings.Split(s, " ")
|
||||
// for _, x := range parts {
|
||||
// if s := strings.TrimSpace(x); len(s) != 0 {
|
||||
// result = append(result, s)
|
||||
// }
|
||||
// }
|
||||
return
|
||||
}
|
||||
|
||||
// splitComparatorVersion splits the comparator from the version.
|
||||
// Input must be free of leading or trailing spaces.
|
||||
func splitComparatorVersion(s string) (string, string, error) {
|
||||
i := strings.IndexFunc(s, unicode.IsDigit)
|
||||
if i == -1 {
|
||||
return "", "", fmt.Errorf("Could not get version from string: %q", s)
|
||||
}
|
||||
return strings.TrimSpace(s[0:i]), s[i:], nil
|
||||
}
|
||||
|
||||
// getWildcardType will return the type of wildcard that the
|
||||
// passed version contains
|
||||
func getWildcardType(vStr string) wildcardType {
|
||||
parts := strings.Split(vStr, ".")
|
||||
nparts := len(parts)
|
||||
wildcard := parts[nparts-1]
|
||||
|
||||
possibleWildcardType := wildcardTypefromInt(nparts)
|
||||
if wildcard == "x" {
|
||||
return possibleWildcardType
|
||||
}
|
||||
|
||||
return noneWildcard
|
||||
}
|
||||
|
||||
// createVersionFromWildcard will convert a wildcard version
|
||||
// into a regular version, replacing 'x's with '0's, handling
|
||||
// special cases like '1.x.x' and '1.x'
|
||||
func createVersionFromWildcard(vStr string) string {
|
||||
// handle 1.x.x
|
||||
vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
|
||||
vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
|
||||
parts := strings.Split(vStr2, ".")
|
||||
|
||||
// handle 1.x
|
||||
if len(parts) == 2 {
|
||||
return vStr2 + ".0"
|
||||
}
|
||||
|
||||
return vStr2
|
||||
}
|
||||
|
||||
// incrementMajorVersion will increment the major version
|
||||
// of the passed version
|
||||
func incrementMajorVersion(vStr string) (string, error) {
|
||||
parts := strings.Split(vStr, ".")
|
||||
i, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts[0] = strconv.Itoa(i + 1)
|
||||
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// incrementMajorVersion will increment the minor version
|
||||
// of the passed version
|
||||
func incrementMinorVersion(vStr string) (string, error) {
|
||||
parts := strings.Split(vStr, ".")
|
||||
i, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts[1] = strconv.Itoa(i + 1)
|
||||
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// expandWildcardVersion will expand wildcards inside versions
|
||||
// following these rules:
|
||||
//
|
||||
// * when dealing with patch wildcards:
|
||||
// >= 1.2.x will become >= 1.2.0
|
||||
// <= 1.2.x will become < 1.3.0
|
||||
// > 1.2.x will become >= 1.3.0
|
||||
// < 1.2.x will become < 1.2.0
|
||||
// != 1.2.x will become < 1.2.0 >= 1.3.0
|
||||
//
|
||||
// * when dealing with minor wildcards:
|
||||
// >= 1.x will become >= 1.0.0
|
||||
// <= 1.x will become < 2.0.0
|
||||
// > 1.x will become >= 2.0.0
|
||||
// < 1.0 will become < 1.0.0
|
||||
// != 1.x will become < 1.0.0 >= 2.0.0
|
||||
//
|
||||
// * when dealing with wildcards without
|
||||
// version operator:
|
||||
// 1.2.x will become >= 1.2.0 < 1.3.0
|
||||
// 1.x will become >= 1.0.0 < 2.0.0
|
||||
func expandWildcardVersion(parts [][]string) ([][]string, error) {
|
||||
var expandedParts [][]string
|
||||
for _, p := range parts {
|
||||
var newParts []string
|
||||
for _, ap := range p {
|
||||
if strings.Index(ap, "x") != -1 {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versionWildcardType := getWildcardType(vStr)
|
||||
flatVersion := createVersionFromWildcard(vStr)
|
||||
|
||||
var resultOperator string
|
||||
var shouldIncrementVersion bool
|
||||
switch opStr {
|
||||
case ">":
|
||||
resultOperator = ">="
|
||||
shouldIncrementVersion = true
|
||||
case ">=":
|
||||
resultOperator = ">="
|
||||
case "<":
|
||||
resultOperator = "<"
|
||||
case "<=":
|
||||
resultOperator = "<"
|
||||
shouldIncrementVersion = true
|
||||
case "", "=", "==":
|
||||
newParts = append(newParts, ">="+flatVersion)
|
||||
resultOperator = "<"
|
||||
shouldIncrementVersion = true
|
||||
case "!=", "!":
|
||||
newParts = append(newParts, "<"+flatVersion)
|
||||
resultOperator = ">="
|
||||
shouldIncrementVersion = true
|
||||
}
|
||||
|
||||
var resultVersion string
|
||||
if shouldIncrementVersion {
|
||||
switch versionWildcardType {
|
||||
case patchWildcard:
|
||||
resultVersion, _ = incrementMinorVersion(flatVersion)
|
||||
case minorWildcard:
|
||||
resultVersion, _ = incrementMajorVersion(flatVersion)
|
||||
}
|
||||
} else {
|
||||
resultVersion = flatVersion
|
||||
}
|
||||
|
||||
ap = resultOperator + resultVersion
|
||||
}
|
||||
newParts = append(newParts, ap)
|
||||
}
|
||||
expandedParts = append(expandedParts, newParts)
|
||||
}
|
||||
|
||||
return expandedParts, nil
|
||||
}
|
||||
|
||||
func parseComparator(s string) comparator {
|
||||
switch s {
|
||||
case "==":
|
||||
fallthrough
|
||||
case "":
|
||||
fallthrough
|
||||
case "=":
|
||||
return compEQ
|
||||
case ">":
|
||||
return compGT
|
||||
case ">=":
|
||||
return compGE
|
||||
case "<":
|
||||
return compLT
|
||||
case "<=":
|
||||
return compLE
|
||||
case "!":
|
||||
fallthrough
|
||||
case "!=":
|
||||
return compNE
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MustParseRange is like ParseRange but panics if the range cannot be parsed.
|
||||
func MustParseRange(s string) Range {
|
||||
r, err := ParseRange(s)
|
||||
if err != nil {
|
||||
panic(`semver: ParseRange(` + s + `): ` + err.Error())
|
||||
}
|
||||
return r
|
||||
}
|
418
vendor/github.com/blang/semver/semver.go
generated
vendored
Normal file
@@ -0,0 +1,418 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
numbers string = "0123456789"
|
||||
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
|
||||
alphanum = alphas + numbers
|
||||
)
|
||||
|
||||
// SpecVersion is the latest fully supported spec version of semver
|
||||
var SpecVersion = Version{
|
||||
Major: 2,
|
||||
Minor: 0,
|
||||
Patch: 0,
|
||||
}
|
||||
|
||||
// Version represents a semver compatible version
|
||||
type Version struct {
|
||||
Major uint64
|
||||
Minor uint64
|
||||
Patch uint64
|
||||
Pre []PRVersion
|
||||
Build []string //No Precendence
|
||||
}
|
||||
|
||||
// Version to string
|
||||
func (v Version) String() string {
|
||||
b := make([]byte, 0, 5)
|
||||
b = strconv.AppendUint(b, v.Major, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Minor, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Patch, 10)
|
||||
|
||||
if len(v.Pre) > 0 {
|
||||
b = append(b, '-')
|
||||
b = append(b, v.Pre[0].String()...)
|
||||
|
||||
for _, pre := range v.Pre[1:] {
|
||||
b = append(b, '.')
|
||||
b = append(b, pre.String()...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.Build) > 0 {
|
||||
b = append(b, '+')
|
||||
b = append(b, v.Build[0]...)
|
||||
|
||||
for _, build := range v.Build[1:] {
|
||||
b = append(b, '.')
|
||||
b = append(b, build...)
|
||||
}
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// Equals checks if v is equal to o.
|
||||
func (v Version) Equals(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
}
|
||||
|
||||
// EQ checks if v is equal to o.
|
||||
func (v Version) EQ(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
}
|
||||
|
||||
// NE checks if v is not equal to o.
|
||||
func (v Version) NE(o Version) bool {
|
||||
return (v.Compare(o) != 0)
|
||||
}
|
||||
|
||||
// GT checks if v is greater than o.
|
||||
func (v Version) GT(o Version) bool {
|
||||
return (v.Compare(o) == 1)
|
||||
}
|
||||
|
||||
// GTE checks if v is greater than or equal to o.
|
||||
func (v Version) GTE(o Version) bool {
|
||||
return (v.Compare(o) >= 0)
|
||||
}
|
||||
|
||||
// GE checks if v is greater than or equal to o.
|
||||
func (v Version) GE(o Version) bool {
|
||||
return (v.Compare(o) >= 0)
|
||||
}
|
||||
|
||||
// LT checks if v is less than o.
|
||||
func (v Version) LT(o Version) bool {
|
||||
return (v.Compare(o) == -1)
|
||||
}
|
||||
|
||||
// LTE checks if v is less than or equal to o.
|
||||
func (v Version) LTE(o Version) bool {
|
||||
return (v.Compare(o) <= 0)
|
||||
}
|
||||
|
||||
// LE checks if v is less than or equal to o.
|
||||
func (v Version) LE(o Version) bool {
|
||||
return (v.Compare(o) <= 0)
|
||||
}
|
||||
|
||||
// Compare compares Versions v to o:
|
||||
// -1 == v is less than o
|
||||
// 0 == v is equal to o
|
||||
// 1 == v is greater than o
|
||||
func (v Version) Compare(o Version) int {
|
||||
if v.Major != o.Major {
|
||||
if v.Major > o.Major {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if v.Minor != o.Minor {
|
||||
if v.Minor > o.Minor {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if v.Patch != o.Patch {
|
||||
if v.Patch > o.Patch {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Quick comparison if a version has no prerelease versions
|
||||
if len(v.Pre) == 0 && len(o.Pre) == 0 {
|
||||
return 0
|
||||
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
|
||||
return 1
|
||||
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
i := 0
|
||||
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
|
||||
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
|
||||
continue
|
||||
} else if comp == 1 {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// If all pr versions are the equal but one has further prversion, this one greater
|
||||
if i == len(v.Pre) && i == len(o.Pre) {
|
||||
return 0
|
||||
} else if i == len(v.Pre) && i < len(o.Pre) {
|
||||
return -1
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Validate validates v and returns error in case
|
||||
func (v Version) Validate() error {
|
||||
// Major, Minor, Patch already validated using uint64
|
||||
|
||||
for _, pre := range v.Pre {
|
||||
if !pre.IsNum { //Numeric prerelease versions already uint64
|
||||
if len(pre.VersionStr) == 0 {
|
||||
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
|
||||
}
|
||||
if !containsOnly(pre.VersionStr, alphanum) {
|
||||
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, build := range v.Build {
|
||||
if len(build) == 0 {
|
||||
return fmt.Errorf("Build meta data can not be empty %q", build)
|
||||
}
|
||||
if !containsOnly(build, alphanum) {
|
||||
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
|
||||
func New(s string) (vp *Version, err error) {
|
||||
v, err := Parse(s)
|
||||
vp = &v
|
||||
return
|
||||
}
|
||||
|
||||
// Make is an alias for Parse, parses version string and returns a validated Version or error
|
||||
func Make(s string) (Version, error) {
|
||||
return Parse(s)
|
||||
}
|
||||
|
||||
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
|
||||
// specs to be parsed by this library. It does so by normalizing versions before passing them to
|
||||
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
|
||||
// with only major and minor components specified
|
||||
func ParseTolerant(s string) (Version, error) {
|
||||
s = strings.TrimSpace(s)
|
||||
s = strings.TrimPrefix(s, "v")
|
||||
|
||||
// Split into major.minor.(patch+pr+meta)
|
||||
parts := strings.SplitN(s, ".", 3)
|
||||
if len(parts) < 3 {
|
||||
if strings.ContainsAny(parts[len(parts)-1], "+-") {
|
||||
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
|
||||
}
|
||||
for len(parts) < 3 {
|
||||
parts = append(parts, "0")
|
||||
}
|
||||
s = strings.Join(parts, ".")
|
||||
}
|
||||
|
||||
return Parse(s)
|
||||
}
|
||||
|
||||
// Parse parses version string and returns a validated Version or error
|
||||
func Parse(s string) (Version, error) {
|
||||
if len(s) == 0 {
|
||||
return Version{}, errors.New("Version string empty")
|
||||
}
|
||||
|
||||
// Split into major.minor.(patch+pr+meta)
|
||||
parts := strings.SplitN(s, ".", 3)
|
||||
if len(parts) != 3 {
|
||||
return Version{}, errors.New("No Major.Minor.Patch elements found")
|
||||
}
|
||||
|
||||
// Major
|
||||
if !containsOnly(parts[0], numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
|
||||
}
|
||||
if hasLeadingZeroes(parts[0]) {
|
||||
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
|
||||
}
|
||||
major, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
// Minor
|
||||
if !containsOnly(parts[1], numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
|
||||
}
|
||||
if hasLeadingZeroes(parts[1]) {
|
||||
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
|
||||
}
|
||||
minor, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
v := Version{}
|
||||
v.Major = major
|
||||
v.Minor = minor
|
||||
|
||||
var build, prerelease []string
|
||||
patchStr := parts[2]
|
||||
|
||||
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
|
||||
build = strings.Split(patchStr[buildIndex+1:], ".")
|
||||
patchStr = patchStr[:buildIndex]
|
||||
}
|
||||
|
||||
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
|
||||
prerelease = strings.Split(patchStr[preIndex+1:], ".")
|
||||
patchStr = patchStr[:preIndex]
|
||||
}
|
||||
|
||||
if !containsOnly(patchStr, numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
|
||||
}
|
||||
if hasLeadingZeroes(patchStr) {
|
||||
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
|
||||
}
|
||||
patch, err := strconv.ParseUint(patchStr, 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
v.Patch = patch
|
||||
|
||||
// Prerelease
|
||||
for _, prstr := range prerelease {
|
||||
parsedPR, err := NewPRVersion(prstr)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
v.Pre = append(v.Pre, parsedPR)
|
||||
}
|
||||
|
||||
// Build meta data
|
||||
for _, str := range build {
|
||||
if len(str) == 0 {
|
||||
return Version{}, errors.New("Build meta data is empty")
|
||||
}
|
||||
if !containsOnly(str, alphanum) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
|
||||
}
|
||||
v.Build = append(v.Build, str)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the version cannot be parsed.
|
||||
func MustParse(s string) Version {
|
||||
v, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`semver: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// PRVersion represents a PreRelease Version
|
||||
type PRVersion struct {
|
||||
VersionStr string
|
||||
VersionNum uint64
|
||||
IsNum bool
|
||||
}
|
||||
|
||||
// NewPRVersion creates a new valid prerelease version
|
||||
func NewPRVersion(s string) (PRVersion, error) {
|
||||
if len(s) == 0 {
|
||||
return PRVersion{}, errors.New("Prerelease is empty")
|
||||
}
|
||||
v := PRVersion{}
|
||||
if containsOnly(s, numbers) {
|
||||
if hasLeadingZeroes(s) {
|
||||
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
|
||||
}
|
||||
num, err := strconv.ParseUint(s, 10, 64)
|
||||
|
||||
// Might never be hit, but just in case
|
||||
if err != nil {
|
||||
return PRVersion{}, err
|
||||
}
|
||||
v.VersionNum = num
|
||||
v.IsNum = true
|
||||
} else if containsOnly(s, alphanum) {
|
||||
v.VersionStr = s
|
||||
v.IsNum = false
|
||||
} else {
|
||||
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// IsNumeric checks if prerelease-version is numeric
|
||||
func (v PRVersion) IsNumeric() bool {
|
||||
return v.IsNum
|
||||
}
|
||||
|
||||
// Compare compares two PreRelease Versions v and o:
|
||||
// -1 == v is less than o
|
||||
// 0 == v is equal to o
|
||||
// 1 == v is greater than o
|
||||
func (v PRVersion) Compare(o PRVersion) int {
|
||||
if v.IsNum && !o.IsNum {
|
||||
return -1
|
||||
} else if !v.IsNum && o.IsNum {
|
||||
return 1
|
||||
} else if v.IsNum && o.IsNum {
|
||||
if v.VersionNum == o.VersionNum {
|
||||
return 0
|
||||
} else if v.VersionNum > o.VersionNum {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
} else { // both are Alphas
|
||||
if v.VersionStr == o.VersionStr {
|
||||
return 0
|
||||
} else if v.VersionStr > o.VersionStr {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PreRelease version to string
|
||||
func (v PRVersion) String() string {
|
||||
if v.IsNum {
|
||||
return strconv.FormatUint(v.VersionNum, 10)
|
||||
}
|
||||
return v.VersionStr
|
||||
}
|
||||
|
||||
func containsOnly(s string, set string) bool {
|
||||
return strings.IndexFunc(s, func(r rune) bool {
|
||||
return !strings.ContainsRune(set, r)
|
||||
}) == -1
|
||||
}
|
||||
|
||||
func hasLeadingZeroes(s string) bool {
|
||||
return len(s) > 1 && s[0] == '0'
|
||||
}
|
||||
|
||||
// NewBuildVersion creates a new valid build version
|
||||
func NewBuildVersion(s string) (string, error) {
|
||||
if len(s) == 0 {
|
||||
return "", errors.New("Buildversion is empty")
|
||||
}
|
||||
if !containsOnly(s, alphanum) {
|
||||
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
|
||||
}
|
||||
return s, nil
|
||||
}
|
28
vendor/github.com/blang/semver/sort.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Versions represents multiple versions.
|
||||
type Versions []Version
|
||||
|
||||
// Len returns length of version collection
|
||||
func (s Versions) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
// Swap swaps two versions inside the collection by its indices
|
||||
func (s Versions) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
// Less checks if version at index i is less than version at index j
|
||||
func (s Versions) Less(i, j int) bool {
|
||||
return s[i].LT(s[j])
|
||||
}
|
||||
|
||||
// Sort sorts a slice of versions
|
||||
func Sort(versions []Version) {
|
||||
sort.Sort(Versions(versions))
|
||||
}
|
30
vendor/github.com/blang/semver/sql.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements the database/sql.Scanner interface.
|
||||
func (v *Version) Scan(src interface{}) (err error) {
|
||||
var str string
|
||||
switch src := src.(type) {
|
||||
case string:
|
||||
str = src
|
||||
case []byte:
|
||||
str = string(src)
|
||||
default:
|
||||
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
|
||||
}
|
||||
|
||||
if t, err := Parse(str); err == nil {
|
||||
*v = t
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Value implements the database/sql/driver.Valuer interface.
|
||||
func (v Version) Value() (driver.Value, error) {
|
||||
return v.String(), nil
|
||||
}
|
8
vendor/github.com/cespare/xxhash/v2/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
language: go
|
||||
go:
|
||||
- "1.x"
|
||||
- master
|
||||
env:
|
||||
- TAGS=""
|
||||
- TAGS="-tags purego"
|
||||
script: go test $TAGS -v ./...
|
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Copyright (c) 2016 Caleb Spare
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
67
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,67 @@
# xxhash

[GoDoc](https://godoc.org/github.com/cespare/xxhash)
[Build Status](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
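
A minimal usage sketch of the API listed above; both the one-shot `Sum64` helper and a streaming `Digest` produce the same hash for the same input:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, xxhash")

	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming hashing via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.Write(data) // Write always returns len(data), nil for Digest
	fmt.Printf("%016x\n", d.Sum64())
}
```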

This package provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)
3
vendor/github.com/cespare/xxhash/v2/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
module github.com/cespare/xxhash/v2
|
||||
|
||||
go 1.11
|
0
vendor/github.com/cespare/xxhash/v2/go.sum
generated
vendored
Normal file
236
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total uint64
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
func New() *Digest {
|
||||
var d Digest
|
||||
d.Reset()
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
// Size always returns 8 bytes.
|
||||
func (d *Digest) Size() int { return 8 }
|
||||
|
||||
// BlockSize always returns 32 bytes.
|
||||
func (d *Digest) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to d. It always returns len(b), nil.
|
||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
nw := writeBlocks(d, b)
|
||||
b = b[nw:]
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(d.mem[:], b)
|
||||
d.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
func (d *Digest) Sum(b []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
// Sum64 returns the current hash.
|
||||
func (d *Digest) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if d.total >= 32 {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = d.v3 + prime5
|
||||
}
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
const (
|
||||
magic = "xxh\x06"
|
||||
marshaledSize = len(magic) + 8*5 + 32
|
||||
)
|
||||
|
||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = appendUint64(b, d.v1)
|
||||
b = appendUint64(b, d.v2)
|
||||
b = appendUint64(b, d.v3)
|
||||
b = appendUint64(b, d.v4)
|
||||
b = appendUint64(b, d.total)
|
||||
b = append(b, d.mem[:d.n]...)
|
||||
b = b[:len(b)+len(d.mem)-d.n]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("xxhash: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("xxhash: invalid hash state size")
|
||||
}
|
||||
b = b[len(magic):]
|
||||
b, d.v1 = consumeUint64(b)
|
||||
b, d.v2 = consumeUint64(b)
|
||||
b, d.v3 = consumeUint64(b)
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
b = b[len(d.mem):]
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendUint64(b []byte, x uint64) []byte {
|
||||
var a [8]byte
|
||||
binary.LittleEndian.PutUint64(a[:], x)
|
||||
return append(b, a[:]...)
|
||||
}
|
||||
|
||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||
x := u64(b)
|
||||
return b[8:], x
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
//go:noescape
|
||||
func writeBlocks(d *Digest, b []byte) int
|
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,215 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is CX minus the old base pointer.
|
||||
SUBQ b_base+8(FP), CX
|
||||
MOVQ CX, ret+32(FP)
|
||||
|
||||
RET
|
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// +build !amd64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// d := New()
|
||||
// d.Write(b)
|
||||
// return d.Sum64()
|
||||
// but this is faster, particularly for small inputs.
|
||||
|
||||
n := len(b)
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = prime5
|
||||
}
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func writeBlocks(d *Digest, b []byte) int {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
n := len(b)
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
||||
return n - len(b)
|
||||
}
|
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
return d.Write([]byte(s))
|
||||
}
|
46
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Notes:
|
||||
//
|
||||
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
|
||||
// for some discussion about these unsafe conversions.
|
||||
//
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// unsafe operations unnecessary: https://golang.org/issue/2205.
|
||||
//
|
||||
// Both of these wrapper functions still incur function call overhead since they
|
||||
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
|
||||
// for strings to squeeze out a bit more speed. Mid-stack inlining should
|
||||
// eventually fix this.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return Sum64(b)
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return d.Write(b)
|
||||
}
|
20
vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
generated
vendored
@@ -26,6 +26,11 @@ import (
|
||||
|
||||
// Returns an object representing the current OS thread's network namespace
|
||||
func GetCurrentNS() (NetNS, error) {
|
||||
// Lock the thread in case other goroutine executes in it and changes its
|
||||
// network namespace after getCurrentThreadNetNSPath(), otherwise it might
|
||||
// return an unexpected network namespace.
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
return GetNS(getCurrentThreadNetNSPath())
|
||||
}
|
||||
|
||||
@@ -178,7 +183,16 @@ func (ns *netNS) Do(toRun func(NetNS) error) error {
|
||||
if err = ns.Set(); err != nil {
|
||||
return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
|
||||
}
|
||||
defer threadNS.Set() // switch back
|
||||
defer func() {
|
||||
err := threadNS.Set() // switch back
|
||||
if err == nil {
|
||||
// Unlock the current thread only when we successfully switched back
|
||||
// to the original namespace; otherwise leave the thread locked which
|
||||
// will force the runtime to scrap the current thread, that is maybe
|
||||
// not as optimal but at least always safe to do.
|
||||
runtime.UnlockOSThread()
|
||||
}
|
||||
}()
|
||||
|
||||
return toRun(hostNS)
|
||||
}
|
||||
@@ -193,6 +207,10 @@ func (ns *netNS) Do(toRun func(NetNS) error) error {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
// Start the callback in a new green thread so that if we later fail
|
||||
// to switch the namespace back to the original one, we can safely
|
||||
// leave the thread locked to die without a risk of the current thread
|
||||
// left lingering with incorrect namespace.
|
||||
var innerError error
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
60
vendor/github.com/containernetworking/plugins/pkg/testutils/dns.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright 2019 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
)
|
||||
|
||||
// TmpResolvConf will create a temporary file and write the provided DNS settings to
|
||||
// it in the resolv.conf format. It returns the path of the created temporary file or
|
||||
// an error if any occurs while creating/writing the file. It is the caller's
|
||||
// responsibility to remove the file.
|
||||
func TmpResolvConf(dnsConf types.DNS) (string, error) {
|
||||
f, err := ioutil.TempFile("", "cni_test_resolv.conf")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get temp file for CNI test resolv.conf: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
path := f.Name()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
}()
|
||||
|
||||
// see "man 5 resolv.conf" for the format of resolv.conf
|
||||
var resolvConfLines []string
|
||||
for _, nameserver := range dnsConf.Nameservers {
|
||||
resolvConfLines = append(resolvConfLines, fmt.Sprintf("nameserver %s", nameserver))
|
||||
}
|
||||
resolvConfLines = append(resolvConfLines, fmt.Sprintf("domain %s", dnsConf.Domain))
|
||||
resolvConfLines = append(resolvConfLines, fmt.Sprintf("search %s", strings.Join(dnsConf.Search, " ")))
|
||||
resolvConfLines = append(resolvConfLines, fmt.Sprintf("options %s", strings.Join(dnsConf.Options, " ")))
|
||||
|
||||
resolvConf := strings.Join(resolvConfLines, "\n")
|
||||
_, err = f.Write([]byte(resolvConf))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to write temp resolv.conf for CNI test: %v", err)
|
||||
}
|
||||
|
||||
return path, err
|
||||
}
|
23
vendor/github.com/containernetworking/plugins/pkg/testutils/netns_linux.go
generated
vendored
@@ -22,17 +22,36 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const nsRunDir = "/var/run/netns"
|
||||
func getNsRunDir() string {
|
||||
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
|
||||
/// If XDG_RUNTIME_DIR is set, check if the current user owns /var/run. If
|
||||
// the owner is different, we are most likely running in a user namespace.
|
||||
// In that case use $XDG_RUNTIME_DIR/netns as runtime dir.
|
||||
if xdgRuntimeDir != "" {
|
||||
if s, err := os.Stat("/var/run"); err == nil {
|
||||
st, ok := s.Sys().(*syscall.Stat_t)
|
||||
if ok && int(st.Uid) != os.Geteuid() {
|
||||
return path.Join(xdgRuntimeDir, "netns")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "/var/run/netns"
|
||||
}
|
||||
|
||||
// Creates a new persistent (bind-mounted) network namespace and returns an object
|
||||
// representing that namespace, without switching to it.
|
||||
func NewNS() (ns.NetNS, error) {
|
||||
|
||||
nsRunDir := getNsRunDir()
|
||||
|
||||
b := make([]byte, 16)
|
||||
_, err := rand.Reader.Read(b)
|
||||
if err != nil {
|
||||
@@ -135,7 +154,7 @@ func NewNS() (ns.NetNS, error) {
|
||||
func UnmountNS(ns ns.NetNS) error {
|
||||
nsPath := ns.Path()
|
||||
// Only unmount if it's been bind-mounted (don't touch namespaces in /proc...)
|
||||
if strings.HasPrefix(nsPath, nsRunDir) {
|
||||
if strings.HasPrefix(nsPath, getNsRunDir()) {
|
||||
if err := unix.Unmount(nsPath, 0); err != nil {
|
||||
return fmt.Errorf("failed to unmount NS: at %s: %v", nsPath, err)
|
||||
}
|
||||
|
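NewNS and UnmountNS are meant to be used as a pair in tests. The sketch below is illustrative only (it is not part of the diff) and assumes it runs as root on Linux, which these helpers require.

```go
package main

import (
	"log"

	"github.com/containernetworking/plugins/pkg/testutils"
)

func main() {
	// NewNS bind-mounts a fresh network namespace under the runtime dir picked
	// by getNsRunDir: /var/run/netns, or $XDG_RUNTIME_DIR/netns in a user namespace.
	netns, err := testutils.NewNS()
	if err != nil {
		log.Fatalf("failed to create test netns: %v", err)
	}
	defer func() {
		// UnmountNS only touches namespaces under that runtime dir, so it is
		// safe to call on anything returned by NewNS.
		if err := testutils.UnmountNS(netns); err != nil {
			log.Printf("failed to unmount netns: %v", err)
		}
	}()

	log.Printf("created netns at %s", netns.Path())
}
```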
7 vendor/github.com/evanphx/json-patch/.travis.yml generated vendored
@@ -1,8 +1,8 @@
language: go

go:
  - 1.8
  - 1.7
  - 1.14
  - 1.13

install:
  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
@@ -11,6 +11,9 @@ install:
script:
  - go get
  - go test -cover ./...
  - cd ./v5
  - go get
  - go test -cover ./...

notifications:
  email: false
2 vendor/github.com/evanphx/json-patch/LICENSE generated vendored
@@ -6,7 +6,7 @@ modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
11 vendor/github.com/evanphx/json-patch/README.md generated vendored
@@ -1,5 +1,5 @@
# JSON-Patch
`jsonpatch` is a library which provides functionallity for both applying
`jsonpatch` is a library which provides functionality for both applying
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).

@@ -11,10 +11,11 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie

**Latest and greatest**:
```bash
go get -u github.com/evanphx/json-patch
go get -u github.com/evanphx/json-patch/v5
```

**Stable Versions**:
* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`

(previous versions below `v3` are unavailable)
@@ -82,7 +83,7 @@ When ran, you get the following output:

```bash
$ go run main.go
patch document: {"height":null,"name":"Jane"}
updated tina doc: {"age":28,"name":"Jane"}
updated alternative doc: {"age":28,"name":"Jane"}
```

## Create and apply a JSON Patch
@@ -164,7 +165,7 @@ func main() {
	}

	if !jsonpatch.Equal(original, different) {
		fmt.Println(`"original" is _not_ structurally equal to "similar"`)
		fmt.Println(`"original" is _not_ structurally equal to "different"`)
	}
}
```
@@ -173,7 +174,7 @@ When ran, you get the following output:

```bash
$ go run main.go
"original" is structurally equal to "similar"
"original" is _not_ structurally equal to "similar"
"original" is _not_ structurally equal to "different"
```

## Combine merge patches
13 vendor/github.com/evanphx/json-patch/merge.go generated vendored
@@ -307,13 +307,16 @@ func matchesValue(av, bv interface{}) bool {
		return true
	case map[string]interface{}:
		bt := bv.(map[string]interface{})
		for key := range at {
			if !matchesValue(at[key], bt[key]) {
				return false
			}
		if len(bt) != len(at) {
			return false
		}
		for key := range bt {
			if !matchesValue(at[key], bt[key]) {
			av, aOK := at[key]
			bv, bOK := bt[key]
			if aOK != bOK {
				return false
			}
			if !matchesValue(av, bv) {
				return false
			}
		}
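The rewritten matchesValue now compares key sets in both directions, which matters when json-patch computes merge patches. A rough sketch of the effect, not taken from this diff (the JSON documents are made up):

```go
package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name":"multus","cni":{"version":"0.3.1"}}`)
	modified := []byte(`{"name":"multus","cni":{"version":"0.3.1","confDir":"/etc/cni/net.d"}}`)

	// With the symmetric comparison above, the extra nested "confDir" key is
	// no longer treated as equal, so it shows up in the merge patch.
	patch, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(patch))
}
```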
314 vendor/github.com/evanphx/json-patch/patch.go generated vendored
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -24,6 +26,14 @@ var (
|
||||
AccumulatedCopySizeLimit int64 = 0
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrMissing = errors.New("missing value")
|
||||
ErrUnknownType = errors.New("unknown object type")
|
||||
ErrInvalid = errors.New("invalid state detected")
|
||||
ErrInvalidIndex = errors.New("invalid index referenced")
|
||||
)
|
||||
|
||||
type lazyNode struct {
|
||||
raw *json.RawMessage
|
||||
doc partialDoc
|
||||
@@ -31,10 +41,11 @@ type lazyNode struct {
|
||||
which int
|
||||
}
|
||||
|
||||
type operation map[string]*json.RawMessage
|
||||
// Operation is a single JSON-Patch step, such as a single 'add' operation.
|
||||
type Operation map[string]*json.RawMessage
|
||||
|
||||
// Patch is an ordered collection of operations.
|
||||
type Patch []operation
|
||||
// Patch is an ordered collection of Operations.
|
||||
type Patch []Operation
|
||||
|
||||
type partialDoc map[string]*lazyNode
|
||||
type partialArray []*lazyNode
|
||||
@@ -59,7 +70,7 @@ func (n *lazyNode) MarshalJSON() ([]byte, error) {
|
||||
case eAry:
|
||||
return json.Marshal(n.ary)
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown type")
|
||||
return nil, ErrUnknownType
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,7 +102,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
@@ -110,7 +121,7 @@ func (n *lazyNode) intoAry() (*partialArray, error) {
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
@@ -191,6 +202,10 @@ func (n *lazyNode) equal(o *lazyNode) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(n.doc) != len(o.doc) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range n.doc {
|
||||
ov, ok := o.doc[k]
|
||||
|
||||
@@ -198,6 +213,10 @@ func (n *lazyNode) equal(o *lazyNode) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if (v == nil) != (ov == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
if v == nil && ov == nil {
|
||||
continue
|
||||
}
|
||||
@@ -227,7 +246,8 @@ func (n *lazyNode) equal(o *lazyNode) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (o operation) kind() string {
|
||||
// Kind reads the "op" field of the Operation.
|
||||
func (o Operation) Kind() string {
|
||||
if obj, ok := o["op"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
@@ -243,39 +263,41 @@ func (o operation) kind() string {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
func (o operation) path() string {
|
||||
// Path reads the "path" field of the Operation.
|
||||
func (o Operation) Path() (string, error) {
|
||||
if obj, ok := o["path"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
return "unknown", err
|
||||
}
|
||||
|
||||
return op
|
||||
return op, nil
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
|
||||
}
|
||||
|
||||
func (o operation) from() string {
|
||||
// From reads the "from" field of the Operation.
|
||||
func (o Operation) From() (string, error) {
|
||||
if obj, ok := o["from"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
return "unknown", err
|
||||
}
|
||||
|
||||
return op
|
||||
return op, nil
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
|
||||
}
|
||||
|
||||
func (o operation) value() *lazyNode {
|
||||
func (o Operation) value() *lazyNode {
|
||||
if obj, ok := o["value"]; ok {
|
||||
return newLazyNode(obj)
|
||||
}
|
||||
@@ -283,6 +305,23 @@ func (o operation) value() *lazyNode {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValueInterface decodes the operation value into an interface.
|
||||
func (o Operation) ValueInterface() (interface{}, error) {
|
||||
if obj, ok := o["value"]; ok && obj != nil {
|
||||
var v interface{}
|
||||
|
||||
err := json.Unmarshal(*obj, &v)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
|
||||
}
|
||||
|
||||
func isArray(buf []byte) bool {
|
||||
Loop:
|
||||
for _, c := range buf {
|
||||
@@ -359,7 +398,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) {
|
||||
func (d *partialDoc) remove(key string) error {
|
||||
_, ok := (*d)[key]
|
||||
if !ok {
|
||||
return fmt.Errorf("Unable to remove nonexistent key: %s", key)
|
||||
return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
|
||||
}
|
||||
|
||||
delete(*d, key)
|
||||
@@ -385,7 +424,7 @@ func (d *partialArray) add(key string, val *lazyNode) error {
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
|
||||
}
|
||||
|
||||
sz := len(*d) + 1
|
||||
@@ -395,17 +434,17 @@ func (d *partialArray) add(key string, val *lazyNode) error {
|
||||
cur := *d
|
||||
|
||||
if idx >= len(ary) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if SupportNegativeIndices {
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(ary) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
idx += len(ary)
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(ary)
|
||||
}
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
@@ -424,7 +463,7 @@ func (d *partialArray) get(key string) (*lazyNode, error) {
|
||||
}
|
||||
|
||||
if idx >= len(*d) {
|
||||
return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
return (*d)[idx], nil
|
||||
@@ -439,17 +478,17 @@ func (d *partialArray) remove(key string) error {
|
||||
cur := *d
|
||||
|
||||
if idx >= len(cur) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if SupportNegativeIndices {
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(cur) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
idx += len(cur)
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(cur)
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, len(cur)-1)
|
||||
@@ -462,140 +501,189 @@ func (d *partialArray) remove(key string) error {
|
||||
|
||||
}
|
||||
|
||||
func (p Patch) add(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
return con.add(key, op.value())
|
||||
}
|
||||
|
||||
func (p Patch) remove(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
return con.remove(key)
|
||||
}
|
||||
|
||||
func (p Patch) replace(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
_, ok := con.get(key)
|
||||
if ok != nil {
|
||||
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
|
||||
}
|
||||
|
||||
return con.set(key, op.value())
|
||||
}
|
||||
|
||||
func (p Patch) move(doc *container, op operation) error {
|
||||
from := op.from()
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
func (p Patch) add(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(ErrMissing, "add operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
err = con.add(key, op.value())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in add for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) remove(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
err = con.remove(key)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error in remove for path: '%s'", path)
|
||||
}
|
||||
|
||||
path := op.path()
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
return con.add(key, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) test(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
func (p Patch) replace(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "replace operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
|
||||
return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
_, ok := con.get(key)
|
||||
if ok != nil {
|
||||
return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
|
||||
}
|
||||
|
||||
err = con.set(key, op.value())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in remove for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) move(doc *container, op Operation) error {
|
||||
from, err := op.From()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "move operation failed to decode from")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error in move for path: '%s'", key)
|
||||
}
|
||||
|
||||
err = con.remove(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in move for path: '%s'", key)
|
||||
}
|
||||
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "move operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
err = con.add(key, val)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in move for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) test(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "test operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in test for path: '%s'", path)
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
if op.value().raw == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
} else if op.value() == nil {
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
}
|
||||
|
||||
if val.equal(op.value()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
}
|
||||
|
||||
func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
|
||||
from := op.from()
|
||||
func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
|
||||
from, err := op.From()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copy operation failed to decode from")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
|
||||
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error in copy for from: '%s'", from)
|
||||
}
|
||||
|
||||
path := op.path()
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
|
||||
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
valCopy, sz, err := deepCopy(val)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error while performing deep copy")
|
||||
}
|
||||
|
||||
(*accumulatedCopySize) += int64(sz)
|
||||
if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
|
||||
return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
|
||||
}
|
||||
|
||||
return con.add(key, valCopy)
|
||||
err = con.add(key, valCopy)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error while adding value during copy")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal indicates if 2 JSON documents have the same structural equality.
|
||||
@@ -651,7 +739,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
|
||||
var accumulatedCopySize int64
|
||||
|
||||
for _, op := range p {
|
||||
switch op.kind() {
|
||||
switch op.Kind() {
|
||||
case "add":
|
||||
err = p.add(&pd, op)
|
||||
case "remove":
|
||||
@@ -665,7 +753,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
|
||||
case "copy":
|
||||
err = p.copy(&pd, op, &accumulatedCopySize)
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected kind: %s", op.kind())
|
||||
err = fmt.Errorf("Unexpected kind: %s", op.Kind())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
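With this change the patch steps are exported as Operation, and the accessors return errors instead of the old "unknown" sentinel. A short sketch of reading a decoded patch through that API follows; it is illustrative only, and the example patch document is invented.

```go
package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	raw := []byte(`[
		{"op": "add", "path": "/metadata/annotations/example", "value": "macvlan-conf"},
		{"op": "remove", "path": "/spec/paused"}
	]`)

	patch, err := jsonpatch.DecodePatch(raw)
	if err != nil {
		log.Fatal(err)
	}

	// Patch is now an ordered []Operation, so each step can be inspected directly.
	for _, op := range patch {
		path, err := op.Path() // reports a decode error rather than returning "unknown"
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("op=%s path=%s\n", op.Kind(), path)
	}
}
```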
12 vendor/github.com/fsnotify/fsnotify/.editorconfig generated vendored Normal file
@@ -0,0 +1,12 @@
root = true

[*.go]
indent_style = tab
indent_size = 4
insert_final_newline = true

[*.{yml,yaml}]
indent_style = space
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true
1 vendor/github.com/fsnotify/fsnotify/.gitattributes generated vendored Normal file
@@ -0,0 +1 @@
go.sum linguist-generated
36 vendor/github.com/fsnotify/fsnotify/.travis.yml generated vendored Normal file
@@ -0,0 +1,36 @@
sudo: false
language: go

go:
  - "stable"
  - "1.11.x"
  - "1.10.x"
  - "1.9.x"

matrix:
  include:
    - go: "stable"
      env: GOLINT=true
  allow_failures:
    - go: tip
  fast_finish: true


before_install:
  - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi

script:
  - go test --race ./...

after_script:
  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
  - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
  - go vet ./...

os:
  - linux
  - osx
  - windows

notifications:
  email: false
0 vendor/gopkg.in/fsnotify.v1/AUTHORS → vendor/github.com/fsnotify/fsnotify/AUTHORS generated vendored
2 vendor/gopkg.in/fsnotify.v1/LICENSE → vendor/github.com/fsnotify/fsnotify/LICENSE generated vendored
@@ -1,5 +1,5 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
71 vendor/gopkg.in/fsnotify.v1/README.md → vendor/github.com/fsnotify/fsnotify/README.md generated vendored
@@ -10,16 +10,16 @@ go get -u golang.org/x/sys/...

Cross platform: Windows, Linux, BSD and macOS.

|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, macOS, iOS\*|Supported [](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
| Adapter               | OS                               | Status                                                        |
| --------------------- | -------------------------------- | ------------------------------------------------------------- |
| inotify               | Linux 2.6.27 or later, Android\* | Supported [](https://travis-ci.org/fsnotify/fsnotify)         |
| kqueue                | BSD, macOS, iOS\*                | Supported [](https://travis-ci.org/fsnotify/fsnotify)         |
| ReadDirectoryChangesW | Windows                          | Supported [](https://travis-ci.org/fsnotify/fsnotify)         |
| FSEvents              | macOS                            | [Planned](https://github.com/fsnotify/fsnotify/issues/11)     |
| FEN                   | Solaris 11                       | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
| fanotify              | Linux 2.6.37+                    | [Planned](https://github.com/fsnotify/fsnotify/issues/114)    |
| USN Journals          | Windows                          | [Maybe](https://github.com/fsnotify/fsnotify/issues/53)       |
| Polling               | *All*                            | [Maybe](https://github.com/fsnotify/fsnotify/issues/9)        |

\* Android and iOS are untested.

@@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o

Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.

## Usage

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	done := make(chan bool)
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Println("modified file:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	err = watcher.Add("/tmp/foo")
	if err != nil {
		log.Fatal(err)
	}
	<-done
}
```

## Contributing

Please refer to [CONTRIBUTING][] before opening an issue or pull request.
@@ -65,6 +112,10 @@ There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.

**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**

fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.

[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
0 vendor/gopkg.in/fsnotify.v1/fen.go → vendor/github.com/fsnotify/fsnotify/fen.go generated vendored
@@ -63,4 +63,6 @@ func (e Event) String() string {
}

// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")
var (
	ErrEventOverflow = errors.New("fsnotify queue overflow")
)
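Wrapping ErrEventOverflow in a var block keeps it exported for callers that need to detect dropped events. A hedged sketch of that check, not part of the diff (the watched path is only an example):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	go func() {
		for err := range watcher.Errors {
			if err == fsnotify.ErrEventOverflow {
				// The kernel queue overflowed and events were dropped; a full
				// rescan of the watched paths would be needed here.
				log.Println("fsnotify queue overflow, events were lost")
				continue
			}
			log.Println("watch error:", err)
		}
	}()

	if err := watcher.Add("/etc/cni/net.d"); err != nil {
		log.Fatal(err)
	}
	select {} // block forever; real code would have a shutdown path
}
```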
5 vendor/github.com/fsnotify/fsnotify/go.mod generated vendored Normal file
@@ -0,0 +1,5 @@
module github.com/fsnotify/fsnotify

go 1.13

require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9
2 vendor/github.com/fsnotify/fsnotify/go.sum generated vendored Normal file
@@ -0,0 +1,2 @@
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -40,12 +40,12 @@ func newFdPoller(fd int) (*fdPoller, error) {
	poller.fd = fd

	// Create epoll fd
	poller.epfd, errno = unix.EpollCreate1(0)
	poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
	if poller.epfd == -1 {
		return nil, errno
	}
	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
	if errno != nil {
		return nil, errno
	}
@@ -8,4 +8,4 @@ package fsnotify

import "golang.org/x/sys/unix"

const openMode = unix.O_NONBLOCK | unix.O_RDONLY
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
@@ -9,4 +9,4 @@ package fsnotify
import "golang.org/x/sys/unix"

// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
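These hunks add O_CLOEXEC (and EPOLL_CLOEXEC) so fsnotify's internal descriptors are not leaked into child processes across exec. The sketch below is illustrative only and simply verifies the close-on-exec flag on a descriptor opened the same way:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// F_GETFD returns the descriptor flags; FD_CLOEXEC means the fd is closed
	// automatically when the process execs another program.
	flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("close-on-exec set:", flags&unix.FD_CLOEXEC != 0)
}
```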
183 vendor/github.com/go-logr/logr/README.md generated vendored Normal file
@@ -0,0 +1,183 @@
|
||||
# A more minimal logging API for Go
|
||||
|
||||
Before you consider this package, please read [this blog post by the
|
||||
inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what
|
||||
he has to say, and it largely aligns with my own experiences. Too many
|
||||
choices of levels means inconsistent logs.
|
||||
|
||||
This package offers a purely abstract interface, based on these ideas but with
|
||||
a few twists. Code can depend on just this interface and have the actual
|
||||
logging implementation be injected from callers. Ideally only `main()` knows
|
||||
what logging implementation is being used.
|
||||
|
||||
# Differences from Dave's ideas
|
||||
|
||||
The main differences are:
|
||||
|
||||
1) Dave basically proposes doing away with the notion of a logging API in favor
|
||||
of `fmt.Printf()`. I disagree, especially when you consider things like output
|
||||
locations, timestamps, file and line decorations, and structured logging. I
|
||||
restrict the API to just 2 types of logs: info and error.
|
||||
|
||||
Info logs are things you want to tell the user which are not errors. Error
|
||||
logs are, well, errors. If your code receives an `error` from a subordinate
|
||||
function call and is logging that `error` *and not returning it*, use error
|
||||
logs.
|
||||
|
||||
2) Verbosity-levels on info logs. This gives developers a chance to indicate
|
||||
arbitrary grades of importance for info logs, without assigning names with
|
||||
semantic meaning such as "warning", "trace", and "debug". Superficially this
|
||||
may feel very similar, but the primary difference is the lack of semantics.
|
||||
Because verbosity is a numerical value, it's safe to assume that an app running
|
||||
with higher verbosity means more (and less important) logs will be generated.
|
||||
|
||||
This is a BETA grade API.
|
||||
|
||||
There are implementations for the following logging libraries:
|
||||
|
||||
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
|
||||
- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr)
|
||||
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
|
||||
- **log** (the Go standard library logger):
|
||||
[stdr](https://github.com/go-logr/stdr)
|
||||
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
|
||||
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
|
||||
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
|
||||
|
||||
# FAQ
|
||||
|
||||
## Conceptual
|
||||
|
||||
## Why structured logging?
|
||||
|
||||
- **Structured logs are more easily queriable**: Since you've got
|
||||
key-value pairs, it's much easier to query your structured logs for
|
||||
particular values by filtering on the contents of a particular key --
|
||||
think searching request logs for error codes, Kubernetes reconcilers for
|
||||
the name and namespace of the reconciled object, etc
|
||||
|
||||
- **Structured logging makes it easier to have cross-referencable logs**:
|
||||
Similarly to searchability, if you maintain conventions around your
|
||||
keys, it becomes easy to gather all log lines related to a particular
|
||||
concept.
|
||||
|
||||
- **Structured logs allow better dimensions of filtering**: if you have
|
||||
structure to your logs, you've got more precise control over how much
|
||||
information is logged -- you might choose in a particular configuration
|
||||
to log certain keys but not others, only log lines where a certain key
|
||||
matches a certain value, etc, instead of just having v-levels and names
|
||||
to key off of.
|
||||
|
||||
- **Structured logs better represent structured data**: sometimes, the
|
||||
data that you want to log is inherently structured (think tuple-link
|
||||
objects). Structured logs allow you to preserve that structure when
|
||||
outputting.
|
||||
|
||||
## Why V-levels?
|
||||
|
||||
**V-levels give operators an easy way to control the chattiness of log
|
||||
operations**. V-levels provide a way for a given package to distinguish
|
||||
the relative importance or verbosity of a given log message. Then, if
|
||||
a particular logger or package is logging too many messages, the user
|
||||
of the package can simply change the v-levels for that library.
|
||||
|
||||
## Why not more named levels, like Warning?
|
||||
|
||||
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
|
||||
from Dave's ideas](#differences-from-daves-ideas).
|
||||
|
||||
## Why not allow format strings, too?
|
||||
|
||||
**Format strings negate many of the benefits of structured logs**:
|
||||
|
||||
- They're not easily searchable without resorting to fuzzy searching,
|
||||
regular expressions, etc
|
||||
|
||||
- They don't store structured data well, since contents are flattened into
|
||||
a string
|
||||
|
||||
- They're not cross-referencable
|
||||
|
||||
- They don't compress easily, since the message is not constant
|
||||
|
||||
(unless you turn positional parameters into key-value pairs with numerical
|
||||
keys, at which point you've gotten key-value logging with meaningless
|
||||
keys)
|
||||
|
||||
## Practical
|
||||
|
||||
## Why key-value pairs, and not a map?
|
||||
|
||||
Key-value pairs are *much* easier to optimize, especially around
|
||||
allocations. Zap (a structured logger that inspired logr's interface) has
|
||||
[performance measurements](https://github.com/uber-go/zap#performance)
|
||||
that show this quite nicely.
|
||||
|
||||
While the interface ends up being a little less obvious, you get
|
||||
potentially better performance, plus avoid making users type
|
||||
`map[string]string{}` every time they want to log.
|
||||
|
||||
## What if my V-levels differ between libraries?
|
||||
|
||||
That's fine. Control your V-levels on a per-logger basis, and use the
|
||||
`WithName` function to pass different loggers to different libraries.
|
||||
|
||||
Generally, you should take care to ensure that you have relatively
|
||||
consistent V-levels within a given logger, however, as this makes deciding
|
||||
on what verbosity of logs to request easier.
|
||||
|
||||
## But I *really* want to use a format string!
|
||||
|
||||
That's not actually a question. Assuming your question is "how do
|
||||
I convert my mental model of logging with format strings to logging with
|
||||
constant messages":
|
||||
|
||||
1. figure out what the error actually is, as you'd write in a TL;DR style,
|
||||
and use that as a message
|
||||
|
||||
2. For every place you'd write a format specifier, look to the word before
|
||||
it, and add that as a key value pair
|
||||
|
||||
For instance, consider the following examples (all taken from spots in the
|
||||
Kubernetes codebase):
|
||||
|
||||
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
|
||||
responseCode, err)` becomes `logger.Error(err, "client returned an
|
||||
error", "code", responseCode)`
|
||||
|
||||
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
|
||||
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
|
||||
response when requesting url", "attempt", retries, "after
|
||||
seconds", seconds, "url", url)`
|
||||
|
||||
If you *really* must use a format string, place it as a key value, and
|
||||
call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to
|
||||
reflect over type %T")` becomes `logger.Info("unable to reflect over
|
||||
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
|
||||
this is necessary should be few and far between.
|
||||
|
||||
## How do I choose my V-levels?
|
||||
|
||||
This is basically the only hard constraint: increase V-levels to denote
|
||||
more verbose or more debug-y logs.
|
||||
|
||||
Otherwise, you can start out with `0` as "you always want to see this",
|
||||
`1` as "common logging that you might *possibly* want to turn off", and
|
||||
`10` as "I would like to performance-test your log collection stack".
|
||||
|
||||
Then gradually choose levels in between as you need them, working your way
|
||||
down from 10 (for debug and trace style logs) and up from 1 (for chattier
|
||||
info-type logs).
|
||||
|
||||
## How do I choose my keys
|
||||
|
||||
- make your keys human-readable
|
||||
- constant keys are generally a good idea
|
||||
- be consistent across your codebase
|
||||
- keys should naturally match parts of the message string
|
||||
|
||||
While key names are mostly unrestricted (and spaces are acceptable),
|
||||
it's generally a good idea to stick to printable ascii characters, or at
|
||||
least match the general character set of your log lines.
|
||||
|
||||
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
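The FAQ above describes turning format-string calls into constant messages plus key/value pairs. A minimal sketch of that conversion against the logr.Logger interface; the reconciler, object type, and field names are made up for illustration:

```go
package reconcile

import "github.com/go-logr/logr"

type object struct {
	Namespace, Name string
}

// Instead of log.Printf("setting foo to %q for object %s/%s", v, o.Namespace, o.Name),
// keep the message constant and attach the variables as key/value pairs.
func setFoo(log logr.Logger, o object, targetValue string) {
	log = log.WithName("reconcilers").WithValues("target-type", "Foo")
	log.V(4).Info("setting foo on object", "value", targetValue, "object", o)
}
```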
51 vendor/github.com/go-logr/logr/discard.go generated vendored Normal file
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
Copyright 2020 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
// Discard returns a valid Logger that discards all messages logged to it.
|
||||
// It can be used whenever the caller is not interested in the logs.
|
||||
func Discard() Logger {
|
||||
return DiscardLogger{}
|
||||
}
|
||||
|
||||
// DiscardLogger is a Logger that discards all messages.
|
||||
type DiscardLogger struct{}
|
||||
|
||||
func (l DiscardLogger) Enabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) {
|
||||
}
|
||||
|
||||
func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||
}
|
||||
|
||||
func (l DiscardLogger) V(level int) Logger {
|
||||
return l
|
||||
}
|
||||
|
||||
func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger {
|
||||
return l
|
||||
}
|
||||
|
||||
func (l DiscardLogger) WithName(name string) Logger {
|
||||
return l
|
||||
}
|
||||
|
||||
// Verify that it actually implements the interface
|
||||
var _ Logger = DiscardLogger{}
|
3 vendor/github.com/go-logr/logr/go.mod generated vendored Normal file
@@ -0,0 +1,3 @@
module github.com/go-logr/logr

go 1.14
266 vendor/github.com/go-logr/logr/logr.go generated vendored Normal file
@@ -0,0 +1,266 @@
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This design derives from Dave Cheney's blog:
|
||||
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
||||
//
|
||||
// This is a BETA grade API. Until there is a significant 2nd implementation,
|
||||
// I don't really know how it will change.
|
||||
|
||||
// Package logr defines abstract interfaces for logging. Packages can depend on
|
||||
// these interfaces and callers can implement logging in whatever way is
|
||||
// appropriate.
|
||||
//
|
||||
// Usage
|
||||
//
|
||||
// Logging is done using a Logger. Loggers can have name prefixes and named
|
||||
// values attached, so that all log messages logged with that Logger have some
|
||||
// base context associated.
|
||||
//
|
||||
// The term "key" is used to refer to the name associated with a particular
|
||||
// value, to disambiguate it from the general Logger name.
|
||||
//
|
||||
// For instance, suppose we're trying to reconcile the state of an object, and
|
||||
// we want to log that we've made some decision.
|
||||
//
|
||||
// With the traditional log package, we might write:
|
||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
||||
// targetValue, object.Namespace, object.Name)
|
||||
//
|
||||
// With logr's structured logging, we'd write:
|
||||
// // elsewhere in the file, set up the logger to log with the prefix of
|
||||
// // "reconcilers", and the named value target-type=Foo, for extra context.
|
||||
// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
|
||||
//
|
||||
// // later on...
|
||||
// log.Info("setting foo on object", "value", targetValue, "object", object)
|
||||
//
|
||||
// Depending on our logging implementation, we could then make logging decisions
|
||||
// based on field values (like only logging such events for objects in a certain
|
||||
// namespace), or copy the structured information into a structured log store.
|
||||
//
|
||||
// For logging errors, Logger has a method called Error. Suppose we wanted to
|
||||
// log an error while reconciling. With the traditional log package, we might
|
||||
// write:
|
||||
// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
|
||||
//
|
||||
// With logr, we'd instead write:
|
||||
// // assuming the above setup for log
|
||||
// log.Error(err, "unable to reconcile object", "object", object)
|
||||
//
|
||||
// This functions similarly to:
|
||||
// log.Info("unable to reconcile object", "error", err, "object", object)
|
||||
//
|
||||
// However, it ensures that a standard key for the error value ("error") is used
|
||||
// across all error logging. Furthermore, certain implementations may choose to
|
||||
// attach additional information (such as stack traces) on calls to Error, so
|
||||
// it's preferred to use Error to log errors.
|
||||
//
|
||||
// Parts of a log line
|
||||
//
|
||||
// Each log message from a Logger has four types of context:
|
||||
// logger name, log verbosity, log message, and the named values.
|
||||
//
|
||||
// The Logger name consists of a series of name "segments" added by successive
|
||||
// calls to WithName. These name segments will be joined in some way by the
|
||||
// underlying implementation. It is strongly recommended that name segments
|
||||
// contain simple identifiers (letters, digits, and hyphen), and do not contain
|
||||
// characters that could muddle the log output or confuse the joining operation
|
||||
// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
|
||||
//
|
||||
// Log verbosity represents how little a log matters. Level zero, the default,
|
||||
// matters most. Increasing levels matter less and less. Try to avoid lots of
|
||||
// different verbosity levels, and instead provide useful keys, logger names,
|
||||
// and log messages for users to filter on. It's illegal to pass a log level
|
||||
// below zero.
|
||||
//
|
||||
// The log message consists of a constant message attached to the log line.
|
||||
// This should generally be a simple description of what's occurring, and should
|
||||
// never be a format string.
|
||||
//
|
||||
// Variable information can then be attached using named values (key/value
|
||||
// pairs). Keys are arbitrary strings, while values may be any Go value.
|
||||
//
|
||||
// Key Naming Conventions
|
||||
//
|
||||
// Keys are not strictly required to conform to any specification or regex, but
|
||||
// it is recommended that they:
|
||||
// * be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||
// * be constant (not dependent on input data)
|
||||
// * contain only printable characters
|
||||
// * not contain whitespace or punctuation
|
||||
//
|
||||
// These guidelines help ensure that log data is processed properly regardless
|
||||
// of the log implementation. For example, log implementations will try to
|
||||
// output JSON data or will store data for later database (e.g. SQL) queries.
|
||||
//
|
||||
// While users are generally free to use key names of their choice, it's
|
||||
// generally best to avoid using the following keys, as they're frequently used
|
||||
// by implementations:
|
||||
//
|
||||
// * `"caller"`: the calling information (file/line) of a particular log line.
|
||||
// * `"error"`: the underlying error value in the `Error` method.
|
||||
// * `"level"`: the log level.
|
||||
// * `"logger"`: the name of the associated logger.
|
||||
// * `"msg"`: the log message.
|
||||
// * `"stacktrace"`: the stack trace associated with a particular log line or
|
||||
// error (often from the `Error` message).
|
||||
// * `"ts"`: the timestamp for a log line.
|
||||
//
|
||||
// Implementations are encouraged to make use of these keys to represent the
|
||||
// above concepts, when necessary (for example, in a pure-JSON output form, it
|
||||
// would be necessary to represent at least message and timestamp as ordinary
|
||||
// named values).
|
||||
//
|
||||
// Implementations may choose to give callers access to the underlying
|
||||
// logging implementation. The recommended pattern for this is:
|
||||
// // Underlier exposes access to the underlying logging implementation.
|
||||
// // Since callers only have a logr.Logger, they have to know which
|
||||
// // implementation is in use, so this interface is less of an abstraction
|
||||
// // and more of way to test type conversion.
|
||||
// type Underlier interface {
|
||||
// GetUnderlying() <underlying-type>
|
||||
// }
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// TODO: consider adding back in format strings if they're really needed
|
||||
// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
|
||||
// TODO: consider other bits of glog functionality like Flush, OutputStats
|
||||
|
||||
// Logger represents the ability to log messages, both errors and not.
|
||||
type Logger interface {
|
||||
// Enabled tests whether this Logger is enabled. For example, commandline
|
||||
// flags might be used to set the logging verbosity and disable some info
|
||||
// logs.
|
||||
Enabled() bool
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
//
|
||||
// The msg argument should be used to add some constant description to
|
||||
// the log line. The key/value pairs can then be used to add additional
|
||||
// variable information. The key/value pairs should alternate string
|
||||
// keys and arbitrary values.
|
||||
Info(msg string, keysAndValues ...interface{})
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as context.
|
||||
// It functions similarly to calling Info with the "error" named value, but may
|
||||
// have unique behavior, and should be preferred for logging errors (see the
|
||||
// package documentations for more information).
|
||||
//
|
||||
// The msg field should be used to add context to any underlying error,
|
||||
// while the err field should be used to attach the actual error that
|
||||
// triggered this log line, if present.
|
||||
Error(err error, msg string, keysAndValues ...interface{})
|
||||
|
||||
// V returns an Logger value for a specific verbosity level, relative to
|
||||
// this Logger. In other words, V values are additive. V higher verbosity
|
||||
// level means a log message is less important. It's illegal to pass a log
|
||||
// level less than zero.
|
||||
V(level int) Logger
|
||||
|
||||
// WithValues adds some key-value pairs of context to a logger.
|
||||
// See Info for documentation on how key/value pairs work.
|
||||
WithValues(keysAndValues ...interface{}) Logger
|
||||
|
||||
// WithName adds a new element to the logger's name.
|
||||
// Successive calls with WithName continue to append
|
||||
// suffixes to the logger's name. It's strongly recommended
|
||||
// that name segments contain only letters, digits, and hyphens
|
||||
// (see the package documentation for more information).
|
||||
WithName(name string) Logger
|
||||
}
|
||||
|
||||
// InfoLogger provides compatibility with code that relies on the v0.1.0
|
||||
// interface.
|
||||
//
|
||||
// Deprecated: InfoLogger is an artifact of early versions of this API. New
|
||||
// users should never use it and existing users should use Logger instead. This
|
||||
// will be removed in a future release.
|
||||
type InfoLogger = Logger
|
||||
|
||||
type contextKey struct{}
|
||||
|
||||
// FromContext returns a Logger constructed from ctx or nil if no
|
||||
// logger details are found.
|
||||
func FromContext(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger constructed from ctx or a Logger
|
||||
// that discards all messages if no logger details are found.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new context derived from ctx that embeds the Logger.
|
||||
func NewContext(ctx context.Context, l Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, l)
|
||||
}
|
||||
|
||||
// CallDepthLogger represents a Logger that knows how to climb the call stack
|
||||
// to identify the original call site and can offset the depth by a specified
|
||||
// number of frames. This is useful for users who have helper functions
|
||||
// between the "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as file,
|
||||
// function, or line) would otherwise log information about the intermediate
|
||||
// helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required to
|
||||
// support it.
|
||||
type CallDepthLogger interface {
|
||||
Logger
|
||||
|
||||
// WithCallDepth returns a Logger that will offset the call stack by the
|
||||
// specified number of frames when logging call site information. If depth
|
||||
// is 0 the attribution should be to the direct caller of this method. If
|
||||
// depth is 1 the attribution should skip 1 call frame, and so on.
|
||||
// Successive calls to this are additive.
|
||||
WithCallDepth(depth int) Logger
|
||||
}
|
||||
|
||||
// WithCallDepth returns a Logger that will offset the call stack by the
|
||||
// specified number of frames when logging call site information, if possible.
|
||||
// This is useful for users who have helper functions between the "real" call
|
||||
// site and the actual calls to Logger methods. If depth is 0 the attribution
|
||||
// should be to the direct caller of this function. If depth is 1 the
|
||||
// attribution should skip 1 call frame, and so on. Successive calls to this
|
||||
// are additive.
|
||||
//
|
||||
// If the underlying log implementation supports the CallDepthLogger interface,
|
||||
// the WithCallDepth method will be called and the result returned. If the
|
||||
// implementation does not support CallDepthLogger, the original Logger will be
|
||||
// returned.
|
||||
//
|
||||
// Callers which care about whether this was supported or not should test for
|
||||
// CallDepthLogger support themselves.
|
||||
func WithCallDepth(logger Logger, depth int) Logger {
|
||||
if decorator, ok := logger.(CallDepthLogger); ok {
|
||||
return decorator.WithCallDepth(depth)
|
||||
}
|
||||
return logger
|
||||
}
|
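logr.go also defines context helpers (NewContext, FromContext, FromContextOrDiscard). A small sketch of how they compose with Discard(); this is illustrative only and the handler is invented:

```go
package main

import (
	"context"

	"github.com/go-logr/logr"
)

func handler(ctx context.Context) {
	// FromContextOrDiscard falls back to Discard(), so this is safe even when
	// no logger was stored in the context.
	log := logr.FromContextOrDiscard(ctx)
	log.Info("handling request", "component", "example")
}

func main() {
	// Store a no-op logger; real callers would pass a concrete implementation
	// such as glogr, klogr, or zapr here (see the README's adapter list).
	ctx := logr.NewContext(context.Background(), logr.Discard())
	handler(ctx)
}
```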
324 vendor/github.com/golang/protobuf/proto/buffer.go generated vendored Normal file
@@ -0,0 +1,324 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed32 = 5
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
)
|
||||
|
||||
// EncodeVarint returns the varint encoded bytes of v.
|
||||
func EncodeVarint(v uint64) []byte {
|
||||
return protowire.AppendVarint(nil, v)
|
||||
}
|
||||
|
||||
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||
// This is equal to len(EncodeVarint(v)).
|
||||
func SizeVarint(v uint64) int {
|
||||
return protowire.SizeVarint(v)
|
||||
}
|
||||
|
||||
// DecodeVarint parses a varint encoded integer from b,
|
||||
// returning the integer value and the length of the varint.
|
||||
// It returns (0, 0) if there is a parse error.
|
||||
func DecodeVarint(b []byte) (uint64, int) {
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return 0, 0
|
||||
}
|
||||
return v, n
|
||||
}
|
||||
|
||||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||
// It may be reused between invocations to reduce memory usage.
|
||||
type Buffer struct {
|
||||
buf []byte
|
||||
idx int
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer initialized with buf,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
return &Buffer{buf: buf}
|
||||
}
|
||||
|
||||
// SetDeterministic specifies whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexicographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||
b.deterministic = deterministic
|
||||
}
|
||||
|
||||
// SetBuf sets buf as the internal buffer,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func (b *Buffer) SetBuf(buf []byte) {
|
||||
b.buf = buf
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Reset clears the internal buffer of all written and unread data.
|
||||
func (b *Buffer) Reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Bytes returns the internal buffer.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf
|
||||
}
|
||||
|
||||
// Unread returns the unread portion of the buffer.
|
||||
func (b *Buffer) Unread() []byte {
|
||||
return b.buf[b.idx:]
|
||||
}
|
||||
|
||||
// Marshal appends the wire-format encoding of m to the buffer.
|
||||
func (b *Buffer) Marshal(m Message) error {
|
||||
var err error
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the wire-format message in the buffer and
|
||||
// places the decoded results in m.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) Unmarshal(m Message) error {
|
||||
err := UnmarshalMerge(b.Unread(), m)
|
||||
b.idx = len(b.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||
|
||||
func (m *unknownFields) String() string { panic("not implemented") }
|
||||
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||
|
||||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||
// to stdout. This is only intended for debugging.
|
||||
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||
m := MessageReflect(new(unknownFields))
|
||||
m.SetUnknown(b)
|
||||
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||
}
|
||||
|
||||
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||
b.buf = protowire.AppendVarint(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed64(v uint64) error {
|
||||
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
|
||||
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
||||
b.buf = protowire.AppendBytes(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
|
||||
// It does not validate whether v contains valid UTF-8.
|
||||
func (b *Buffer) EncodeStringBytes(v string) error {
|
||||
b.buf = protowire.AppendString(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
||||
func (b *Buffer) EncodeMessage(m Message) error {
|
||||
var err error
|
||||
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
||||
func (b *Buffer) DecodeVarint() (uint64, error) {
|
||||
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag32() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag64() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
|
||||
}
|
||||
|
||||
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed32() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed64() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
|
||||
// If alloc is specified, it returns a copy of the raw bytes
|
||||
// rather than a sub-slice of the buffer.
|
||||
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
||||
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return nil, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
if alloc {
|
||||
v = append([]byte(nil), v...)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
|
||||
// It does not validate whether the raw bytes contain valid UTF-8.
|
||||
func (b *Buffer) DecodeStringBytes() (string, error) {
|
||||
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return "", protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeMessage consumes a length-prefixed message from the buffer.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeMessage(m Message) error {
|
||||
v, err := b.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// DecodeGroup consumes a message group from the buffer.
|
||||
// It assumes that the start group marker has already been consumed and
|
||||
// consumes all bytes until (and including) the end group marker.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeGroup(m Message) error {
|
||||
v, n, err := consumeGroup(b.buf[b.idx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.idx += n
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// consumeGroup parses b until it finds an end group marker, returning
|
||||
// the raw bytes of the message (excluding the end group marker) and the
|
||||
// total length of the message (including the end group marker).
|
||||
func consumeGroup(b []byte) ([]byte, int, error) {
|
||||
b0 := b
|
||||
depth := 1 // assume this follows a start group marker
|
||||
for {
|
||||
_, wtyp, tagLen := protowire.ConsumeTag(b)
|
||||
if tagLen < 0 {
|
||||
return nil, 0, protowire.ParseError(tagLen)
|
||||
}
|
||||
b = b[tagLen:]
|
||||
|
||||
var valLen int
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
_, valLen = protowire.ConsumeVarint(b)
|
||||
case protowire.Fixed32Type:
|
||||
_, valLen = protowire.ConsumeFixed32(b)
|
||||
case protowire.Fixed64Type:
|
||||
_, valLen = protowire.ConsumeFixed64(b)
|
||||
case protowire.BytesType:
|
||||
_, valLen = protowire.ConsumeBytes(b)
|
||||
case protowire.StartGroupType:
|
||||
depth++
|
||||
case protowire.EndGroupType:
|
||||
depth--
|
||||
default:
|
||||
return nil, 0, errors.New("proto: cannot parse reserved wire type")
|
||||
}
|
||||
if valLen < 0 {
|
||||
return nil, 0, protowire.ParseError(valLen)
|
||||
}
|
||||
b = b[valLen:]
|
||||
|
||||
if depth == 0 {
|
||||
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
|
||||
}
|
||||
}
|
||||
}
|
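Taken together, the methods above let a Buffer act as a simple cursor over wire-format primitives. A round-trip sketch using only the APIs defined in this file (real messages additionally need tag/wire-type framing, which is omitted here), assuming the import path resolves to this vendored package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)          // plain unsigned varint
	_ = b.EncodeZigzag64(^uint64(0)) // zig-zag form of -1, stored as varint 1
	_ = b.EncodeStringBytes("hi")    // length-prefixed bytes

	v, _ := b.DecodeVarint()
	z, _ := b.DecodeZigzag64()
	s, _ := b.DecodeStringBytes()
	fmt.Println(v, int64(z), s) // 300 -1 hi
}

The zig-zag step maps small-magnitude signed values to small unsigned varints (0 to 0, -1 to 1, 1 to 2, -2 to 3, and so on), which is why -1 above still encodes to a single byte.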
253 vendor/github.com/golang/protobuf/proto/clone.go (generated, vendored)
@@ -1,253 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return src
|
||||
}
|
||||
out := reflect.New(in.Type().Elem())
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generate Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
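This file carried the legacy reflection-based implementation of deep copy and merge; the exported entry points, Clone and Merge, remain part of the package's public API in newer releases. A usage sketch, with *pb.Example standing in for a hypothetical proto3-style generated message type:

package main

import (
	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	src := &pb.Example{Name: "a"}

	// Clone returns a deep copy; mutating dst does not affect src.
	dst := proto.Clone(src).(*pb.Example)

	// Merge overlays the set fields of src onto dst; repeated fields append.
	proto.Merge(dst, src)
	_ = dst
}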
427 vendor/github.com/golang/protobuf/proto/decode.go (generated, vendored)
@@ -1,427 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
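The deleted decoder also drops the hand-unrolled varint fast path, but the caller-facing contract spelled out in its comments is what matters: Unmarshal resets the destination before decoding, while UnmarshalMerge preserves and merges into existing data. A sketch, again using a hypothetical generated type *pb.Example:

package main

import (
	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	buf, _ := proto.Marshal(&pb.Example{Name: "a"})

	fresh := &pb.Example{Name: "discarded"}
	_ = proto.Unmarshal(buf, fresh) // resets fresh first, then decodes

	merged := &pb.Example{Name: "kept unless the field is set in buf"}
	_ = proto.UnmarshalMerge(buf, merged) // merges into the existing value
}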
63 vendor/github.com/golang/protobuf/proto/defaults.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"google.golang.org/protobuf/reflect/protoreflect"
)

// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
	if m != nil {
		setDefaults(MessageReflect(m))
	}
}

func setDefaults(m protoreflect.Message) {
	fds := m.Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if !m.Has(fd) {
			if fd.HasDefault() && fd.ContainingOneof() == nil {
				v := fd.Default()
				if fd.Kind() == protoreflect.BytesKind {
					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
				}
				m.Set(fd, v)
			}
			continue
		}
	}

	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				setDefaults(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					setDefaults(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					setDefaults(v.Message())
					return true
				})
			}
		}
		return true
	})
}
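As the code above shows, SetDefaults fills unset scalar fields (outside oneofs) with their declared defaults and recurses into populated singular, repeated, and map-valued message fields. Usage is a single call; *pb.Example here is a hypothetical proto2 message whose fields declare defaults:

package main

import (
	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical proto2-generated package
)

func main() {
	m := &pb.Example{}
	proto.SetDefaults(m) // unset scalar fields now carry their declared default values
}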
126 vendor/github.com/golang/protobuf/proto/deprecated.go (generated, vendored)
@@ -1,63 +1,113 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
// Deprecated: do not use.
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
// Deprecated: No longer returned.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this type existed for internal-use only.
|
||||
type InternalMessageInfo struct{}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Size(m Message) int {
|
||||
return protoV2.Size(MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
|
||||
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
|
||||
}
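The retained JSON-enum helpers accept both encodings that generated code has historically emitted: a quoted enum name ("new style") and a bare integer ("old style"). A sketch with hypothetical enum tables of the shape protoc-gen-go generates:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	names := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}  // value -> name
	values := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1} // name -> value

	fmt.Println(proto.EnumName(names, 1))  // "ACTIVE"
	fmt.Println(proto.EnumName(names, 42)) // unknown value falls back to "42"

	v1, _ := proto.UnmarshalJSONEnum(values, []byte(`"ACTIVE"`), "State") // string form
	v2, _ := proto.UnmarshalJSONEnum(values, []byte(`1`), "State")        // integer form
	fmt.Println(v1, v2) // 1 1
}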
Some files were not shown because too many files have changed in this diff.