Mirror of https://github.com/kata-containers/kata-containers.git (synced 2026-02-22 14:54:23 +00:00)
Compare commits: 2.4.0-alph ... 2.3.2 (124 commits)
| SHA1 |
|---|
| 1af292c9e6 |
| 67947b5f05 |
| f2cbfad8b0 |
| 977f1f5bb6 |
| e9aaefb135 |
| 99ed596ae4 |
| 13b7d93b4f |
| b8463224c8 |
| 8c8571f4ba |
| 620bb97e3f |
| 770d4acf8b |
| cedb01d295 |
| 1ccc95fba1 |
| a661e53892 |
| 5475d7a7e9 |
| bed0f3c801 |
| 04426d65ba |
| 3e1955effd |
| 52dd41dacb |
| 786c667e60 |
| cf5a79cfe1 |
| e3b00f398b |
| 67950aefd5 |
| bd4ab0c4d5 |
| 3260adc4a1 |
| cc64461fc8 |
| f2c6cd0808 |
| 78afa10ab9 |
| a829867674 |
| 87f9a69035 |
| fc012a2bab |
| 63c5a8aa53 |
| 365e358115 |
| a2e524f356 |
| 3d4dedefda |
| 919fc56daa |
| dfbe74c489 |
| 9e7eed7c4b |
| a0bb8c5599 |
| 53cf1dd042 |
| a4dee6a591 |
| fd87b60c7a |
| 2cb4f7ba70 |
| 993dcc94ff |
| bbd7cc2f93 |
| 9837ec728c |
| 8785106f6c |
| a915f08266 |
| ec3faab892 |
| 1f61be842d |
| d2d8f9ac65 |
| ca30eee3e2 |
| 0217abce24 |
| 572b25dd35 |
| 84e69ecb22 |
| 57a6d46376 |
| 77b6cfbd15 |
| d1530afa19 |
| 0e1cb124b7 |
| 24085c9553 |
| 514bf74f8f |
| 77a2502a0f |
| 6413ecf459 |
| a31b5b9ee8 |
| a0bed72d49 |
| d61bcb8a44 |
| d03e05e803 |
| 0f7db91c0f |
| 25ee73ceb3 |
| 64ae76e967 |
| 271d67a831 |
| f42c7d5125 |
| 7c15335dc9 |
| 15080f20e7 |
| c2b8eb3c2c |
| fe0fbab574 |
| 89f9672f56 |
| 0a32a1793d |
| be5468fda7 |
| 18bb9a5d9b |
| f068057073 |
| 3458073d09 |
| f9c09ad5bc |
| 0e91503cd4 |
| 185f96d170 |
| 9bc543f5db |
| 198e0d1666 |
| bf183c5f7f |
| df34e91978 |
| 5995efc0a6 |
| 000f878417 |
| a6a76bb092 |
| f61e31cd84 |
| cb7891e0b4 |
| 2667e0286a |
| 3542cba8f3 |
| 117b920230 |
| 5694749ce5 |
| db9cd1078f |
| a51a1f6d06 |
| 5bc1c209b2 |
| b2851ffc9c |
| 45eafafdf3 |
| 34a1b5396a |
| f1cd3b6300 |
| e0b74bb413 |
| 8a705f74b5 |
| ac5ab86ebd |
| d22ec59920 |
| 440657b36d |
| 0c00a9d463 |
| f9bde321e9 |
| b821511992 |
| a9d5377bd9 |
| ea83ff1fc3 |
| 03f7a5e49b |
| 91003c2751 |
| 57ffe14940 |
| 5e9b807ba0 |
| de6fe98ec0 |
| de0eea5f44 |
| 73d7929c10 |
| 96b66d2cb4 |
| 62a51d51a2 |
.github/workflows/PR-wip-checks.yaml (1 line changed, vendored)

@@ -15,6 +15,7 @@ jobs:
name: WIP Check
steps:
- name: WIP Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/wip-check@1c2a1ca6c110026b3e2297bb2ef39e1747b5a755
with:
labels: '["do-not-merge", "wip", "rfc"]'
.github/workflows/commit-message-check.yaml (14 lines changed, vendored)

@@ -5,6 +5,8 @@ on:
- opened
- reopened
- synchronize
- labeled
- unlabeled

env:
error_msg: |+

@@ -18,24 +20,26 @@ jobs:
name: Commit Message Check
steps:
- name: Get PR Commits
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@v1.0.0
with:
token: ${{ secrets.GITHUB_TOKEN }}

- name: DCO Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Commit Body Missing Check
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-body-check@v1.0.2
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Check Subject Line Length
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

@@ -44,7 +48,7 @@ jobs:
post_error: ${{ env.error_msg }}

- name: Check Body Line Length
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

@@ -71,7 +75,7 @@ jobs:
post_error: ${{ env.error_msg }}

- name: Check Fixes
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

@@ -82,7 +86,7 @@ jobs:
one_pass_all_pass: 'true'

- name: Check Subsystem
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
.github/workflows/kata-deploy-push.yaml (18 lines changed, vendored)

@@ -1,6 +1,15 @@
name: kata deploy build

on: [push, pull_request]
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
- labeled
- unlabeled
push:

jobs:
build-asset:

@@ -19,11 +28,13 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Install docker
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
curl -fsSL https://test.docker.com -o test-docker.sh
sh test-docker.sh

- name: Build ${{ matrix.asset }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)

@@ -33,6 +44,7 @@ jobs:
KATA_ASSET: ${{ matrix.asset }}

- name: store-artifact ${{ matrix.asset }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/upload-artifact@v2
with:
name: kata-artifacts

@@ -45,14 +57,17 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: get-artifacts
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/download-artifact@v2
with:
name: kata-artifacts
path: build
- name: merge-artifacts
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
make merge-builds
- name: store-artifacts
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/upload-artifact@v2
with:
name: kata-static-tarball

@@ -63,6 +78,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: make kata-tarball
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
make kata-tarball
sudo make install-tarball
.github/workflows/kata-deploy-test.yaml (162 lines changed, vendored)

@@ -5,46 +5,152 @@ on:
name: test-kata-deploy

jobs:
create-and-test-container:
check-comment-and-membership:
runs-on: ubuntu-latest
if: |
github.event.issue.pull_request
&& github.event_name == 'issue_comment'
&& github.event.action == 'created'
&& startsWith(github.event.comment.body, '/test_kata_deploy')
steps:
- name: Check membership
uses: kata-containers/is-organization-member@1.0.1
id: is_organization_member
with:
organization: kata-containers
username: ${{ github.event.comment.user.login }}
token: ${{ secrets.GITHUB_TOKEN }}
- name: Fail if not member
run: |
result=${{ steps.is_organization_member.outputs.result }}
if [ $result == false ]; then
user=${{ github.event.comment.user.login }}
echo Either ${user} is not part of the kata-containers organization
echo or ${user} has its Organization Visibility set to Private at
echo https://github.com/orgs/kata-containers/people?query=${user}
echo
echo Ensure you change your Organization Visibility to Public and
echo trigger the test again.
exit 1
fi

build-asset:
runs-on: ubuntu-latest
needs: check-comment-and-membership
strategy:
matrix:
asset:
- cloud-hypervisor
- firecracker
- kernel
- qemu
- rootfs-image
- rootfs-initrd
- shim-v2
steps:
# As Github action event `issue_comment` does not provide the right ref
# (commit/branch) to be tested, let's use this third part action to work
# this limitation around.
- name: resolve pr refs
id: refs
uses: kata-containers/resolve-pr-refs@v0.0.3
with:
token: ${{ secrets.GITHUB_TOKEN }}

- uses: actions/checkout@v2
with:
ref: ${{ steps.refs.outputs.base_ref }}
- name: Install docker
run: |
curl -fsSL https://test.docker.com -o test-docker.sh
sh test-docker.sh

- name: Build ${{ matrix.asset }}
run: |
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
sudo cp -r "${build_dir}" "kata-build"
env:
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
if-no-files-found: error

create-kata-tarball:
runs-on: ubuntu-latest
needs: build-asset
steps:
# As Github action event `issue_comment` does not provide the right ref
# (commit/branch) to be tested, let's use this third part action to work
# this limitation around.
- name: resolve pr refs
id: refs
uses: kata-containers/resolve-pr-refs@v0.0.3
with:
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@v2
with:
ref: ${{ steps.refs.outputs.base_ref }}
- name: get-artifacts
uses: actions/download-artifact@v2
with:
name: kata-artifacts
path: kata-artifacts
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
- name: store-artifacts
uses: actions/upload-artifact@v2
with:
name: kata-static-tarball
path: kata-static.tar.xz

kata-deploy:
needs: create-kata-tarball
runs-on: ubuntu-latest
steps:
- name: get-PR-ref
id: get-PR-ref
run: |
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
echo "reference for PR: " ${ref}
echo "##[set-output name=pr-ref;]${ref}"

- name: check out
uses: actions/checkout@v2
# As Github action event `issue_comment` does not provide the right ref
# (commit/branch) to be tested, let's use this third part action to work
# this limitation around.
- name: resolve pr refs
id: refs
uses: kata-containers/resolve-pr-refs@v0.0.3
with:
ref: ${{ steps.get-PR-ref.outputs.pr-ref }}

- name: build-container-image
id: build-container-image
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@v2
with:
ref: ${{ steps.refs.outputs.base_ref }}
- name: get-kata-tarball
uses: actions/download-artifact@v2
with:
name: kata-static-tarball
- name: build-and-push-kata-deploy-ci
id: build-and-push-kata-deploy-ci
run: |
PR_SHA=$(git log --format=format:%H -n1)
VERSION="2.0.0"
ARTIFACT_URL="https://github.com/kata-containers/kata-containers/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
wget "${ARTIFACT_URL}" -O tools/packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} -t quay.io/kata-containers/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
docker push katadocker/kata-deploy-ci:$PR_SHA
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA
echo "##[set-output name=pr-sha;]${PR_SHA}"

tag=$(echo $GITHUB_REF | cut -d/ -f3-)
pushd $GITHUB_WORKSPACE
git checkout $tag
pkg_sha=$(git rev-parse HEAD)
popd
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
mkdir -p packaging/kata-deploy
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
echo "::set-output name=PKG_SHA::${pkg_sha}"
- name: test-kata-deploy-ci-in-aks
uses: ./tools/packaging/kata-deploy/action
uses: ./packaging/kata-deploy/action
with:
packaging-sha: ${{ steps.build-container-image.outputs.pr-sha }}
packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
env:
PKG_SHA: ${{ steps.build-container-image.outputs.pr-sha }}
PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
.github/workflows/main.yaml (295 lines changed, vendored)

@@ -1,295 +0,0 @@
name: Publish release tarball
on:
push:
tags:
- '1.*'

jobs:
get-artifact-list:
runs-on: ubuntu-latest
steps:
- name: get the list
run: |
pushd $GITHUB_WORKSPACE
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
git checkout $tag
popd
$GITHUB_WORKSPACE/tools/packaging/artifact-list.sh > artifact-list.txt
- name: save-artifact-list
uses: actions/upload-artifact@master
with:
name: artifact-list
path: artifact-list.txt

build-kernel:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_kernel"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- run: |
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
- name: build-kernel
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-kernel.tar.gz

build-experimental-kernel:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_experimental_kernel"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- run: |
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
- name: build-experimental-kernel
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-experimental-kernel.tar.gz

build-qemu:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_qemu"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-qemu
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-qemu.tar.gz

# Job for building the image
build-image:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_image"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-image
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-image.tar.gz

# Job for building firecracker hypervisor
build-firecracker:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_firecracker"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-firecracker
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-firecracker.tar.gz

# Job for building cloud-hypervisor
build-clh:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_clh"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-clh
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-clh.tar.gz

# Job for building kata components
build-kata-components:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_kata_components"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-kata-components
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-kata-components.tar.gz

gather-artifacts:
runs-on: ubuntu-16.04
needs: [build-experimental-kernel, build-kernel, build-qemu, build-image, build-firecracker, build-kata-components, build-clh]
steps:
- uses: actions/checkout@v1
- name: get-artifacts
uses: actions/download-artifact@master
with:
name: kata-artifacts
- name: colate-artifacts
run: |
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
- name: store-artifacts
uses: actions/upload-artifact@master
with:
name: release-candidate
path: kata-static.tar.xz

kata-deploy:
needs: gather-artifacts
runs-on: ubuntu-latest
steps:
- name: get-artifacts
uses: actions/download-artifact@master
with:
name: release-candidate
- name: build-and-push-kata-deploy-ci
id: build-and-push-kata-deploy-ci
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
git clone https://github.com/kata-containers/packaging
pushd packaging
git checkout $tag
pkg_sha=$(git rev-parse HEAD)
popd
mv release-candidate/kata-static.tar.xz ./packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
docker push katadocker/kata-deploy-ci:$pkg_sha
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
echo "::set-output name=PKG_SHA::${pkg_sha}"
- name: test-kata-deploy-ci-in-aks
uses: ./packaging/kata-deploy/action
with:
packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
env:
PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
- name: push-tarball
run: |
# tag the container image we created and push to DockerHub
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
docker push katadocker/kata-deploy:${tag}

upload-static-tarball:
needs: kata-deploy
runs-on: ubuntu-latest
steps:
- name: download-artifacts
uses: actions/download-artifact@master
with:
name: release-candidate
- name: install hub
run: |
HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
wget -q -O- https://github.com/github/hub/releases/download/v$HUB_VER/hub-linux-amd64-$HUB_VER.tgz | \
tar xz --strip-components=2 --wildcards '*/bin/hub' && sudo mv hub /usr/local/bin/hub
- name: push static tarball to github
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
tarball="kata-static-$tag-x86_64.tar.xz"
repo="https://github.com/kata-containers/runtime.git"
mv release-candidate/kata-static.tar.xz "release-candidate/${tarball}"
git clone "${repo}"
cd runtime
echo "uploading asset '${tarball}' to '${repo}' tag: ${tag}"
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "../release-candidate/${tarball}" "${tag}"
@@ -10,12 +10,15 @@ on:
types:
- opened
- reopened
- labeled
- unlabeled

jobs:
move-linked-issues-to-in-progress:
runs-on: ubuntu-latest
steps:
- name: Install hub
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
HUB_ARCH="amd64"
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\

@@ -26,6 +29,7 @@ jobs:
sudo install hub /usr/local/bin

- name: Install hub extension script
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
# Clone into a temporary directory to avoid overwriting
# any existing github directory.

@@ -35,9 +39,11 @@ jobs:
popd &>/dev/null

- name: Checkout code to allow hub to communicate with the project
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2

- name: Move issue to "In progress"
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
env:
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
run: |
.github/workflows/release.yaml (5 lines changed, vendored)

@@ -158,13 +158,14 @@ jobs:
- name: download-and-upload-tarball
env:
GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }}
GOPATH: ${HOME}/go
run: |
pushd $GITHUB_WORKSPACE
./ci/install_yq.sh
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
versions_yaml="versions.yaml"
version=$(yq read ${versions_yaml} "externals.libseccomp.version")
repo_url=$(yq read ${versions_yaml} "externals.libseccomp.url")
version=$(${GOPATH}/bin/yq read ${versions_yaml} "externals.libseccomp.version")
repo_url=$(${GOPATH}/bin/yq read ${versions_yaml} "externals.libseccomp.url")
download_url="${repo_url}/releases/download/v${version}"
tarball="libseccomp-${version}.tar.gz"
asc="${tarball}.asc"
@@ -20,6 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Install hub
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
HUB_ARCH="amd64"
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\

@@ -30,6 +31,7 @@ jobs:
sudo install hub /usr/local/bin

- name: Checkout code to allow hub to communicate with the project
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2

- name: Install porting checker script

@@ -42,6 +44,7 @@ jobs:
popd &>/dev/null

- name: Stop PR being merged unless it has a correct set of porting labels
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
env:
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
run: |
.github/workflows/snap.yaml (14 lines changed, vendored)

@@ -1,17 +1,29 @@
name: snap CI
on: ["pull_request"]
on:
pull_request:
types:
- opened
- synchronize
- reopened
- edited
- labeled
- unlabeled

jobs:
test:
runs-on: ubuntu-20.04
steps:
- name: Check out
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: Install Snapcraft
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: samuelmeuli/action-snapcraft@v1

- name: Build snap
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
snapcraft -d snap --destructive-mode
.github/workflows/static-checks.yaml (2 lines changed, vendored)

@@ -13,7 +13,7 @@ jobs:
test:
strategy:
matrix:
go-version: [1.15.x, 1.16.x]
go-version: [1.16.x, 1.17.x]
os: [ubuntu-20.04]
runs-on: ${{ matrix.os }}
env:
@@ -36,7 +36,7 @@ run_static_checks()
# Make sure we have the targeting branch
git remote set-branches --add origin "${branch}"
git fetch -a
bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
bash "$tests_repo_dir/.ci/static-checks.sh" "$@"
}

run_go_test()
@@ -6,4 +6,9 @@
#
FROM registry.centos.org/centos:8

RUN yum -y update && yum -y install git sudo wget
RUN yum -y update && \
yum -y install \
git \
sudo \
wget && \
yum clean all
@@ -9,4 +9,4 @@ set -e
cidir=$(dirname "$0")
source "${cidir}/lib.sh"

run_static_checks
run_static_checks "${@:-github.com/kata-containers/kata-containers}"
@@ -64,7 +64,7 @@

### Check Git-hub Actions

We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/main.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.
We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/release.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.

Check the [actions status page](https://github.com/kata-containers/kata-containers/actions) to verify all steps in the actions workflow have completed successfully. On success, a static tarball containing Kata release artifacts will be uploaded to the [Release page](https://github.com/kata-containers/kata-containers/releases).
@@ -242,8 +242,8 @@ On the other hand, running all non vCPU threads under a dedicated overhead cgroup
accurate metrics on the actual Kata Container pod overhead, allowing for tuning the overhead
cgroup size and constraints accordingly.

[linux-config]: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md
[cgroupspath]: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#cgroups-path
[linux-config]: https://github.com/opencontainers/runtime-spec/blob/main/config-linux.md
[cgroupspath]: https://github.com/opencontainers/runtime-spec/blob/main/config-linux.md#cgroups-path

# Supported cgroups
@@ -207,7 +207,7 @@ Metrics for Firecracker vmm.
| `kata_firecracker_uart`: <br> Metrics specific to the UART device. | `GAUGE` | | <ul><li>`item`<ul><li>`error_count`</li><li>`flush_count`</li><li>`missed_read_count`</li><li>`missed_write_count`</li><li>`read_count`</li><li>`write_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vcpu`: <br> Metrics specific to VCPUs' mode of functioning. | `GAUGE` | | <ul><li>`item`<ul><li>`exit_io_in`</li><li>`exit_io_out`</li><li>`exit_mmio_read`</li><li>`exit_mmio_write`</li><li>`failures`</li><li>`filter_cpuid`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vmm`: <br> Metrics specific to the machine manager as a whole. | `GAUGE` | | <ul><li>`item`<ul><li>`device_events`</li><li>`panic_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vsock`: <br> Vsock-related metrics. | `GAUGE` | | <ul><li>`item`<ul><li>`activate_fails`</li><li>`cfg_fails`</li><li>`conn_event_fails`</li><li>`conns_added`</li><li>`conns_killed`</li><li>`conns_removed`</li><li>`ev_queue_event_fails`</li><li>`killq_resync`</li><li>`muxer_event_fails`</li><li>`rx_bytes_count`</li><li>`rx_packets_count`</li><li>`rx_queue_event_count`</li><li>`rx_queue_event_fails`</li><li>`rx_read_fails`</li><li>`tx_bytes_count`</li><li>`tx_flush_fails`</li><li>`tx_packets_count`</li><li>`tx_queue_event_count`</li><li>`tx_queue_event_fails`</li><li>`tx_write_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vsock`: <br> VSOCK-related metrics. | `GAUGE` | | <ul><li>`item`<ul><li>`activate_fails`</li><li>`cfg_fails`</li><li>`conn_event_fails`</li><li>`conns_added`</li><li>`conns_killed`</li><li>`conns_removed`</li><li>`ev_queue_event_fails`</li><li>`killq_resync`</li><li>`muxer_event_fails`</li><li>`rx_bytes_count`</li><li>`rx_packets_count`</li><li>`rx_queue_event_count`</li><li>`rx_queue_event_fails`</li><li>`rx_read_fails`</li><li>`tx_bytes_count`</li><li>`tx_flush_fails`</li><li>`tx_packets_count`</li><li>`tx_queue_event_count`</li><li>`tx_queue_event_fails`</li><li>`tx_write_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |

### Kata guest OS metrics
@@ -20,7 +20,7 @@ required to spawn pods and containers, and this is the preferred way to run Kata
An equivalent shim implementation for CRI-O is planned.

### CRI-O
For CRI-O installation instructions, refer to the [CRI-O Tutorial](https://github.com/kubernetes-incubator/cri-o/blob/master/tutorial.md) page.
For CRI-O installation instructions, refer to the [CRI-O Tutorial](https://github.com/cri-o/cri-o/blob/main/tutorial.md) page.

The following sections show how to set up the CRI-O configuration file (default path: `/etc/crio/crio.conf`) for Kata.
@@ -30,7 +30,7 @@ Unless otherwise stated, all the following settings are specific to the `crio.ru
# runtime used and options for how to set up and manage the OCI runtime.
[crio.runtime]
```
A comprehensive documentation of the configuration file can be found [here](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md).
A comprehensive documentation of the configuration file can be found [here](https://github.com/cri-o/cri-o/blob/main/docs/crio.conf.5.md).

> **Note**: After any change to this file, the CRI-O daemon have to be restarted with:
>````
@@ -203,12 +203,11 @@ is highly recommended. For working with the agent, you may also wish to
[enable a debug console][setup-debug-console]
to allow you to access the VM environment.

[agent-ctl]: https://github.com/kata-containers/kata-containers/blob/main/tools/agent-ctl
[enable-full-debug]: https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md#enable-full-debug
[jaeger-all-in-one]: https://www.jaegertracing.io/docs/getting-started/
[jaeger-tracing]: https://www.jaegertracing.io
[opentelemetry]: https://opentelemetry.io
[osbuilder]: https://github.com/kata-containers/kata-containers/blob/main/tools/osbuilder
[setup-debug-console]: https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md#set-up-a-debug-console
[trace-forwarder]: https://github.com/kata-containers/kata-containers/blob/main/src/trace-forwarder
[trace-forwarder]: /src/trace-forwarder
[vsock]: https://wiki.qemu.org/Features/VirtioVsock
@@ -7,15 +7,15 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
serde_json = "1.0.39"
serde_json = "1.0.73"
# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"
slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug"] }
slog-json = "2.4.0"
slog-async = "2.7.0"
slog-scope = "4.4.0"

[dev-dependencies]
tempfile = "3.1.0"
tempfile = "3.2.0"
src/agent/Cargo.lock (746 lines changed, generated) — file diff suppressed because it is too large.
@@ -24,7 +24,7 @@ serial_test = "0.5.1"
# Async helpers
async-trait = "0.1.42"
async-recursion = "0.3.2"
futures = "0.3.12"
futures = "0.3.17"

# Async runtime
tokio = { version = "1", features = ["full"] }

@@ -45,10 +45,10 @@ slog-scope = "4.1.2"
slog-stdlog = "4.0.0"
log = "0.4.11"

prometheus = { version = "0.9.0", features = ["process"] }
procfs = "0.7.9"
prometheus = { version = "0.13.0", features = ["process"] }
procfs = "0.12.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.5" }
cgroups = { package = "cgroups-rs", version = "0.2.8" }

# Tracing
tracing = "0.1.26"
@@ -5,7 +5,7 @@ authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
edition = "2018"

[dependencies]
serde = "1.0.91"
serde_derive = "1.0.91"
serde_json = "1.0.39"
libc = "0.2.58"
serde = "1.0.131"
serde_derive = "1.0.131"
serde_json = "1.0.73"
libc = "0.2.112"
@@ -23,7 +23,7 @@ scan_fmt = "0.2"
regex = "1.1"
path-absolutize = "1.2.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.5" }
cgroups = { package = "cgroups-rs", version = "0.2.8" }
rlimit = "0.5.3"

tokio = { version = "1.2.0", features = ["sync", "io-util", "process", "time", "macros"] }
@@ -664,8 +664,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
let _ = unistd::close(crfd);
let _ = unistd::close(cwfd);

unistd::setsid().context("create a new session")?;
if oci_process.terminal {
unistd::setsid()?;
unsafe {
libc::ioctl(0, libc::TIOCSCTTY);
}
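The hunk above moves session setup out of the `if oci_process.terminal` branch: the child now always calls `setsid()`, and only claims the controlling terminal when a terminal was requested. A minimal sketch of that ordering, assuming the same `nix`, `libc` and `anyhow` crates the agent uses (the `setup_session` helper is illustrative, not from the codebase):

```rust
use anyhow::{Context, Result};
use nix::unistd;

// Illustrative helper: always start a new session, then optionally make
// stdin (fd 0) the controlling terminal of that new session.
fn setup_session(has_terminal: bool) -> Result<()> {
    unistd::setsid().context("create a new session")?;
    if has_terminal {
        // Only a session leader may acquire a controlling TTY.
        unsafe {
            libc::ioctl(0, libc::TIOCSCTTY);
        }
    }
    Ok(())
}

fn main() {
    // When run directly from a shell the process is usually already a
    // process-group leader, so setsid() can fail with EPERM; a freshly
    // forked container init child (as in the diff) succeeds.
    if let Err(e) = setup_session(false) {
        eprintln!("setup_session: {:#}", e);
    }
}
```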
@@ -35,17 +35,9 @@ use crate::log_child;
// struct is populated from the content in the /proc/<pid>/mountinfo file.
#[derive(std::fmt::Debug)]
pub struct Info {
id: i32,
parent: i32,
major: i32,
minor: i32,
root: String,
mount_point: String,
opts: String,
optional: String,
fstype: String,
source: String,
vfs_opts: String,
}

const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
@@ -112,6 +104,7 @@ lazy_static! {
}

#[inline(always)]
#[cfg(not(test))]
pub fn mount<
P1: ?Sized + NixPath,
P2: ?Sized + NixPath,

@@ -124,21 +117,42 @@
flags: MsFlags,
data: Option<&P4>,
) -> std::result::Result<(), nix::Error> {
#[cfg(not(test))]
return mount::mount(source, target, fstype, flags, data);
#[cfg(test)]
return Ok(());
mount::mount(source, target, fstype, flags, data)
}

#[inline(always)]
#[cfg(test)]
pub fn mount<
P1: ?Sized + NixPath,
P2: ?Sized + NixPath,
P3: ?Sized + NixPath,
P4: ?Sized + NixPath,
>(
_source: Option<&P1>,
_target: &P2,
_fstype: Option<&P3>,
_flags: MsFlags,
_data: Option<&P4>,
) -> std::result::Result<(), nix::Error> {
Ok(())
}

#[inline(always)]
#[cfg(not(test))]
pub fn umount2<P: ?Sized + NixPath>(
target: &P,
flags: MntFlags,
) -> std::result::Result<(), nix::Error> {
#[cfg(not(test))]
return mount::umount2(target, flags);
#[cfg(test)]
return Ok(());
mount::umount2(target, flags)
}

#[inline(always)]
#[cfg(test)]
pub fn umount2<P: ?Sized + NixPath>(
_target: &P,
_flags: MntFlags,
) -> std::result::Result<(), nix::Error> {
Ok(())
}
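The mount/umount2 wrappers above are split into paired definitions selected by `#[cfg(not(test))]` / `#[cfg(test)]`, replacing the earlier in-function `#[cfg]` branches. A minimal, generic sketch of the same compile-time stubbing pattern (the `remove_dir` wrapper and demo path are hypothetical, not part of the kata code):

```rust
use std::io;

// Real implementation, used in normal builds.
#[cfg(not(test))]
fn remove_dir(path: &str) -> io::Result<()> {
    std::fs::remove_dir_all(path)
}

// Test stub, compiled only under `cargo test`, so unit tests exercising
// callers never touch the real filesystem through this wrapper.
#[cfg(test)]
fn remove_dir(_path: &str) -> io::Result<()> {
    Ok(())
}

// Callers are written once and are unaware of which definition was picked.
fn cleanup(scratch: &str) -> io::Result<()> {
    remove_dir(scratch)
}

fn main() -> io::Result<()> {
    std::fs::create_dir_all("/tmp/kata-cfg-stub-demo")?;
    cleanup("/tmp/kata-cfg-stub-demo")
}
```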
pub fn init_rootfs(

@@ -450,14 +464,20 @@ fn mount_cgroups(
Ok(())
}

#[cfg(not(test))]
fn pivot_root<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
new_root: &P1,
put_old: &P2,
) -> anyhow::Result<(), nix::Error> {
#[cfg(not(test))]
return unistd::pivot_root(new_root, put_old);
#[cfg(test)]
return Ok(());
unistd::pivot_root(new_root, put_old)
}

#[cfg(test)]
fn pivot_root<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
_new_root: &P1,
_put_old: &P2,
) -> anyhow::Result<(), nix::Error> {
Ok(())
}

pub fn pivot_rootfs<P: ?Sized + NixPath + std::fmt::Debug>(path: &P) -> Result<()> {
|
||||
for (_index, line) in reader.lines().enumerate() {
|
||||
let line = line?;
|
||||
|
||||
let (id, parent, major, minor, root, mount_point, opts, optional) = scan_fmt!(
|
||||
//Example mountinfo format:
|
||||
// id
|
||||
// | / parent
|
||||
// | | / major:minor
|
||||
// | | | / root
|
||||
// | | | | / mount_point
|
||||
// | | | | | / opts
|
||||
// | | | | | | / optional
|
||||
// | | | | | | | / fstype
|
||||
// | | | | | | | | / source
|
||||
// | | | | | | | | | / vfs_opts
|
||||
// 22 96 0:21 / /sys rw,nosuid,nodev,noexec,relatime shared:2 - sysfs sysfs rw,seclabel
|
||||
|
||||
let (_id, _parent, _major, _minor, _root, mount_point, _opts, optional) = scan_fmt!(
|
||||
&line,
|
||||
MOUNTINFOFORMAT,
|
||||
i32,
|
||||
@@ -550,7 +583,7 @@ fn parse_mount_table() -> Result<Vec<Info>> {
|
||||
|
||||
let fields: Vec<&str> = line.split(" - ").collect();
|
||||
if fields.len() == 2 {
|
||||
let (fstype, source, vfs_opts) =
|
||||
let (fstype, _source, _vfs_opts) =
|
||||
scan_fmt!(fields[1], "{} {} {}", String, String, String)?;
|
||||
|
||||
let mut optional_new = String::new();
|
||||
@@ -559,17 +592,9 @@ fn parse_mount_table() -> Result<Vec<Info>> {
|
||||
}
|
||||
|
||||
let info = Info {
|
||||
id,
|
||||
parent,
|
||||
major,
|
||||
minor,
|
||||
root,
|
||||
mount_point,
|
||||
opts,
|
||||
optional: optional_new,
|
||||
fstype,
|
||||
source,
|
||||
vfs_opts,
|
||||
};
|
||||
|
||||
infos.push(info);
|
||||
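MOUNTINFOFORMAT and the annotated example line above describe how each /proc/\<pid\>/mountinfo row is split. A small self-contained sketch of parsing one such row with the `scan_fmt` crate, mirroring the two-stage parse (fixed fields first, then the " - " separated tail); the sample line is the one from the comment above:

```rust
use scan_fmt::scan_fmt;

const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let line = "22 96 0:21 / /sys rw,nosuid,nodev,noexec,relatime shared:2 - sysfs sysfs rw,seclabel";

    // Fields before the " - " separator: id, parent, major:minor, root,
    // mount point, mount options and the optional (tagged) fields.
    let (_id, _parent, _major, _minor, _root, mount_point, _opts, _optional) = scan_fmt!(
        line, MOUNTINFOFORMAT, i32, i32, i32, i32, String, String, String, String
    )?;

    // Fields after " - ": fstype, source and per-superblock options.
    let after = line.split(" - ").nth(1).unwrap_or_default();
    let (fstype, _source, _vfs_opts) = scan_fmt!(after, "{} {} {}", String, String, String)?;

    println!("{} is a {} mount", mount_point, fstype);
    Ok(())
}
```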
@@ -582,11 +607,15 @@ fn parse_mount_table() -> Result<Vec<Info>> {
}

#[inline(always)]
#[cfg(not(test))]
fn chroot<P: ?Sized + NixPath>(path: &P) -> Result<(), nix::Error> {
#[cfg(not(test))]
return unistd::chroot(path);
#[cfg(test)]
return Ok(());
unistd::chroot(path)
}

#[inline(always)]
#[cfg(test)]
fn chroot<P: ?Sized + NixPath>(_path: &P) -> Result<(), nix::Error> {
Ok(())
}

pub fn ms_move_root(rootfs: &str) -> Result<bool> {
@@ -745,7 +774,7 @@ fn mount_from(
let _ = fs::create_dir_all(&dir).map_err(|e| {
log_child!(
cfd_log,
"creat dir {}: {}",
"create dir {}: {}",
dir.to_str().unwrap(),
e.to_string()
)

@@ -1382,7 +1411,7 @@ mod tests {

for (i, t) in tests.iter().enumerate() {
// Create a string containing details of the test
let msg = format!("test[{}]: {:?}", i, t);
let msg = format!("test[{}]: {:?}", i, t.name);

// if is_symlink, then should be prepare the softlink environment
if t.symlink_path != "" {
@@ -23,50 +23,50 @@ macro_rules! sl {
|
||||
lazy_static! {
|
||||
|
||||
static ref AGENT_SCRAPE_COUNT: IntCounter =
|
||||
prometheus::register_int_counter!(format!("{}_{}",NAMESPACE_KATA_AGENT,"scrape_count").as_ref(), "Metrics scrape count").unwrap();
|
||||
prometheus::register_int_counter!(format!("{}_{}",NAMESPACE_KATA_AGENT,"scrape_count"), "Metrics scrape count").unwrap();
|
||||
|
||||
static ref AGENT_THREADS: Gauge =
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"threads").as_ref(), "Agent process threads").unwrap();
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"threads"), "Agent process threads").unwrap();
|
||||
|
||||
static ref AGENT_TOTAL_TIME: Gauge =
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_time").as_ref(), "Agent process total time").unwrap();
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_time"), "Agent process total time").unwrap();
|
||||
|
||||
static ref AGENT_TOTAL_VM: Gauge =
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_vm").as_ref(), "Agent process total VM size").unwrap();
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_vm"), "Agent process total VM size").unwrap();
|
||||
|
||||
static ref AGENT_TOTAL_RSS: Gauge =
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_rss").as_ref(), "Agent process total RSS size").unwrap();
|
||||
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_rss"), "Agent process total RSS size").unwrap();
|
||||
|
||||
static ref AGENT_PROC_STATUS: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_status").as_ref(), "Agent process status.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_status"), "Agent process status.", &["item"]).unwrap();
|
||||
|
||||
static ref AGENT_IO_STAT: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"io_stat").as_ref(), "Agent process IO statistics.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"io_stat"), "Agent process IO statistics.", &["item"]).unwrap();
|
||||
|
||||
static ref AGENT_PROC_STAT: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_stat").as_ref(), "Agent process statistics.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_stat"), "Agent process statistics.", &["item"]).unwrap();
|
||||
|
||||
// guest os metrics
|
||||
static ref GUEST_LOAD: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"load").as_ref() , "Guest system load.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"load") , "Guest system load.", &["item"]).unwrap();
|
||||
|
||||
static ref GUEST_TASKS: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"tasks").as_ref() , "Guest system load.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"tasks") , "Guest system load.", &["item"]).unwrap();
|
||||
|
||||
static ref GUEST_CPU_TIME: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"cpu_time").as_ref() , "Guest CPU statistics.", &["cpu","item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"cpu_time") , "Guest CPU statistics.", &["cpu","item"]).unwrap();
|
||||
|
||||
static ref GUEST_VM_STAT: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"vm_stat").as_ref() , "Guest virtual memory statistics.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"vm_stat") , "Guest virtual memory statistics.", &["item"]).unwrap();
|
||||
|
||||
static ref GUEST_NETDEV_STAT: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"netdev_stat").as_ref() , "Guest net devices statistics.", &["interface","item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"netdev_stat") , "Guest net devices statistics.", &["interface","item"]).unwrap();
|
||||
|
||||
static ref GUEST_DISKSTAT: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"diskstat").as_ref() , "Disks statistics in system.", &["disk","item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"diskstat") , "Disks statistics in system.", &["disk","item"]).unwrap();
|
||||
|
||||
static ref GUEST_MEMINFO: GaugeVec =
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"meminfo").as_ref() , "Statistics about memory usage in the system.", &["item"]).unwrap();
|
||||
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"meminfo") , "Statistics about memory usage in the system.", &["item"]).unwrap();
|
||||
}
|
||||
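The repeated change above drops `.as_ref()` from the metric names: with the prometheus 0.13 macros (the version the Cargo.toml hunk moves to) the name argument can be any value convertible into a `String`, so the `format!` result is passed directly. A minimal sketch of registering and updating one gauge in that style (crate choice and metric name are illustrative):

```rust
use lazy_static::lazy_static;
use prometheus::Gauge;

const NAMESPACE_KATA_AGENT: &str = "kata_agent";

lazy_static! {
    // `format!` yields a String; the register_* macros accept it directly,
    // so no `.as_ref()` conversion is needed any more.
    static ref AGENT_THREADS: Gauge = prometheus::register_gauge!(
        format!("{}_{}", NAMESPACE_KATA_AGENT, "threads"),
        "Agent process threads"
    )
    .unwrap();
}

fn main() {
    AGENT_THREADS.set(4.0);
    println!("kata_agent_threads = {}", AGENT_THREADS.get());
}
```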
#[instrument]

@@ -348,17 +348,17 @@ fn set_gauge_vec_cpu_time(gv: &prometheus::GaugeVec, cpu: &str, cpu_time: &procf
gv.with_label_values(&[cpu, "idle"])
.set(cpu_time.idle as f64);
gv.with_label_values(&[cpu, "iowait"])
.set(cpu_time.iowait.unwrap_or(0.0) as f64);
.set(cpu_time.iowait.unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "irq"])
.set(cpu_time.irq.unwrap_or(0.0) as f64);
.set(cpu_time.irq.unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "softirq"])
.set(cpu_time.softirq.unwrap_or(0.0) as f64);
.set(cpu_time.softirq.unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "steal"])
.set(cpu_time.steal.unwrap_or(0.0) as f64);
.set(cpu_time.steal.unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "guest"])
.set(cpu_time.guest.unwrap_or(0.0) as f64);
.set(cpu_time.guest.unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "guest_nice"])
.set(cpu_time.guest_nice.unwrap_or(0.0) as f64);
.set(cpu_time.guest_nice.unwrap_or(0) as f64);
}

#[instrument]

@@ -470,7 +470,7 @@ fn set_gauge_vec_proc_status(gv: &prometheus::GaugeVec, status: &procfs::process
gv.with_label_values(&["vmswap"])
.set(status.vmswap.unwrap_or(0) as f64);
gv.with_label_values(&["hugetlbpages"])
.set(status.hugetblpages.unwrap_or(0) as f64);
.set(status.hugetlbpages.unwrap_or(0) as f64);
gv.with_label_values(&["voluntary_ctxt_switches"])
.set(status.voluntary_ctxt_switches.unwrap_or(0) as f64);
gv.with_label_values(&["nonvoluntary_ctxt_switches"])
@@ -405,14 +405,18 @@ async fn bind_watcher_storage_handler(
logger: &Logger,
storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>,
cid: Option<String>,
) -> Result<()> {
let mut locked = sandbox.lock().await;
let container_id = locked.id.clone();

locked
.bind_watcher
.add_container(container_id, iter::once(storage.clone()), logger)
.await
if let Some(cid) = cid {
locked
.bind_watcher
.add_container(cid, iter::once(storage.clone()), logger)
.await
} else {
Ok(())
}
}

// mount_storage performs the mount described by the storage structure.

@@ -518,6 +522,7 @@ pub async fn add_storages(
logger: Logger,
storages: Vec<Storage>,
sandbox: Arc<Mutex<Sandbox>>,
cid: Option<String>,
) -> Result<Vec<String>> {
let mut mount_list = Vec::new();

@@ -548,7 +553,8 @@ pub async fn add_storages(
}
DRIVER_NVDIMM_TYPE => nvdimm_storage_handler(&logger, &storage, sandbox.clone()).await,
DRIVER_WATCHABLE_BIND_TYPE => {
bind_watcher_storage_handler(&logger, &storage, sandbox.clone()).await?;
bind_watcher_storage_handler(&logger, &storage, sandbox.clone(), cid.clone())
.await?;
// Don't register watch mounts, they're handled separately by the watcher.
Ok(String::new())
}
@@ -5,30 +5,22 @@

use anyhow::{anyhow, Result};
use nix::mount::{self, MsFlags};
use protocols::types::{Interface, Route};
use slog::Logger;
use std::collections::HashMap;
use std::fs;

const KATA_GUEST_SANDBOX_DNS_FILE: &str = "/run/kata-containers/sandbox/resolv.conf";
const GUEST_DNS_FILE: &str = "/etc/resolv.conf";

// Network fully describes a sandbox network with its interfaces, routes and dns
// Network describes a sandbox network, includings its dns
// related information.
#[derive(Debug, Default)]
pub struct Network {
ifaces: HashMap<String, Interface>,
routes: Vec<Route>,
dns: Vec<String>,
}

impl Network {
pub fn new() -> Network {
Network {
ifaces: HashMap::new(),
routes: Vec::new(),
dns: Vec::new(),
}
Network { dns: Vec::new() }
}

pub fn set_dns(&mut self, dns: String) {
@@ -148,6 +148,10 @@ impl AgentService {
};

info!(sl!(), "receive createcontainer, spec: {:?}", &oci);
info!(
sl!(),
"receive createcontainer, storages: {:?}", &req.storages
);

// Some devices need some extra processing (the ones invoked with
// --device for instance), and that's what this call is doing. It
@@ -163,7 +167,13 @@ impl AgentService {
// After all those storages have been processed, no matter the order
// here, the agent will rely on rustjail (using the oci.Mounts
// list) to bind mount all of them inside the container.
let m = add_storages(sl!(), req.storages.to_vec(), self.sandbox.clone()).await?;
let m = add_storages(
sl!(),
req.storages.to_vec(),
self.sandbox.clone(),
Some(req.container_id.clone()),
)
.await?;
{
sandbox = self.sandbox.clone();
s = sandbox.lock().await;
@@ -573,6 +583,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "remove_container", req);
is_allowed!(req);

match self.do_remove_container(req).await {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Ok(_) => Ok(Empty::new()),
@@ -1002,7 +1013,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
}

match add_storages(sl!(), req.storages.to_vec(), self.sandbox.clone()).await {
match add_storages(sl!(), req.storages.to_vec(), self.sandbox.clone(), None).await {
Ok(m) => {
let sandbox = self.sandbox.clone();
let mut s = sandbox.lock().await;
@@ -1709,6 +1720,7 @@ mod tests {
fd: -1,
mh: MessageHeader::default(),
metadata: std::collections::HashMap::new(),
timeout_nano: 0,
}
}

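Taken together, the hunks above change the add_storages call sites: create_container now forwards its container id, while the sandbox-level path passes None. A rough, hedged sketch of how a caller uses the new signature (the wrapper function and the literal id below are made up for illustration):

// Not part of the diff -- a minimal caller sketch for the new signature.
async fn demo_add_storages(
    logger: Logger,
    storages: Vec<Storage>,
    sandbox: Arc<Mutex<Sandbox>>,
) -> Result<()> {
    // Container-scoped call: pass the container id so watchable binds can be
    // associated with this container by the watcher.
    let _mounts = add_storages(
        logger.clone(),
        storages.clone(),
        sandbox.clone(),
        Some("example-container-id".to_string()), // hypothetical id
    )
    .await?;

    // Sandbox-level call (e.g. create_sandbox): there is no container yet.
    let _mounts = add_storages(logger, storages, sandbox, None).await?;
    Ok(())
}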
@@ -240,7 +240,6 @@ pub(crate) fn spawn_test_watcher(sandbox: Arc<Mutex<Sandbox>>, uev: Uevent) {
if matcher.is_match(&uev) {
let (_, sender) = watch.take().unwrap();
let _ = sender.send(uev.clone());
return;
}
}
});

@@ -86,7 +86,6 @@ mod tests {
#[derive(Debug, Default, Clone)]
struct BufWriter {
data: Arc<Mutex<Vec<u8>>>,
slow_write: bool,
write_delay: Duration,
}

@@ -94,7 +93,6 @@ mod tests {
fn new() -> Self {
BufWriter {
data: Arc::new(Mutex::new(Vec::<u8>::new())),
slow_write: false,
write_delay: Duration::new(0, 0),
}
}
@@ -173,45 +171,35 @@ mod tests {
#[derive(Debug)]
struct TestData {
reader_value: String,
result: io::Result<u64>,
}

let tests = &[
TestData {
reader_value: "".into(),
result: Ok(0),
},
TestData {
reader_value: "a".into(),
result: Ok(1),
},
TestData {
reader_value: "foo".into(),
result: Ok(3),
},
TestData {
reader_value: "b".repeat(BUF_SIZE - 1),
result: Ok((BUF_SIZE - 1) as u64),
},
TestData {
reader_value: "c".repeat(BUF_SIZE),
result: Ok((BUF_SIZE) as u64),
},
TestData {
reader_value: "d".repeat(BUF_SIZE + 1),
result: Ok((BUF_SIZE + 1) as u64),
},
TestData {
reader_value: "e".repeat((2 * BUF_SIZE) - 1),
result: Ok(((2 * BUF_SIZE) - 1) as u64),
},
TestData {
reader_value: "f".repeat(2 * BUF_SIZE),
result: Ok((2 * BUF_SIZE) as u64),
},
TestData {
reader_value: "g".repeat((2 * BUF_SIZE) + 1),
result: Ok(((2 * BUF_SIZE) + 1) as u64),
},
];

@@ -49,7 +49,7 @@ struct Storage {
/// the source becomes too large, either in number of files (>16) or total size (>1MB).
watch: bool,

/// The list of files to watch from the source mount point and updated in the target one.
/// The list of files, directories, symlinks to watch from the source mount point and updated in the target one.
watched_files: HashMap<PathBuf, SystemTime>,
}

@@ -79,6 +79,20 @@ impl Drop for Storage {
}
}

async fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
if fs::symlink_metadata(&from).await?.file_type().is_symlink() {
// if source is a symlink, create new symlink with same link source. If
// the symlink exists, remove and create new one:
if fs::symlink_metadata(&to).await.is_ok() {
fs::remove_file(&to).await?;
}
fs::symlink(fs::read_link(&from).await?, &to).await?;
} else {
fs::copy(from, to).await?;
}
Ok(())
}

impl Storage {
async fn new(storage: protos::Storage) -> Result<Storage> {
let entry = Storage {
@@ -93,6 +107,17 @@ impl Storage {
async fn update_target(&self, logger: &Logger, source_path: impl AsRef<Path>) -> Result<()> {
let source_file_path = source_path.as_ref();

// if we are creating a directory: just create it, nothing more to do
if source_file_path.symlink_metadata()?.file_type().is_dir() {
let dest_file_path = self.make_target_path(&source_file_path)?;

fs::create_dir_all(&dest_file_path)
.await
.with_context(|| format!("Unable to mkdir all for {}", dest_file_path.display()))?;
return Ok(());
}

// Assume we are dealing with either a file or a symlink now:
let dest_file_path = if self.source_mount_point.is_file() {
// Simple file to file copy
// Assume target mount is a file path
@@ -110,19 +135,13 @@ impl Storage {
dest_file_path
};

debug!(
logger,
"Copy from {} to {}",
source_file_path.display(),
dest_file_path.display()
);
fs::copy(&source_file_path, &dest_file_path)
copy(&source_file_path, &dest_file_path)
.await
.with_context(|| {
format!(
"Copy from {} to {} failed",
source_file_path.display(),
dest_file_path.display()
dest_file_path.display(),
)
})?;

@@ -135,7 +154,7 @@ impl Storage {
let mut remove_list = Vec::new();
let mut updated_files: Vec<PathBuf> = Vec::new();

// Remove deleted files for tracking list
// Remove deleted files for tracking list.
self.watched_files.retain(|st, _| {
if st.exists() {
true
@@ -147,10 +166,19 @@ impl Storage {

// Delete from target
for path in remove_list {
// File has been deleted, remove it from target mount
let target = self.make_target_path(path)?;
debug!(logger, "Removing file from mount: {}", target.display());
let _ = fs::remove_file(target).await;
// The target may be a directory or a file. If it is a directory that is removed,
// we'll remove all files under that directory as well. Because of this, there's a
// chance the target (a subdirectory or file under a prior removed target) was already
// removed. Make sure we check if the target exists before checking the metadata, and
// don't return an error if the remove fails
if target.exists() && target.symlink_metadata()?.file_type().is_dir() {
debug!(logger, "Removing a directory: {}", target.display());
let _ = fs::remove_dir_all(target).await;
} else {
debug!(logger, "Removing a file: {}", target.display());
let _ = fs::remove_file(target).await;
}
}

// Scan new & changed files
@@ -182,15 +210,16 @@ impl Storage {
let mut size: u64 = 0;
debug!(logger, "Scanning path: {}", path.display());

if path.is_file() {
let metadata = path
.metadata()
.with_context(|| format!("Failed to query metadata for: {}", path.display()))?;
let metadata = path
.symlink_metadata()
.with_context(|| format!("Failed to query metadata for: {}", path.display()))?;

let modified = metadata
.modified()
.with_context(|| format!("Failed to get modified date for: {}", path.display()))?;
let modified = metadata
.modified()
.with_context(|| format!("Failed to get modified date for: {}", path.display()))?;

// Treat files and symlinks the same:
if path.is_file() || metadata.file_type().is_symlink() {
size += metadata.len();

// Insert will return old entry if any
@@ -212,6 +241,16 @@ impl Storage {
}
);
} else {
// Handling regular directories - check to see if this directory is already being tracked, and
// track if not:
if self
.watched_files
.insert(path.to_path_buf(), modified)
.is_none()
{
update_list.push(path.to_path_buf());
}

// Scan dir recursively
let mut entries = fs::read_dir(path)
.await
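The key change in the scan path above is the switch from metadata() to symlink_metadata(): the former follows symlinks, the latter reports on the link itself, which is what lets the watcher track Kubernetes-style ..data indirection links as entries of their own. A self-contained illustration of that distinction follows (the path is hypothetical and the symlink is assumed to exist beforehand):

// Standalone illustration, not from the diff: std::fs::metadata() follows a
// symlink, std::fs::symlink_metadata() does not.
use std::fs;

fn main() -> std::io::Result<()> {
    // Assume /tmp/example-link is a symlink pointing at a regular file.
    let link = "/tmp/example-link";
    let followed = fs::metadata(link)?; // metadata of the file the link points to
    let not_followed = fs::symlink_metadata(link)?; // metadata of the link itself
    assert!(followed.file_type().is_file());
    assert!(not_followed.file_type().is_symlink());
    Ok(())
}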
@@ -612,7 +651,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
// setup storage3: many files, but still watchable
|
||||
for i in 1..MAX_ENTRIES_PER_STORAGE + 1 {
|
||||
for i in 1..MAX_ENTRIES_PER_STORAGE {
|
||||
fs::write(src3_path.join(format!("{}.txt", i)), "original").unwrap();
|
||||
}
|
||||
|
||||
@@ -622,6 +661,9 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
entries
|
||||
.add(std::iter::once(storage0), &logger)
|
||||
.await
|
||||
@@ -674,7 +716,7 @@ mod tests {
|
||||
std::fs::read_dir(entries.0[3].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
MAX_ENTRIES_PER_STORAGE
|
||||
MAX_ENTRIES_PER_STORAGE - 1
|
||||
);
|
||||
|
||||
// Add two files to storage 0, verify it is updated without needing to run check:
|
||||
@@ -692,6 +734,9 @@ mod tests {
|
||||
"updated"
|
||||
);
|
||||
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
//
|
||||
// Prepare for second check: update mount sources
|
||||
//
|
||||
@@ -744,7 +789,7 @@ mod tests {
|
||||
std::fs::read_dir(entries.0[3].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
MAX_ENTRIES_PER_STORAGE + 1
|
||||
MAX_ENTRIES_PER_STORAGE
|
||||
);
|
||||
|
||||
// verify that we can remove files as well, but that it isn't observed until check is run
|
||||
@@ -822,15 +867,20 @@ mod tests {
|
||||
fs::remove_file(source_dir.path().join("big.txt")).unwrap();
|
||||
fs::remove_file(source_dir.path().join("too-big.txt")).unwrap();
|
||||
|
||||
// Up to 16 files should be okay:
|
||||
for i in 1..MAX_ENTRIES_PER_STORAGE + 1 {
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
// Up to 15 files should be okay (can watch 15 files + 1 directory)
|
||||
for i in 1..MAX_ENTRIES_PER_STORAGE {
|
||||
fs::write(source_dir.path().join(format!("{}.txt", i)), "original").unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), MAX_ENTRIES_PER_STORAGE);
|
||||
assert_eq!(
|
||||
entry.scan(&logger).await.unwrap(),
|
||||
MAX_ENTRIES_PER_STORAGE - 1
|
||||
);
|
||||
|
||||
// 17 files is too many:
|
||||
fs::write(source_dir.path().join("17.txt"), "updated").unwrap();
|
||||
// 16 files wll be too many:
|
||||
fs::write(source_dir.path().join("16.txt"), "updated").unwrap();
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
|
||||
// Expect to receive a MountTooManyFiles error
|
||||
@@ -843,6 +893,180 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_copy() {
|
||||
// prepare tmp src/destination
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
let dest_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
// verify copy of a regular file
|
||||
let src_file = source_dir.path().join("file.txt");
|
||||
let dst_file = dest_dir.path().join("file.txt");
|
||||
fs::write(&src_file, "foo").unwrap();
|
||||
copy(&src_file, &dst_file).await.unwrap();
|
||||
// verify destination:
|
||||
assert!(!fs::symlink_metadata(dst_file)
|
||||
.unwrap()
|
||||
.file_type()
|
||||
.is_symlink());
|
||||
|
||||
// verify copy of a symlink
|
||||
let src_symlink_file = source_dir.path().join("symlink_file.txt");
|
||||
let dst_symlink_file = dest_dir.path().join("symlink_file.txt");
|
||||
tokio::fs::symlink(&src_file, &src_symlink_file)
|
||||
.await
|
||||
.unwrap();
|
||||
copy(src_symlink_file, &dst_symlink_file).await.unwrap();
|
||||
// verify destination:
|
||||
assert!(fs::symlink_metadata(&dst_symlink_file)
|
||||
.unwrap()
|
||||
.file_type()
|
||||
.is_symlink());
|
||||
assert_eq!(fs::read_link(&dst_symlink_file).unwrap(), src_file);
|
||||
assert_eq!(fs::read_to_string(&dst_symlink_file).unwrap(), "foo");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn watch_directory_verify_dir_removal() {
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
let dest_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let mut entry = Storage::new(protos::Storage {
|
||||
source: source_dir.path().display().to_string(),
|
||||
mount_point: dest_dir.path().display().to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
// create a path we'll remove later
|
||||
fs::create_dir_all(source_dir.path().join("tmp")).unwrap();
|
||||
fs::write(&source_dir.path().join("tmp/test-file"), "foo").unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 3); // root, ./tmp, test-file
|
||||
|
||||
// Verify expected directory, file:
|
||||
assert_eq!(
|
||||
std::fs::read_dir(dest_dir.path().join("tmp"))
|
||||
.unwrap()
|
||||
.count(),
|
||||
1
|
||||
);
|
||||
assert_eq!(std::fs::read_dir(&dest_dir).unwrap().count(), 1);
|
||||
|
||||
// Now, remove directory, and verify that the directory (and its file) are removed:
|
||||
fs::remove_dir_all(source_dir.path().join("tmp")).unwrap();
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
assert_eq!(std::fs::read_dir(&dest_dir).unwrap().count(), 0);
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn watch_directory_with_symlinks() {
|
||||
// Prepare source directory:
|
||||
// ..2021_10_29_03_10_48.161654083/file.txt
|
||||
// ..data -> ..2021_10_29_03_10_48.161654083
|
||||
// file.txt -> ..data/file.txt
|
||||
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
let actual_dir = source_dir.path().join("..2021_10_29_03_10_48.161654083");
|
||||
let actual_file = actual_dir.join("file.txt");
|
||||
let sym_dir = source_dir.path().join("..data");
|
||||
let sym_file = source_dir.path().join("file.txt");
|
||||
|
||||
let relative_to_dir = PathBuf::from("..2021_10_29_03_10_48.161654083");
|
||||
|
||||
// create backing file/path
|
||||
fs::create_dir_all(&actual_dir).unwrap();
|
||||
fs::write(&actual_file, "two").unwrap();
|
||||
|
||||
// create indirection symlink directory that points to the directory that holds the actual file:
|
||||
tokio::fs::symlink(&relative_to_dir, &sym_dir)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// create presented data file symlink:
|
||||
tokio::fs::symlink(PathBuf::from("..data/file.txt"), sym_file)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let dest_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
let mut entry = Storage::new(protos::Storage {
|
||||
source: source_dir.path().display().to_string(),
|
||||
mount_point: dest_dir.path().display().to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 5);
|
||||
|
||||
// Should copy no files since nothing is changed since last check
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
// now what, what is updated?
|
||||
fs::write(actual_file, "updated").unwrap();
|
||||
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
|
||||
assert_eq!(
|
||||
fs::read_to_string(dest_dir.path().join("file.txt")).unwrap(),
|
||||
"updated"
|
||||
);
|
||||
|
||||
// Verify that resulting file.txt is a symlink:
|
||||
assert!(
|
||||
tokio::fs::symlink_metadata(dest_dir.path().join("file.txt"))
|
||||
.await
|
||||
.unwrap()
|
||||
.file_type()
|
||||
.is_symlink()
|
||||
);
|
||||
|
||||
// Verify that .data directory is a symlink:
|
||||
assert!(tokio::fs::symlink_metadata(&dest_dir.path().join("..data"))
|
||||
.await
|
||||
.unwrap()
|
||||
.file_type()
|
||||
.is_symlink());
|
||||
|
||||
// Should copy no new files after copy happened
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
// Now, simulate configmap update.
|
||||
// - create a new actual dir/file,
|
||||
// - update the symlink directory to point to this one
|
||||
// - remove old dir/file
|
||||
let new_actual_dir = source_dir.path().join("..2021_10_31");
|
||||
let new_actual_file = new_actual_dir.join("file.txt");
|
||||
fs::create_dir_all(&new_actual_dir).unwrap();
|
||||
fs::write(&new_actual_file, "new configmap").unwrap();
|
||||
|
||||
tokio::fs::remove_file(&sym_dir).await.unwrap();
|
||||
tokio::fs::symlink(PathBuf::from("..2021_10_31"), &sym_dir)
|
||||
.await
|
||||
.unwrap();
|
||||
tokio::fs::remove_dir_all(&actual_dir).await.unwrap();
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 3); // file, file-dir, symlink
|
||||
assert_eq!(
|
||||
fs::read_to_string(dest_dir.path().join("file.txt")).unwrap(),
|
||||
"new configmap"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn watch_directory() {
|
||||
// Prepare source directory:
|
||||
@@ -853,6 +1077,13 @@ mod tests {
|
||||
fs::create_dir_all(source_dir.path().join("A/B")).unwrap();
|
||||
fs::write(source_dir.path().join("A/B/1.txt"), "two").unwrap();
|
||||
|
||||
// A/C is an empty directory
|
||||
let empty_dir = "A/C";
|
||||
fs::create_dir_all(source_dir.path().join(empty_dir)).unwrap();
|
||||
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
let dest_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let mut entry = Storage::new(protos::Storage {
|
||||
@@ -865,13 +1096,14 @@ mod tests {
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 2);
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 6);
|
||||
|
||||
// check empty directory
|
||||
assert!(dest_dir.path().join(empty_dir).exists());
|
||||
|
||||
// Should copy no files since nothing is changed since last check
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
// Should copy 1 file
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
fs::write(source_dir.path().join("A/B/1.txt"), "updated").unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
assert_eq!(
|
||||
@@ -879,12 +1111,21 @@ mod tests {
|
||||
"updated"
|
||||
);
|
||||
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
// Should copy no new files after copy happened
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
// Update another file
|
||||
fs::write(source_dir.path().join("1.txt"), "updated").unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
|
||||
// create another empty directory A/C/D
|
||||
let empty_dir = "A/C/D";
|
||||
fs::create_dir_all(source_dir.path().join(empty_dir)).unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
assert!(dest_dir.path().join(empty_dir).exists());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -909,7 +1150,9 @@ mod tests {
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
fs::write(&source_file, "two").unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
assert_eq!(fs::read_to_string(&dest_file).unwrap(), "two");
|
||||
@@ -935,8 +1178,9 @@ mod tests {
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
|
||||
assert_eq!(entry.watched_files.len(), 1);
|
||||
// expect the root directory and the file:
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 2);
|
||||
assert_eq!(entry.watched_files.len(), 2);
|
||||
|
||||
assert!(target_file.exists());
|
||||
assert!(entry.watched_files.contains_key(&source_file));
|
||||
@@ -946,7 +1190,7 @@ mod tests {
|
||||
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 0);
|
||||
|
||||
assert_eq!(entry.watched_files.len(), 0);
|
||||
assert_eq!(entry.watched_files.len(), 1);
|
||||
assert!(!target_file.exists());
|
||||
}
|
||||
|
||||
@@ -992,6 +1236,8 @@ mod tests {
|
||||
watcher.mount(&logger).await.unwrap();
|
||||
assert!(is_mounted(WATCH_MOUNT_POINT_PATH).unwrap());
|
||||
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
watcher.cleanup();
|
||||
assert!(!is_mounted(WATCH_MOUNT_POINT_PATH).unwrap());
|
||||
}
|
||||
|
||||
@@ -496,7 +496,11 @@ BUILDFLAGS := -buildmode=pie -mod=vendor ${BUILDTAGS}

# whether stipping the binary
ifeq ($(STRIP),yes)
KATA_LDFLAGS := -ldflags "-w -s"
KATA_LDFLAGS = -w -s
endif

ifeq ($(ARCH),s390x)
KATA_LDFLAGS += -extldflags=-Wl,--s390-pgste
endif

# Return non-empty string if specified directory exists
@@ -528,7 +532,7 @@ monitor: $(MONITOR_OUTPUT)
netmon: $(NETMON_RUNTIME_OUTPUT)

$(NETMON_RUNTIME_OUTPUT): $(SOURCES) VERSION
$(QUIET_BUILD)(cd $(NETMON_DIR) && go build $(BUILDFLAGS) -o $@ -ldflags "-X main.version=$(VERSION)" $(KATA_LDFLAGS))
$(QUIET_BUILD)(cd $(NETMON_DIR) && go build $(BUILDFLAGS) -o $@ -ldflags "-X main.version=$(VERSION)" -ldflags "$(KATA_LDFLAGS)")

runtime: $(RUNTIME_OUTPUT) $(CONFIGS)
.DEFAULT: default
@@ -564,10 +568,10 @@ endef
GENERATED_FILES += pkg/katautils/config-settings.go

$(RUNTIME_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) | show-summary
$(QUIET_BUILD)(cd $(RUNTIME_DIR) && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
$(QUIET_BUILD)(cd $(RUNTIME_DIR) && go build -ldflags "$(KATA_LDFLAGS)" $(BUILDFLAGS) -o $@ .)

$(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build -ldflags "$(KATA_LDFLAGS)" $(BUILDFLAGS) -o $@ .)

$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && CGO_ENABLED=0 go build \

@@ -9,10 +9,12 @@ require (
github.com/containerd/cgroups v1.0.1
github.com/containerd/console v1.0.2
github.com/containerd/containerd v1.5.7
github.com/containerd/cri-containerd v1.11.1-0.20190125013620-4dd6735020f5
github.com/containerd/fifo v1.0.0
github.com/containerd/ttrpc v1.0.2
github.com/containerd/ttrpc v1.1.0
github.com/containerd/typeurl v1.0.2
github.com/containernetworking/plugins v0.9.1
github.com/coreos/go-systemd/v22 v22.3.2
github.com/cri-o/cri-o v1.0.0-rc2.0.20170928185954-3394b3b2d6af
github.com/fsnotify/fsnotify v1.4.9
github.com/go-ini/ini v1.28.2
@@ -21,6 +23,7 @@ require (
github.com/go-openapi/strfmt v0.18.0
github.com/go-openapi/swag v0.19.5
github.com/go-openapi/validate v0.18.0
github.com/godbus/dbus/v5 v5.0.4
github.com/gogo/protobuf v1.3.2
github.com/hashicorp/go-multierror v1.0.0
github.com/intel-go/cpuid v0.0.0-20210602155658-5747e5cec0d9
@@ -58,7 +61,7 @@ require (
)

replace (
github.com/containerd/containerd => github.com/containerd/containerd v1.5.7
github.com/containerd/containerd => github.com/containerd/containerd v1.5.8
github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.1
github.com/uber-go/atomic => go.uber.org/atomic v1.5.1
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8

@@ -52,8 +52,8 @@ github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O
|
||||
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||
github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM=
|
||||
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M=
|
||||
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
@@ -82,6 +82,7 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
@@ -93,6 +94,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI=
|
||||
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
@@ -106,11 +108,13 @@ github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f2
|
||||
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
|
||||
github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
|
||||
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
||||
github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
|
||||
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
|
||||
github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw=
|
||||
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
|
||||
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
|
||||
github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
|
||||
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
|
||||
github.com/containerd/cri-containerd v1.11.1-0.20190125013620-4dd6735020f5 h1:/srF029I+oDfm/qeltxCGJyJ8urmlqWGOQmQ7HvwrRc=
|
||||
github.com/containerd/cri-containerd v1.11.1-0.20190125013620-4dd6735020f5/go.mod h1:wxbGdReWGCalzGOEpifoHeYCK4xAgnj4o/4bVB+9voU=
|
||||
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
|
||||
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
|
||||
@@ -119,8 +123,9 @@ github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nN
|
||||
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
|
||||
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
|
||||
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
|
||||
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
|
||||
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
|
||||
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
|
||||
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
|
||||
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
|
||||
@@ -151,6 +156,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/cri-o/cri-o v1.0.0-rc2.0.20170928185954-3394b3b2d6af h1:H6nLV96F1LkWizYLQtrMtqJBrlJxnpjgisHsTsOS2HU=
|
||||
github.com/cri-o/cri-o v1.0.0-rc2.0.20170928185954-3394b3b2d6af/go.mod h1:POmDVglzQ2jWTlL9ZCfZ8d1QjLhmk0oB36O8T0oG75Y=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
|
||||
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
|
||||
@@ -181,6 +187,7 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
@@ -480,6 +487,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
@@ -838,8 +846,9 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -27,11 +27,10 @@ import (
"github.com/pkg/errors"

// only register the proto type
crioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"
_ "github.com/containerd/containerd/runtime/linux/runctypes"
_ "github.com/containerd/containerd/runtime/v2/runc/options"

crioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"
oldcrioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"
oldcrioption "github.com/containerd/cri-containerd/pkg/api/runtimeoptions/v1"

"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"

@@ -15,8 +15,8 @@ import (
"testing"

"github.com/containerd/containerd/namespaces"
crioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
crioption "github.com/containerd/cri-containerd/pkg/api/runtimeoptions/v1"
"github.com/containerd/typeurl"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"

@@ -24,7 +24,7 @@ const (
RuntimeContainerd = "containerd"
RuntimeCRIO = "cri-o"
fsMonitorRetryDelaySeconds = 60
podCacheRefreshDelaySeconds = 5
podCacheRefreshDelaySeconds = 60
)

// SetLogger sets the logger for katamonitor package.
@@ -85,7 +85,7 @@ func (km *KataMonitor) startPodCacheUpdater() {
break
}
// we refresh the pod cache once if we get multiple add/delete pod events in a short time (< podCacheRefreshDelaySeconds)
cacheUpdateTimer := time.NewTimer(podCacheRefreshDelaySeconds * time.Second)
cacheUpdateTimer := time.NewTimer(5 * time.Second)
cacheUpdateTimerWasSet := false
for {
select {

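The comment in the last hunk describes a debounce: a burst of pod add/delete events inside a short window should trigger a single cache refresh. The hunk itself is Go; purely as an illustration of the same coalescing pattern, here is a hedged tokio sketch (the names, channel type, and 5-second window are assumptions, not upstream code):

// Illustration only: coalesce a burst of events into one refresh.
use std::time::Duration;
use tokio::{sync::mpsc, time};

async fn pod_cache_updater(mut events: mpsc::Receiver<()>) {
    loop {
        // Block until the first event arrives (None means the sender is gone).
        if events.recv().await.is_none() {
            return;
        }
        // Absorb any further events that land inside the quiet window so a
        // burst of notifications results in a single refresh.
        let quiet = time::sleep(Duration::from_secs(5));
        tokio::pin!(quiet);
        loop {
            tokio::select! {
                _ = &mut quiet => break,
                maybe = events.recv() => {
                    if maybe.is_none() {
                        return;
                    }
                    // Keep draining; the refresh happens once the window closes.
                }
            }
        }
        refresh_pod_cache().await;
    }
}

// Placeholder so the sketch compiles on its own; the real refresh logic lives
// in the Go monitor shown above.
async fn refresh_pod_cache() {}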
src/runtime/vendor/github.com/Microsoft/hcsshim/go.mod (generated, vendored; 10 changes)
@@ -4,21 +4,25 @@ go 1.13

require (
github.com/Microsoft/go-winio v0.4.17
github.com/cenkalti/backoff/v4 v4.1.1
github.com/containerd/cgroups v1.0.1
github.com/containerd/console v1.0.2
github.com/containerd/containerd v1.5.1
github.com/containerd/containerd v1.4.9
github.com/containerd/continuity v0.1.0 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/containerd/go-runc v1.0.0
github.com/containerd/ttrpc v1.0.2
github.com/containerd/ttrpc v1.1.0
github.com/containerd/typeurl v1.0.2
github.com/gogo/protobuf v1.3.2
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.2
go.opencensus.io v0.22.3
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20210324051608-47abb6519492
google.golang.org/grpc v1.33.2
gotest.tools/v3 v3.0.3 // indirect
)

replace (

src/runtime/vendor/github.com/Microsoft/hcsshim/go.sum (generated, vendored; 681 changes)
@@ -1,779 +1,218 @@
|
||||
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
|
||||
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
||||
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
|
||||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
|
||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
|
||||
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
|
||||
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
|
||||
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
|
||||
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
|
||||
github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
|
||||
github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
|
||||
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
|
||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
||||
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
|
||||
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
||||
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
||||
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
|
||||
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
||||
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
|
||||
github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
|
||||
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
||||
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
|
||||
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
|
||||
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
|
||||
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
|
||||
github.com/containerd/containerd v1.5.1 h1:xWHPAoe6VkUiI9GAvndJM7s/0MTrmwX3AQiYTr3olf0=
|
||||
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
|
||||
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
|
||||
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
|
||||
github.com/containerd/containerd v1.4.9 h1:JIw9mjVw4LsGmnA/Bqg9j9e+XB7soOJufrKUpA6n2Ns=
|
||||
github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
|
||||
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
|
||||
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
|
||||
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
|
||||
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
|
||||
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
|
||||
github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=
|
||||
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
|
||||
github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
|
||||
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
|
||||
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
|
||||
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
|
||||
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
|
||||
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
|
||||
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
|
||||
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
|
||||
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
|
||||
github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
|
||||
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
|
||||
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
|
||||
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
|
||||
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
|
||||
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
|
||||
github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
|
||||
github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
|
||||
github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
|
||||
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
|
||||
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
|
||||
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
|
||||
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
|
||||
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
|
||||
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
|
||||
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
|
||||
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
|
||||
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
|
||||
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
|
||||
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
|
||||
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
|
||||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
|
||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -781,105 +220,23 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
|
||||
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
|
||||
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
|
||||
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
|
||||
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
|
||||
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
|
||||
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
|
||||
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
|
||||
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
|
||||
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
|
||||
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
||||
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
||||
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
17
src/runtime/vendor/github.com/cilium/ebpf/.clang-format
generated
vendored
Normal file
@@ -0,0 +1,17 @@
---
Language: Cpp
BasedOnStyle: LLVM
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: true
AlignEscapedNewlines: DontAlign
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: false
BreakBeforeBraces: Attach
IndentWidth: 4
KeepEmptyLinesAtTheStartOfBlocks: false
TabWidth: 4
UseTab: ForContinuationAndIndentation
ColumnLimit: 1000
...
13
src/runtime/vendor/github.com/cilium/ebpf/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,13 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
*.o

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
29
src/runtime/vendor/github.com/cilium/ebpf/.golangci.yaml
generated
vendored
Normal file
@@ -0,0 +1,29 @@
---
issues:
  exclude-rules:
    # syscall param structs will have unused fields in Go code.
    - path: syscall.*.go
      linters:
        - structcheck

linters:
  disable-all: true
  enable:
    - deadcode
    - errcheck
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    - staticcheck
    - structcheck
    - typecheck
    - unused
    - varcheck

  # Could be enabled later:
  # - gocyclo
  # - prealloc
  # - maligned
  # - gosec
80
src/runtime/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
generated
vendored
Normal file
@@ -0,0 +1,80 @@
Architecture of the library
===

ELF -> Specifications -> Objects -> Links

ELF
---

BPF is usually produced by using Clang to compile a subset of C. Clang outputs
an ELF file which contains program byte code (aka BPF), but also metadata for
maps used by the program. The metadata follows the conventions set by libbpf
shipped with the kernel. Certain ELF sections have special meaning
and contain structures defined by libbpf. Newer versions of clang emit
additional metadata in BPF Type Format (aka BTF).

The library aims to be compatible with libbpf so that moving from a C toolchain
to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
is tested against the Linux selftests and avoids introducing custom behaviour
if possible.

The output of the ELF reader is a `CollectionSpec` which encodes
all of the information contained in the ELF in a form that is easy to work with
in Go.

### BTF

The BPF Type Format describes more than just the types used by a BPF program. It
includes debug aids like which source line corresponds to which instructions and
what global variables are used.

[BTF parsing](internal/btf/) lives in a separate internal package since exposing
it would mean an additional maintenance burden, and because the API still
has sharp corners. The most important concept is the `btf.Type` interface, which
also describes things that aren't really types like `.rodata` or `.bss` sections.
`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if
one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as
we write more code that deals with it.

Specifications
---

`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel
objects and contain everything necessary to execute the relevant `bpf(2)`
syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to
modify clang-compiled BPF code, for example to rewrite constants. At the same
time the [asm](asm/) package provides an assembler that can be used to generate
`ProgramSpec` on the fly.

Creating a spec should never require any privileges or be restricted in any way,
for example by only allowing programs in native endianness. This ensures that
the library stays flexible.

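To make the Specifications section concrete: a `ProgramSpec` can be put together directly with the [asm](asm/) package and only touches the kernel once it is loaded. The following is an editor's sketch against the vendored cilium/ebpf API, not code taken from this diff; the `ebpf.SocketFilter` program type, the `License` value and `asm.Return()` are assumptions, while `Mov.Imm` is the `ALUOp` helper defined in `asm/alu.go` further down in this change.

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	// Hand-assemble "return 0": set R0 to 0 and exit.
	spec := &ebpf.ProgramSpec{
		Name:    "example",          // hypothetical name
		Type:    ebpf.SocketFilter,  // assumed program type for the sketch
		License: "MIT",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0), // ALUOp.Imm from asm/alu.go
			asm.Return(),           // assumed helper from the asm package
		},
	}

	// Loading the spec is the first step that needs privileges.
	prog, err := ebpf.NewProgram(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()
}
```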
Objects
---

`Program` and `Map` are the result of loading specs into the kernel. Sometimes
loading a spec will fail because the kernel is too old, or a feature is not
enabled. There are multiple ways the library deals with that:

* Fallback: older kernels don't allow naming programs and maps. The library
  automatically detects support for names, and omits them during load if
  necessary. This works since the name is primarily a debug aid.

* Sentinel error: sometimes it's possible to detect that a feature isn't available.
  In that case the library will return an error wrapping `ErrNotSupported`.
  This is also useful to skip tests that can't run on the current kernel.

Once program and map objects are loaded they expose the kernel's low-level API,
e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
wrappers on top of the low-level API, like `MapIterator`. The low-level API is
useful as an out when our higher-level API doesn't support a particular use case.

Links
---

BPF can be attached to many different points in the kernel and newer BPF hooks
tend to use bpf_link to do so. Older hooks unfortunately use a combination of
syscalls, netlink messages, etc. Adding support for a new link type should not
pull in large dependencies like netlink, so XDP programs or tracepoints are
out of scope.
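The Objects section above names `MapIterator` as the safe wrapper over the kernel's `NextKey` API. As a hedged illustration (an editor's sketch, not part of the vendored file; creating the map needs the same privileges as the tests, and the `ebpf.MapSpec` field names and `ebpf.Hash` constant are assumed from the vendored package's public API):

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// A tiny hash map created straight from a MapSpec, no ELF needed.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 16,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	if err := m.Put(uint32(1), uint64(42)); err != nil {
		log.Fatal(err)
	}

	// MapIterator wraps the awkward low-level NextKey interface.
	var (
		key   uint32
		value uint64
	)
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		log.Printf("key=%d value=%d", key, value)
	}
	if err := iter.Err(); err != nil {
		log.Fatal(err)
	}
}
```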
46
src/runtime/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
40
src/runtime/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,40 @@
# How to contribute

Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
the form of pull requests and issues reporting bugs or suggesting new features
are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
a better understanding for the high-level goals.

New features must be accompanied by tests. Before starting work on any large
feature, please [join](https://cilium.herokuapp.com/) the
[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to
discuss the design first.

When submitting pull requests, consider writing details about what problem you
are solving and why the proposed approach solves that problem in commit messages
and/or pull request description to help future library users and maintainers to
reason about the proposed changes.

## Running the tests

Many of the tests require privileges to set resource limits and load eBPF code.
The easiest way to obtain these is to run the tests with `sudo`.

To test the current package with your local kernel you can simply run:
```
go test -exec sudo ./...
```

To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.

Examples:

```bash
# Run all tests on a 5.4 kernel
./run-tests.sh 5.4

# Run a subset of tests:
./run-tests.sh 5.4 go test ./link
```

23
src/runtime/vendor/github.com/cilium/ebpf/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,23 @@
MIT License

Copyright (c) 2017 Nathan Sweet
Copyright (c) 2018, 2019 Cloudflare
Copyright (c) 2019 Authors of Cilium

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
70
src/runtime/vendor/github.com/cilium/ebpf/Makefile
generated
vendored
Normal file
@@ -0,0 +1,70 @@
# The development version of clang is distributed as the 'clang' binary,
# while stable/released versions have a version number attached.
# Pin the default clang to a stable version.
CLANG ?= clang-12
CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS)

# Obtain an absolute path to the directory of the Makefile.
# Assume the Makefile is in the root of the repository.
REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UIDGID := $(shell stat -c '%u:%g' ${REPODIR})

IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)

# clang <8 doesn't tag relocs properly (STT_NOTYPE)
# clang 9 is the first version emitting BTF
TARGETS := \
	testdata/loader-clang-7 \
	testdata/loader-clang-9 \
	testdata/loader-$(CLANG) \
	testdata/invalid_map \
	testdata/raw_tracepoint \
	testdata/invalid_map_static \
	testdata/initialized_btf_map \
	testdata/strings \
	internal/btf/testdata/relocs

.PHONY: all clean docker-all docker-shell

.DEFAULT_TARGET = docker-all

# Build all ELF binaries using a Dockerized LLVM toolchain.
docker-all:
	docker run --rm --user "${UIDGID}" \
		-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
		--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
		"${IMAGE}:${VERSION}" \
		make all

# (debug) Drop the user into a shell inside the Docker container as root.
docker-shell:
	docker run --rm -ti \
		-v "${REPODIR}":/ebpf -w /ebpf \
		"${IMAGE}:${VERSION}"

clean:
	-$(RM) testdata/*.elf
	-$(RM) internal/btf/testdata/*.elf

all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS))
	ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf

testdata/loader-%-el.elf: testdata/loader.c
	$* $(CFLAGS) -mlittle-endian -c $< -o $@

testdata/loader-%-eb.elf: testdata/loader.c
	$* $(CFLAGS) -mbig-endian -c $< -o $@

%-el.elf: %.c
	$(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@

%-eb.elf : %.c
	$(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@

# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
.PHONY: vmlinux-btf
vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz
internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX)
	objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@"
62
src/runtime/vendor/github.com/cilium/ebpf/README.md
generated
vendored
Normal file
@@ -0,0 +1,62 @@
# eBPF

[](https://pkg.go.dev/github.com/cilium/ebpf)

eBPF is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.

* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
  assembler
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
  to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
  `PERF_EVENT_ARRAY`
* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
  compiling and embedding eBPF programs in Go code

The library is maintained by [Cloudflare](https://www.cloudflare.com) and
[Cilium](https://www.cilium.io). Feel free to
[join](https://cilium.herokuapp.com/) the
[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack.

## Current status

The package is production ready, but **the API is explicitly unstable right
now**. Expect to update your code if you want to follow along.

## Getting Started

A small collection of Go and eBPF programs that serve as examples for building
your own tools can be found under [examples/](examples/).

Contributions are highly encouraged, as they highlight certain use cases of
eBPF and the library, and help shape the future of the project.

## Requirements

* A version of Go that is [supported by
  upstream](https://golang.org/doc/devel/release.html#policy)
* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested)

## Useful resources

* [eBPF.io](https://ebpf.io) (recommended)
* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide)
  (recommended)
* [Linux documentation on
  BPF](https://www.kernel.org/doc/html/latest/networking/filter.html)
* [eBPF features by Linux
  version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md)

## Regenerating Testdata

Run `make` in the root of this repository to rebuild testdata in all
subpackages. This requires Docker, as it relies on a standardized build
environment to keep the build output stable.

The toolchain image build files are kept in [testdata/docker/](testdata/docker/).

## License

MIT
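Tying the README's Getting Started pointer back to the architecture notes above: a typical consumer parses the clang-produced ELF into a `CollectionSpec` and then loads everything it describes. This is a hedged editor's sketch, not one of the upstream examples; the object file path is hypothetical, and `LoadCollectionSpec`/`NewCollection` are used as the editor understands the vendored package's public API.

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Parse the ELF emitted by clang into a CollectionSpec...
	spec, err := ebpf.LoadCollectionSpec("bpf/program.o") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	// ...then load the programs and maps it describes into the kernel.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	for name := range coll.Programs {
		log.Printf("loaded program %q", name)
	}
}
```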
149
src/runtime/vendor/github.com/cilium/ebpf/asm/alu.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
package asm

//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp

// Source of ALU / ALU64 / Branch operations
//
// msb lsb
// +----+-+---+
// |op |S|cls|
// +----+-+---+
type Source uint8

const sourceMask OpCode = 0x08

// Source bitmask
const (
	// InvalidSource is returned by getters when invoked
	// on non ALU / branch OpCodes.
	InvalidSource Source = 0xff
	// ImmSource src is from constant
	ImmSource Source = 0x00
	// RegSource src is from register
	RegSource Source = 0x08
)

// The Endianness of a byte swap instruction.
type Endianness uint8

const endianMask = sourceMask

// Endian flags
const (
	InvalidEndian Endianness = 0xff
	// Convert to little endian
	LE Endianness = 0x00
	// Convert to big endian
	BE Endianness = 0x08
)

// ALUOp are ALU / ALU64 operations
//
// msb lsb
// +----+-+---+
// |OP |s|cls|
// +----+-+---+
type ALUOp uint8

const aluMask OpCode = 0xf0

const (
	// InvalidALUOp is returned by getters when invoked
	// on non ALU OpCodes
	InvalidALUOp ALUOp = 0xff
	// Add - addition
	Add ALUOp = 0x00
	// Sub - subtraction
	Sub ALUOp = 0x10
	// Mul - multiplication
	Mul ALUOp = 0x20
	// Div - division
	Div ALUOp = 0x30
	// Or - bitwise or
	Or ALUOp = 0x40
	// And - bitwise and
	And ALUOp = 0x50
	// LSh - bitwise shift left
	LSh ALUOp = 0x60
	// RSh - bitwise shift right
	RSh ALUOp = 0x70
	// Neg - sign/unsign signing bit
	Neg ALUOp = 0x80
	// Mod - modulo
	Mod ALUOp = 0x90
	// Xor - bitwise xor
	Xor ALUOp = 0xa0
	// Mov - move value from one place to another
	Mov ALUOp = 0xb0
	// ArSh - arithmetic shift
	ArSh ALUOp = 0xc0
	// Swap - endian conversions
	Swap ALUOp = 0xd0
)

// HostTo converts from host to another endianness.
func HostTo(endian Endianness, dst Register, size Size) Instruction {
	var imm int64
	switch size {
	case Half:
		imm = 16
	case Word:
		imm = 32
	case DWord:
		imm = 64
	default:
		return Instruction{OpCode: InvalidOpCode}
	}

	return Instruction{
		OpCode:   OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
		Dst:      dst,
		Constant: imm,
	}
}

// Op returns the OpCode for an ALU operation with a given source.
func (op ALUOp) Op(source Source) OpCode {
	return OpCode(ALU64Class).SetALUOp(op).SetSource(source)
}

// Reg emits `dst (op) src`.
func (op ALUOp) Reg(dst, src Register) Instruction {
	return Instruction{
		OpCode: op.Op(RegSource),
		Dst:    dst,
		Src:    src,
	}
}

// Imm emits `dst (op) value`.
func (op ALUOp) Imm(dst Register, value int32) Instruction {
	return Instruction{
		OpCode:   op.Op(ImmSource),
		Dst:      dst,
		Constant: int64(value),
	}
}

// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
func (op ALUOp) Op32(source Source) OpCode {
	return OpCode(ALUClass).SetALUOp(op).SetSource(source)
}

// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
func (op ALUOp) Reg32(dst, src Register) Instruction {
	return Instruction{
		OpCode: op.Op32(RegSource),
		Dst:    dst,
		Src:    src,
	}
}

// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
func (op ALUOp) Imm32(dst Register, value int32) Instruction {
	return Instruction{
		OpCode:   op.Op32(ImmSource),
		Dst:      dst,
		Constant: int64(value),
	}
}
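The `ALUOp` helpers above (`Reg`, `Imm`, `Reg32`, `Imm32`) and `HostTo` each return a single `Instruction`, so straight-line eBPF can be written as a slice literal. A hedged editor's sketch, not code from this diff; `asm.R0`, `asm.R1`, `asm.Half` and `asm.Return()` come from other files of the package and are assumed here.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Each helper from alu.go emits one instruction.
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 10),              // r0 = 10
		asm.Add.Imm(asm.R0, 32),              // r0 += 32
		asm.Mov.Reg(asm.R1, asm.R0),          // r1 = r0
		asm.HostTo(asm.BE, asm.R1, asm.Half), // byte-swap the low 16 bits of r1 to big endian
		asm.Return(),                         // assumed helper; exit with r0
	}

	// Print the assembled instruction stream for inspection.
	fmt.Println(insns)
}
```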
107
src/runtime/vendor/github.com/cilium/ebpf/asm/alu_string.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
|
||||
|
||||
package asm
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidSource-255]
|
||||
_ = x[ImmSource-0]
|
||||
_ = x[RegSource-8]
|
||||
}
|
||||
|
||||
const (
|
||||
_Source_name_0 = "ImmSource"
|
||||
_Source_name_1 = "RegSource"
|
||||
_Source_name_2 = "InvalidSource"
|
||||
)
|
||||
|
||||
func (i Source) String() string {
|
||||
switch {
|
||||
case i == 0:
|
||||
return _Source_name_0
|
||||
case i == 8:
|
||||
return _Source_name_1
|
||||
case i == 255:
|
||||
return _Source_name_2
|
||||
default:
|
||||
return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidEndian-255]
|
||||
_ = x[LE-0]
|
||||
_ = x[BE-8]
|
||||
}
|
||||
|
||||
const (
|
||||
_Endianness_name_0 = "LE"
|
||||
_Endianness_name_1 = "BE"
|
||||
_Endianness_name_2 = "InvalidEndian"
|
||||
)
|
||||
|
||||
func (i Endianness) String() string {
|
||||
switch {
|
||||
case i == 0:
|
||||
return _Endianness_name_0
|
||||
case i == 8:
|
||||
return _Endianness_name_1
|
||||
case i == 255:
|
||||
return _Endianness_name_2
|
||||
default:
|
||||
return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidALUOp-255]
|
||||
_ = x[Add-0]
|
||||
_ = x[Sub-16]
|
||||
_ = x[Mul-32]
|
||||
_ = x[Div-48]
|
||||
_ = x[Or-64]
|
||||
_ = x[And-80]
|
||||
_ = x[LSh-96]
|
||||
_ = x[RSh-112]
|
||||
_ = x[Neg-128]
|
||||
_ = x[Mod-144]
|
||||
_ = x[Xor-160]
|
||||
_ = x[Mov-176]
|
||||
_ = x[ArSh-192]
|
||||
_ = x[Swap-208]
|
||||
}
|
||||
|
||||
const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp"
|
||||
|
||||
var _ALUOp_map = map[ALUOp]string{
|
||||
0: _ALUOp_name[0:3],
|
||||
16: _ALUOp_name[3:6],
|
||||
32: _ALUOp_name[6:9],
|
||||
48: _ALUOp_name[9:12],
|
||||
64: _ALUOp_name[12:14],
|
||||
80: _ALUOp_name[14:17],
|
||||
96: _ALUOp_name[17:20],
|
||||
112: _ALUOp_name[20:23],
|
||||
128: _ALUOp_name[23:26],
|
||||
144: _ALUOp_name[26:29],
|
||||
160: _ALUOp_name[29:32],
|
||||
176: _ALUOp_name[32:35],
|
||||
192: _ALUOp_name[35:39],
|
||||
208: _ALUOp_name[39:43],
|
||||
255: _ALUOp_name[43:55],
|
||||
}
|
||||
|
||||
func (i ALUOp) String() string {
|
||||
if str, ok := _ALUOp_map[i]; ok {
|
||||
return str
|
||||
}
|
||||
return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
2
src/runtime/vendor/github.com/cilium/ebpf/asm/doc.go
generated
vendored
Normal file
@@ -0,0 +1,2 @@
// Package asm is an assembler for eBPF bytecode.
package asm
195
src/runtime/vendor/github.com/cilium/ebpf/asm/func.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
package asm
|
||||
|
||||
//go:generate stringer -output func_string.go -type=BuiltinFunc
|
||||
|
||||
// BuiltinFunc is a built-in eBPF function.
|
||||
type BuiltinFunc int32
|
||||
|
||||
// eBPF built-in functions
|
||||
//
|
||||
// You can regenerate this list using the following gawk script:
|
||||
//
|
||||
// /FN\(.+\),/ {
|
||||
// match($1, /\((.+)\)/, r)
|
||||
// split(r[1], p, "_")
|
||||
// printf "Fn"
|
||||
// for (i in p) {
|
||||
// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
|
||||
// }
|
||||
// print ""
|
||||
// }
|
||||
//
|
||||
// The script expects include/uapi/linux/bpf.h as its input.
|
||||
const (
|
||||
FnUnspec BuiltinFunc = iota
|
||||
FnMapLookupElem
|
||||
FnMapUpdateElem
|
||||
FnMapDeleteElem
|
||||
FnProbeRead
|
||||
FnKtimeGetNs
|
||||
FnTracePrintk
|
||||
FnGetPrandomU32
|
||||
FnGetSmpProcessorId
|
||||
FnSkbStoreBytes
|
||||
FnL3CsumReplace
|
||||
FnL4CsumReplace
|
||||
FnTailCall
|
||||
FnCloneRedirect
|
||||
FnGetCurrentPidTgid
|
||||
FnGetCurrentUidGid
|
||||
FnGetCurrentComm
|
||||
FnGetCgroupClassid
|
||||
FnSkbVlanPush
|
||||
FnSkbVlanPop
|
||||
FnSkbGetTunnelKey
|
||||
FnSkbSetTunnelKey
|
||||
FnPerfEventRead
|
||||
FnRedirect
|
||||
FnGetRouteRealm
|
||||
FnPerfEventOutput
|
||||
FnSkbLoadBytes
|
||||
FnGetStackid
|
||||
FnCsumDiff
|
||||
FnSkbGetTunnelOpt
|
||||
FnSkbSetTunnelOpt
|
||||
FnSkbChangeProto
|
||||
FnSkbChangeType
|
||||
FnSkbUnderCgroup
|
||||
FnGetHashRecalc
|
||||
FnGetCurrentTask
|
||||
FnProbeWriteUser
|
||||
FnCurrentTaskUnderCgroup
|
||||
FnSkbChangeTail
|
||||
FnSkbPullData
|
||||
FnCsumUpdate
|
||||
FnSetHashInvalid
|
||||
FnGetNumaNodeId
|
||||
FnSkbChangeHead
|
||||
FnXdpAdjustHead
|
||||
FnProbeReadStr
|
||||
FnGetSocketCookie
|
||||
FnGetSocketUid
|
||||
FnSetHash
|
||||
FnSetsockopt
|
||||
FnSkbAdjustRoom
|
||||
FnRedirectMap
|
||||
FnSkRedirectMap
|
||||
FnSockMapUpdate
|
||||
FnXdpAdjustMeta
|
||||
FnPerfEventReadValue
|
||||
FnPerfProgReadValue
|
||||
FnGetsockopt
|
||||
FnOverrideReturn
|
||||
FnSockOpsCbFlagsSet
|
||||
FnMsgRedirectMap
|
||||
FnMsgApplyBytes
|
||||
FnMsgCorkBytes
|
||||
FnMsgPullData
|
||||
FnBind
|
||||
FnXdpAdjustTail
|
||||
FnSkbGetXfrmState
|
||||
FnGetStack
|
||||
FnSkbLoadBytesRelative
|
||||
FnFibLookup
|
||||
FnSockHashUpdate
|
||||
FnMsgRedirectHash
|
||||
FnSkRedirectHash
|
||||
FnLwtPushEncap
|
||||
FnLwtSeg6StoreBytes
|
||||
FnLwtSeg6AdjustSrh
|
||||
FnLwtSeg6Action
|
||||
FnRcRepeat
|
||||
FnRcKeydown
|
||||
FnSkbCgroupId
|
||||
FnGetCurrentCgroupId
|
||||
FnGetLocalStorage
|
||||
FnSkSelectReuseport
|
||||
FnSkbAncestorCgroupId
|
||||
FnSkLookupTcp
|
||||
FnSkLookupUdp
|
||||
FnSkRelease
|
||||
FnMapPushElem
|
||||
FnMapPopElem
|
||||
FnMapPeekElem
|
||||
FnMsgPushData
|
||||
FnMsgPopData
|
||||
FnRcPointerRel
|
||||
FnSpinLock
|
||||
FnSpinUnlock
|
||||
FnSkFullsock
|
||||
FnTcpSock
|
||||
FnSkbEcnSetCe
|
||||
FnGetListenerSock
|
||||
FnSkcLookupTcp
|
||||
FnTcpCheckSyncookie
|
||||
FnSysctlGetName
|
||||
FnSysctlGetCurrentValue
|
||||
FnSysctlGetNewValue
|
||||
FnSysctlSetNewValue
|
||||
FnStrtol
|
||||
FnStrtoul
|
||||
FnSkStorageGet
|
||||
FnSkStorageDelete
|
||||
FnSendSignal
|
||||
FnTcpGenSyncookie
|
||||
FnSkbOutput
|
||||
FnProbeReadUser
|
||||
FnProbeReadKernel
|
||||
FnProbeReadUserStr
|
||||
FnProbeReadKernelStr
|
||||
FnTcpSendAck
|
||||
FnSendSignalThread
|
||||
FnJiffies64
|
||||
FnReadBranchRecords
|
||||
FnGetNsCurrentPidTgid
|
||||
FnXdpOutput
|
||||
FnGetNetnsCookie
|
||||
FnGetCurrentAncestorCgroupId
|
||||
FnSkAssign
|
||||
FnKtimeGetBootNs
|
||||
FnSeqPrintf
|
||||
FnSeqWrite
|
||||
FnSkCgroupId
|
||||
FnSkAncestorCgroupId
|
||||
FnRingbufOutput
|
||||
FnRingbufReserve
|
||||
FnRingbufSubmit
|
||||
FnRingbufDiscard
|
||||
FnRingbufQuery
|
||||
FnCsumLevel
|
||||
FnSkcToTcp6Sock
|
||||
FnSkcToTcpSock
|
||||
FnSkcToTcpTimewaitSock
|
||||
FnSkcToTcpRequestSock
|
||||
FnSkcToUdp6Sock
|
||||
FnGetTaskStack
|
||||
FnLoadHdrOpt
|
||||
FnStoreHdrOpt
|
||||
FnReserveHdrOpt
|
||||
FnInodeStorageGet
|
||||
FnInodeStorageDelete
|
||||
FnDPath
|
||||
FnCopyFromUser
|
||||
FnSnprintfBtf
|
||||
FnSeqPrintfBtf
|
||||
FnSkbCgroupClassid
|
||||
FnRedirectNeigh
|
||||
FnPerCpuPtr
|
||||
FnThisCpuPtr
|
||||
FnRedirectPeer
|
||||
FnTaskStorageGet
|
||||
FnTaskStorageDelete
|
||||
FnGetCurrentTaskBtf
|
||||
FnBprmOptsSet
|
||||
FnKtimeGetCoarseNs
|
||||
FnImaInodeHash
|
||||
FnSockFromFile
|
||||
)
|
||||
|
||||
// Call emits a function call.
|
||||
func (fn BuiltinFunc) Call() Instruction {
|
||||
return Instruction{
|
||||
OpCode: OpCode(JumpClass).SetJumpOp(Call),
|
||||
Constant: int64(fn),
|
||||
}
|
||||
}
|
||||
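`BuiltinFunc.Call()` at the end of `func.go` is how the constants above turn into BPF CALL instructions; the helper's return value lands in R0 per the BPF calling convention. A minimal hedged sketch (editor's example, not part of the vendored file; `asm.Return()` is assumed from the package's jump helpers):

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Call bpf_ktime_get_ns() and return its result.
	insns := asm.Instructions{
		asm.FnKtimeGetNs.Call(), // r0 = bpf_ktime_get_ns()
		asm.Return(),            // exit with r0
	}

	fmt.Println(insns)
}
```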
185
src/runtime/vendor/github.com/cilium/ebpf/asm/func_string.go
generated
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT.
|
||||
|
||||
package asm
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[FnUnspec-0]
|
||||
_ = x[FnMapLookupElem-1]
|
||||
_ = x[FnMapUpdateElem-2]
|
||||
_ = x[FnMapDeleteElem-3]
|
||||
_ = x[FnProbeRead-4]
|
||||
_ = x[FnKtimeGetNs-5]
|
||||
_ = x[FnTracePrintk-6]
|
||||
_ = x[FnGetPrandomU32-7]
|
||||
_ = x[FnGetSmpProcessorId-8]
|
||||
_ = x[FnSkbStoreBytes-9]
|
||||
_ = x[FnL3CsumReplace-10]
|
||||
_ = x[FnL4CsumReplace-11]
|
||||
_ = x[FnTailCall-12]
|
||||
_ = x[FnCloneRedirect-13]
|
||||
_ = x[FnGetCurrentPidTgid-14]
|
||||
_ = x[FnGetCurrentUidGid-15]
|
||||
_ = x[FnGetCurrentComm-16]
|
||||
_ = x[FnGetCgroupClassid-17]
|
||||
_ = x[FnSkbVlanPush-18]
|
||||
_ = x[FnSkbVlanPop-19]
|
||||
_ = x[FnSkbGetTunnelKey-20]
|
||||
_ = x[FnSkbSetTunnelKey-21]
|
||||
_ = x[FnPerfEventRead-22]
|
||||
_ = x[FnRedirect-23]
|
||||
_ = x[FnGetRouteRealm-24]
|
||||
_ = x[FnPerfEventOutput-25]
|
||||
_ = x[FnSkbLoadBytes-26]
|
||||
_ = x[FnGetStackid-27]
|
||||
_ = x[FnCsumDiff-28]
|
||||
_ = x[FnSkbGetTunnelOpt-29]
|
||||
_ = x[FnSkbSetTunnelOpt-30]
|
||||
_ = x[FnSkbChangeProto-31]
|
||||
_ = x[FnSkbChangeType-32]
|
||||
_ = x[FnSkbUnderCgroup-33]
|
||||
_ = x[FnGetHashRecalc-34]
|
||||
_ = x[FnGetCurrentTask-35]
|
||||
_ = x[FnProbeWriteUser-36]
|
||||
_ = x[FnCurrentTaskUnderCgroup-37]
|
||||
_ = x[FnSkbChangeTail-38]
|
||||
_ = x[FnSkbPullData-39]
|
||||
_ = x[FnCsumUpdate-40]
|
||||
_ = x[FnSetHashInvalid-41]
|
||||
_ = x[FnGetNumaNodeId-42]
|
||||
_ = x[FnSkbChangeHead-43]
|
||||
_ = x[FnXdpAdjustHead-44]
|
||||
_ = x[FnProbeReadStr-45]
|
||||
_ = x[FnGetSocketCookie-46]
|
||||
_ = x[FnGetSocketUid-47]
|
||||
_ = x[FnSetHash-48]
|
||||
_ = x[FnSetsockopt-49]
|
||||
_ = x[FnSkbAdjustRoom-50]
|
||||
_ = x[FnRedirectMap-51]
|
||||
_ = x[FnSkRedirectMap-52]
|
||||
_ = x[FnSockMapUpdate-53]
|
||||
_ = x[FnXdpAdjustMeta-54]
|
||||
_ = x[FnPerfEventReadValue-55]
|
||||
_ = x[FnPerfProgReadValue-56]
|
||||
_ = x[FnGetsockopt-57]
|
||||
_ = x[FnOverrideReturn-58]
|
||||
_ = x[FnSockOpsCbFlagsSet-59]
|
||||
_ = x[FnMsgRedirectMap-60]
|
||||
_ = x[FnMsgApplyBytes-61]
|
||||
_ = x[FnMsgCorkBytes-62]
|
||||
_ = x[FnMsgPullData-63]
|
||||
_ = x[FnBind-64]
|
||||
_ = x[FnXdpAdjustTail-65]
|
||||
_ = x[FnSkbGetXfrmState-66]
|
||||
_ = x[FnGetStack-67]
|
||||
_ = x[FnSkbLoadBytesRelative-68]
|
||||
_ = x[FnFibLookup-69]
|
||||
_ = x[FnSockHashUpdate-70]
|
||||
_ = x[FnMsgRedirectHash-71]
|
||||
_ = x[FnSkRedirectHash-72]
|
||||
_ = x[FnLwtPushEncap-73]
|
||||
_ = x[FnLwtSeg6StoreBytes-74]
|
||||
_ = x[FnLwtSeg6AdjustSrh-75]
|
||||
_ = x[FnLwtSeg6Action-76]
|
||||
_ = x[FnRcRepeat-77]
|
||||
_ = x[FnRcKeydown-78]
|
||||
_ = x[FnSkbCgroupId-79]
|
||||
_ = x[FnGetCurrentCgroupId-80]
|
||||
_ = x[FnGetLocalStorage-81]
|
||||
_ = x[FnSkSelectReuseport-82]
|
||||
_ = x[FnSkbAncestorCgroupId-83]
|
||||
_ = x[FnSkLookupTcp-84]
|
||||
_ = x[FnSkLookupUdp-85]
|
||||
_ = x[FnSkRelease-86]
|
||||
_ = x[FnMapPushElem-87]
|
||||
_ = x[FnMapPopElem-88]
|
||||
_ = x[FnMapPeekElem-89]
|
||||
_ = x[FnMsgPushData-90]
|
||||
_ = x[FnMsgPopData-91]
|
||||
_ = x[FnRcPointerRel-92]
|
||||
_ = x[FnSpinLock-93]
|
||||
_ = x[FnSpinUnlock-94]
|
||||
_ = x[FnSkFullsock-95]
|
||||
_ = x[FnTcpSock-96]
|
||||
_ = x[FnSkbEcnSetCe-97]
|
||||
_ = x[FnGetListenerSock-98]
|
||||
_ = x[FnSkcLookupTcp-99]
|
||||
_ = x[FnTcpCheckSyncookie-100]
|
||||
_ = x[FnSysctlGetName-101]
|
||||
_ = x[FnSysctlGetCurrentValue-102]
|
||||
_ = x[FnSysctlGetNewValue-103]
|
||||
_ = x[FnSysctlSetNewValue-104]
|
||||
_ = x[FnStrtol-105]
|
||||
_ = x[FnStrtoul-106]
|
||||
_ = x[FnSkStorageGet-107]
|
||||
_ = x[FnSkStorageDelete-108]
|
||||
_ = x[FnSendSignal-109]
|
||||
_ = x[FnTcpGenSyncookie-110]
|
||||
_ = x[FnSkbOutput-111]
|
||||
_ = x[FnProbeReadUser-112]
|
||||
_ = x[FnProbeReadKernel-113]
|
||||
_ = x[FnProbeReadUserStr-114]
|
||||
_ = x[FnProbeReadKernelStr-115]
|
||||
_ = x[FnTcpSendAck-116]
|
||||
_ = x[FnSendSignalThread-117]
|
||||
_ = x[FnJiffies64-118]
|
||||
_ = x[FnReadBranchRecords-119]
|
||||
_ = x[FnGetNsCurrentPidTgid-120]
|
||||
_ = x[FnXdpOutput-121]
|
||||
_ = x[FnGetNetnsCookie-122]
|
||||
_ = x[FnGetCurrentAncestorCgroupId-123]
|
||||
_ = x[FnSkAssign-124]
|
||||
_ = x[FnKtimeGetBootNs-125]
|
||||
_ = x[FnSeqPrintf-126]
|
||||
_ = x[FnSeqWrite-127]
|
||||
_ = x[FnSkCgroupId-128]
|
||||
_ = x[FnSkAncestorCgroupId-129]
|
||||
_ = x[FnRingbufOutput-130]
|
||||
_ = x[FnRingbufReserve-131]
|
||||
_ = x[FnRingbufSubmit-132]
|
||||
_ = x[FnRingbufDiscard-133]
|
||||
_ = x[FnRingbufQuery-134]
|
||||
_ = x[FnCsumLevel-135]
|
||||
_ = x[FnSkcToTcp6Sock-136]
|
||||
_ = x[FnSkcToTcpSock-137]
|
||||
_ = x[FnSkcToTcpTimewaitSock-138]
|
||||
_ = x[FnSkcToTcpRequestSock-139]
|
||||
_ = x[FnSkcToUdp6Sock-140]
|
||||
_ = x[FnGetTaskStack-141]
|
||||
_ = x[FnLoadHdrOpt-142]
|
||||
_ = x[FnStoreHdrOpt-143]
|
||||
_ = x[FnReserveHdrOpt-144]
|
||||
_ = x[FnInodeStorageGet-145]
|
||||
_ = x[FnInodeStorageDelete-146]
|
||||
_ = x[FnDPath-147]
|
||||
_ = x[FnCopyFromUser-148]
|
||||
_ = x[FnSnprintfBtf-149]
|
||||
_ = x[FnSeqPrintfBtf-150]
|
||||
_ = x[FnSkbCgroupClassid-151]
|
||||
_ = x[FnRedirectNeigh-152]
|
||||
_ = x[FnPerCpuPtr-153]
|
||||
_ = x[FnThisCpuPtr-154]
|
||||
_ = x[FnRedirectPeer-155]
|
||||
_ = x[FnTaskStorageGet-156]
|
||||
_ = x[FnTaskStorageDelete-157]
|
||||
_ = x[FnGetCurrentTaskBtf-158]
|
||||
_ = x[FnBprmOptsSet-159]
|
||||
_ = x[FnKtimeGetCoarseNs-160]
|
||||
_ = x[FnImaInodeHash-161]
|
||||
_ = x[FnSockFromFile-162]
|
||||
}
|
||||
|
||||
const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFile"
|
||||
|
||||
var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424}
|
||||
|
||||
func (i BuiltinFunc) String() string {
|
||||
if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
|
||||
return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]]
|
||||
}
|
||||
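Editor's note: a minimal sketch (not part of the vendored file) of what the generated stringer above provides, assuming the package is imported as github.com/cilium/ebpf/asm.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Known helpers print their generated name; out-of-range values fall
	// back to the numeric form produced by String above.
	fmt.Println(asm.FnMapLookupElem)   // FnMapLookupElem
	fmt.Println(asm.BuiltinFunc(9999)) // BuiltinFunc(9999)
}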
506 src/runtime/vendor/github.com/cilium/ebpf/asm/instruction.go (generated, vendored, normal file)
@@ -0,0 +1,506 @@
|
||||
package asm
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
// InstructionSize is the size of a BPF instruction in bytes
|
||||
const InstructionSize = 8
|
||||
|
||||
// RawInstructionOffset is an offset in units of raw BPF instructions.
|
||||
type RawInstructionOffset uint64
|
||||
|
||||
// Bytes returns the offset of an instruction in bytes.
|
||||
func (rio RawInstructionOffset) Bytes() uint64 {
|
||||
return uint64(rio) * InstructionSize
|
||||
}
|
||||
|
||||
// Instruction is a single eBPF instruction.
|
||||
type Instruction struct {
|
||||
OpCode OpCode
|
||||
Dst Register
|
||||
Src Register
|
||||
Offset int16
|
||||
Constant int64
|
||||
Reference string
|
||||
Symbol string
|
||||
}
|
||||
|
||||
// Sym creates a symbol.
|
||||
func (ins Instruction) Sym(name string) Instruction {
|
||||
ins.Symbol = name
|
||||
return ins
|
||||
}
|
||||
|
||||
// Unmarshal decodes a BPF instruction.
|
||||
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) {
|
||||
var bi bpfInstruction
|
||||
err := binary.Read(r, bo, &bi)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
ins.OpCode = bi.OpCode
|
||||
ins.Offset = bi.Offset
|
||||
ins.Constant = int64(bi.Constant)
|
||||
ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("can't unmarshal registers: %s", err)
|
||||
}
|
||||
|
||||
if !bi.OpCode.IsDWordLoad() {
|
||||
return InstructionSize, nil
|
||||
}
|
||||
|
||||
var bi2 bpfInstruction
|
||||
if err := binary.Read(r, bo, &bi2); err != nil {
|
||||
// No Wrap, to avoid io.EOF clash
|
||||
return 0, errors.New("64bit immediate is missing second half")
|
||||
}
|
||||
if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 {
|
||||
return 0, errors.New("64bit immediate has non-zero fields")
|
||||
}
|
||||
ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant)))
|
||||
|
||||
return 2 * InstructionSize, nil
|
||||
}
|
||||
|
||||
// Marshal encodes a BPF instruction.
|
||||
func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
|
||||
if ins.OpCode == InvalidOpCode {
|
||||
return 0, errors.New("invalid opcode")
|
||||
}
|
||||
|
||||
isDWordLoad := ins.OpCode.IsDWordLoad()
|
||||
|
||||
cons := int32(ins.Constant)
|
||||
if isDWordLoad {
|
||||
// Encode least significant 32bit first for 64bit operations.
|
||||
cons = int32(uint32(ins.Constant))
|
||||
}
|
||||
|
||||
regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("can't marshal registers: %s", err)
|
||||
}
|
||||
|
||||
bpfi := bpfInstruction{
|
||||
ins.OpCode,
|
||||
regs,
|
||||
ins.Offset,
|
||||
cons,
|
||||
}
|
||||
|
||||
if err := binary.Write(w, bo, &bpfi); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if !isDWordLoad {
|
||||
return InstructionSize, nil
|
||||
}
|
||||
|
||||
bpfi = bpfInstruction{
|
||||
Constant: int32(ins.Constant >> 32),
|
||||
}
|
||||
|
||||
if err := binary.Write(w, bo, &bpfi); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return 2 * InstructionSize, nil
|
||||
}
|
||||
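Editor's note: a hedged sketch of round-tripping one instruction through Marshal and Unmarshal; the byte order and constant value are arbitrary choices, everything else is the API shown above.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	ins := asm.LoadImm(asm.R0, 42, asm.DWord) // a dword load occupies two raw instructions

	var buf bytes.Buffer
	if _, err := ins.Marshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}

	var decoded asm.Instruction
	if _, err := decoded.Unmarshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Constant) // 42
}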
|
||||
// RewriteMapPtr changes an instruction to use a new map fd.
|
||||
//
|
||||
// Returns an error if the instruction doesn't load a map.
|
||||
func (ins *Instruction) RewriteMapPtr(fd int) error {
|
||||
if !ins.OpCode.IsDWordLoad() {
|
||||
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||
}
|
||||
|
||||
if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
|
||||
return errors.New("not a load from a map")
|
||||
}
|
||||
|
||||
// Preserve the offset value for direct map loads.
|
||||
offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
|
||||
rawFd := uint64(uint32(fd))
|
||||
ins.Constant = int64(offset | rawFd)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MapPtr returns the map fd for this instruction.
|
||||
//
|
||||
// The result is undefined if the instruction is not a load from a map,
|
||||
// see IsLoadFromMap.
|
||||
func (ins *Instruction) MapPtr() int {
|
||||
return int(int32(uint64(ins.Constant) & math.MaxUint32))
|
||||
}
|
||||
|
||||
// RewriteMapOffset changes the offset of a direct load from a map.
|
||||
//
|
||||
// Returns an error if the instruction is not a direct load.
|
||||
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
|
||||
if !ins.OpCode.IsDWordLoad() {
|
||||
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||
}
|
||||
|
||||
if ins.Src != PseudoMapValue {
|
||||
return errors.New("not a direct load from a map")
|
||||
}
|
||||
|
||||
fd := uint64(ins.Constant) & math.MaxUint32
|
||||
ins.Constant = int64(uint64(offset)<<32 | fd)
|
||||
return nil
|
||||
}
|
||||
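Editor's note: a small sketch of the map-load rewriting helpers; the fd value 3 and both offsets are placeholders, not a real map descriptor.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	ins := asm.LoadMapValue(asm.R1, 3, 16) // placeholder fd and offset
	if err := ins.RewriteMapOffset(32); err != nil {
		panic(err)
	}
	fmt.Println(ins.MapPtr()) // 3: the fd half of the 64 bit constant is preserved
}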
|
||||
func (ins *Instruction) mapOffset() uint32 {
|
||||
return uint32(uint64(ins.Constant) >> 32)
|
||||
}
|
||||
|
||||
// IsLoadFromMap returns true if the instruction loads from a map.
|
||||
//
|
||||
// This covers both loading the map pointer and direct map value loads.
|
||||
func (ins *Instruction) IsLoadFromMap() bool {
|
||||
return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
|
||||
}
|
||||
|
||||
// IsFunctionCall returns true if the instruction calls another BPF function.
|
||||
//
|
||||
// This is not the same thing as a BPF helper call.
|
||||
func (ins *Instruction) IsFunctionCall() bool {
|
||||
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
|
||||
}
|
||||
|
||||
// IsConstantLoad returns true if the instruction loads a constant of the
|
||||
// given size.
|
||||
func (ins *Instruction) IsConstantLoad(size Size) bool {
|
||||
return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
|
||||
}
|
||||
|
||||
// Format implements fmt.Formatter.
|
||||
func (ins Instruction) Format(f fmt.State, c rune) {
|
||||
if c != 'v' {
|
||||
fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c)
|
||||
return
|
||||
}
|
||||
|
||||
op := ins.OpCode
|
||||
|
||||
if op == InvalidOpCode {
|
||||
fmt.Fprint(f, "INVALID")
|
||||
return
|
||||
}
|
||||
|
||||
// Omit trailing space for Exit
|
||||
if op.JumpOp() == Exit {
|
||||
fmt.Fprint(f, op)
|
||||
return
|
||||
}
|
||||
|
||||
if ins.IsLoadFromMap() {
|
||||
fd := ins.MapPtr()
|
||||
switch ins.Src {
|
||||
case PseudoMapFD:
|
||||
fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
|
||||
|
||||
case PseudoMapValue:
|
||||
fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
|
||||
}
|
||||
|
||||
goto ref
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, "%v ", op)
|
||||
switch cls := op.Class(); cls {
|
||||
case LdClass, LdXClass, StClass, StXClass:
|
||||
switch op.Mode() {
|
||||
case ImmMode:
|
||||
fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
|
||||
case AbsMode:
|
||||
fmt.Fprintf(f, "imm: %d", ins.Constant)
|
||||
case IndMode:
|
||||
fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant)
|
||||
case MemMode:
|
||||
fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant)
|
||||
case XAddMode:
|
||||
fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src)
|
||||
}
|
||||
|
||||
case ALU64Class, ALUClass:
|
||||
fmt.Fprintf(f, "dst: %s ", ins.Dst)
|
||||
if op.ALUOp() == Swap || op.Source() == ImmSource {
|
||||
fmt.Fprintf(f, "imm: %d", ins.Constant)
|
||||
} else {
|
||||
fmt.Fprintf(f, "src: %s", ins.Src)
|
||||
}
|
||||
|
||||
case JumpClass:
|
||||
switch jop := op.JumpOp(); jop {
|
||||
case Call:
|
||||
if ins.Src == PseudoCall {
|
||||
// bpf-to-bpf call
|
||||
fmt.Fprint(f, ins.Constant)
|
||||
} else {
|
||||
fmt.Fprint(f, BuiltinFunc(ins.Constant))
|
||||
}
|
||||
|
||||
default:
|
||||
fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset)
|
||||
if op.Source() == ImmSource {
|
||||
fmt.Fprintf(f, "imm: %d", ins.Constant)
|
||||
} else {
|
||||
fmt.Fprintf(f, "src: %s", ins.Src)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ref:
|
||||
if ins.Reference != "" {
|
||||
fmt.Fprintf(f, " <%s>", ins.Reference)
|
||||
}
|
||||
}
|
||||
|
||||
// Instructions is an eBPF program.
|
||||
type Instructions []Instruction
|
||||
|
||||
func (insns Instructions) String() string {
|
||||
return fmt.Sprint(insns)
|
||||
}
|
||||
|
||||
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
|
||||
//
|
||||
// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
|
||||
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
|
||||
if symbol == "" {
|
||||
return errors.New("empty symbol")
|
||||
}
|
||||
|
||||
found := false
|
||||
for i := range insns {
|
||||
ins := &insns[i]
|
||||
if ins.Reference != symbol {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := ins.RewriteMapPtr(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
found = true
|
||||
}
|
||||
|
||||
if !found {
|
||||
return &unreferencedSymbolError{symbol}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
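Editor's note: a sketch of rewriting a map reference by symbol; the symbol name "events" and the fd 42 are placeholders (the Reference field is normally filled in by the ELF loader).

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	load := asm.LoadMapPtr(asm.R1, 0)
	load.Reference = "events" // normally set by the ELF loader

	insns := asm.Instructions{load, asm.Return()}

	// 42 stands in for the fd of a map created at runtime.
	if err := insns.RewriteMapPtr("events", 42); err != nil {
		panic(err)
	}
	fmt.Println(insns[0].MapPtr()) // 42
}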
|
||||
// SymbolOffsets returns the set of symbols and their offset in
|
||||
// the instructions.
|
||||
func (insns Instructions) SymbolOffsets() (map[string]int, error) {
|
||||
offsets := make(map[string]int)
|
||||
|
||||
for i, ins := range insns {
|
||||
if ins.Symbol == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := offsets[ins.Symbol]; ok {
|
||||
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
|
||||
}
|
||||
|
||||
offsets[ins.Symbol] = i
|
||||
}
|
||||
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
// ReferenceOffsets returns the set of references and their offset in
|
||||
// the instructions.
|
||||
func (insns Instructions) ReferenceOffsets() map[string][]int {
|
||||
offsets := make(map[string][]int)
|
||||
|
||||
for i, ins := range insns {
|
||||
if ins.Reference == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
offsets[ins.Reference] = append(offsets[ins.Reference], i)
|
||||
}
|
||||
|
||||
return offsets
|
||||
}
|
||||
|
||||
// Format implements fmt.Formatter.
|
||||
//
|
||||
// You can control indentation of symbols by
|
||||
// specifying a width. Setting a precision controls the indentation of
|
||||
// instructions.
|
||||
// The default character is a tab, which can be overridden by specifying
|
||||
// the ' ' space flag.
|
||||
func (insns Instructions) Format(f fmt.State, c rune) {
|
||||
if c != 's' && c != 'v' {
|
||||
fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
|
||||
return
|
||||
}
|
||||
|
||||
// Precision is better in this case, because it allows
|
||||
// specifying 0 padding easily.
|
||||
padding, ok := f.Precision()
|
||||
if !ok {
|
||||
padding = 1
|
||||
}
|
||||
|
||||
indent := strings.Repeat("\t", padding)
|
||||
if f.Flag(' ') {
|
||||
indent = strings.Repeat(" ", padding)
|
||||
}
|
||||
|
||||
symPadding, ok := f.Width()
|
||||
if !ok {
|
||||
symPadding = padding - 1
|
||||
}
|
||||
if symPadding < 0 {
|
||||
symPadding = 0
|
||||
}
|
||||
|
||||
symIndent := strings.Repeat("\t", symPadding)
|
||||
if f.Flag(' ') {
|
||||
symIndent = strings.Repeat(" ", symPadding)
|
||||
}
|
||||
|
||||
// Guess how many digits we need at most, by assuming that all instructions
|
||||
// are double wide.
|
||||
highestOffset := len(insns) * 2
|
||||
offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
|
||||
|
||||
iter := insns.Iterate()
|
||||
for iter.Next() {
|
||||
if iter.Ins.Symbol != "" {
|
||||
fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol)
|
||||
}
|
||||
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
|
||||
}
|
||||
}
|
||||
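Editor's note: a short sketch of the Formatter hooks described in the comment above; the program itself is a meaningless two-instruction placeholder.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadImm(asm.R0, 0, asm.DWord).Sym("entry"),
		asm.Return(),
	}

	fmt.Printf("%v\n", insns)      // default: tab indented, symbols flush left
	fmt.Printf("% 2.4v\n", insns)  // width/precision set symbol and instruction indent; ' ' uses spaces
}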
|
||||
// Marshal encodes a BPF program into the kernel format.
|
||||
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
|
||||
for i, ins := range insns {
|
||||
_, err := ins.Marshal(w, bo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("instruction %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tag calculates the kernel tag for a series of instructions.
|
||||
//
|
||||
// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
|
||||
// to ProgramInfo.Tag to figure out whether a loaded program matches
|
||||
// certain instructions.
|
||||
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
|
||||
h := sha1.New()
|
||||
for i, ins := range insns {
|
||||
if ins.IsLoadFromMap() {
|
||||
ins.Constant = 0
|
||||
}
|
||||
_, err := ins.Marshal(h, bo)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("instruction %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
|
||||
}
|
||||
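Editor's note: a sketch of computing the kernel tag for a (placeholder) program; the result can be compared with the tag reported for a loaded program.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadImm(asm.R0, 0, asm.DWord),
		asm.Return(),
	}

	tag, err := insns.Tag(binary.LittleEndian)
	if err != nil {
		panic(err)
	}
	fmt.Println(tag) // hex string, comparable to ProgramInfo.Tag of a loaded program
}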
|
||||
// Iterate allows iterating a BPF program while keeping track of
|
||||
// various offsets.
|
||||
//
|
||||
// Modifying the instruction slice will lead to undefined behaviour.
|
||||
func (insns Instructions) Iterate() *InstructionIterator {
|
||||
return &InstructionIterator{insns: insns}
|
||||
}
|
||||
|
||||
// InstructionIterator iterates over a BPF program.
|
||||
type InstructionIterator struct {
|
||||
insns Instructions
|
||||
// The instruction in question.
|
||||
Ins *Instruction
|
||||
// The index of the instruction in the original instruction slice.
|
||||
Index int
|
||||
// The offset of the instruction in raw BPF instructions. This accounts
|
||||
// for double-wide instructions.
|
||||
Offset RawInstructionOffset
|
||||
}
|
||||
|
||||
// Next returns true as long as there are any instructions remaining.
|
||||
func (iter *InstructionIterator) Next() bool {
|
||||
if len(iter.insns) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if iter.Ins != nil {
|
||||
iter.Index++
|
||||
iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions())
|
||||
}
|
||||
iter.Ins = &iter.insns[0]
|
||||
iter.insns = iter.insns[1:]
|
||||
return true
|
||||
}
|
||||
|
||||
type bpfInstruction struct {
|
||||
OpCode OpCode
|
||||
Registers bpfRegisters
|
||||
Offset int16
|
||||
Constant int32
|
||||
}
|
||||
|
||||
type bpfRegisters uint8
|
||||
|
||||
func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
|
||||
switch bo {
|
||||
case binary.LittleEndian:
|
||||
return bpfRegisters((src << 4) | (dst & 0xF)), nil
|
||||
case binary.BigEndian:
|
||||
return bpfRegisters((dst << 4) | (src & 0xF)), nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
|
||||
}
|
||||
}
|
||||
|
||||
func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) {
|
||||
switch bo {
|
||||
case binary.LittleEndian:
|
||||
return Register(r & 0xF), Register(r >> 4), nil
|
||||
case binary.BigEndian:
|
||||
return Register(r >> 4), Register(r & 0xf), nil
|
||||
default:
|
||||
return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
|
||||
}
|
||||
}
|
||||
|
||||
type unreferencedSymbolError struct {
|
||||
symbol string
|
||||
}
|
||||
|
||||
func (use *unreferencedSymbolError) Error() string {
|
||||
return fmt.Sprintf("unreferenced symbol %s", use.symbol)
|
||||
}
|
||||
|
||||
// IsUnreferencedSymbol returns true if err was caused by
|
||||
// an unreferenced symbol.
|
||||
func IsUnreferencedSymbol(err error) bool {
|
||||
_, ok := err.(*unreferencedSymbolError)
|
||||
return ok
|
||||
}
|
||||
109 src/runtime/vendor/github.com/cilium/ebpf/asm/jump.go (generated, vendored, normal file)
@@ -0,0 +1,109 @@
|
||||
package asm
|
||||
|
||||
//go:generate stringer -output jump_string.go -type=JumpOp
|
||||
|
||||
// JumpOp affects control flow.
|
||||
//
|
||||
// msb lsb
|
||||
// +----+-+---+
|
||||
// |OP |s|cls|
|
||||
// +----+-+---+
|
||||
type JumpOp uint8
|
||||
|
||||
const jumpMask OpCode = aluMask
|
||||
|
||||
const (
|
||||
// InvalidJumpOp is returned by getters when invoked
|
||||
// on non branch OpCodes
|
||||
InvalidJumpOp JumpOp = 0xff
|
||||
// Ja jumps by offset unconditionally
|
||||
Ja JumpOp = 0x00
|
||||
// JEq jumps by offset if r == imm
|
||||
JEq JumpOp = 0x10
|
||||
// JGT jumps by offset if r > imm
|
||||
JGT JumpOp = 0x20
|
||||
// JGE jumps by offset if r >= imm
|
||||
JGE JumpOp = 0x30
|
||||
// JSet jumps by offset if r & imm
|
||||
JSet JumpOp = 0x40
|
||||
// JNE jumps by offset if r != imm
|
||||
JNE JumpOp = 0x50
|
||||
// JSGT jumps by offset if signed r > signed imm
|
||||
JSGT JumpOp = 0x60
|
||||
// JSGE jumps by offset if signed r >= signed imm
|
||||
JSGE JumpOp = 0x70
|
||||
// Call builtin or user defined function from imm
|
||||
Call JumpOp = 0x80
|
||||
// Exit ends execution, with value in r0
|
||||
Exit JumpOp = 0x90
|
||||
// JLT jumps by offset if r < imm
|
||||
JLT JumpOp = 0xa0
|
||||
// JLE jumps by offset if r <= imm
|
||||
JLE JumpOp = 0xb0
|
||||
// JSLT jumps by offset if signed r < signed imm
|
||||
JSLT JumpOp = 0xc0
|
||||
// JSLE jumps by offset if signed r <= signed imm
|
||||
JSLE JumpOp = 0xd0
|
||||
)
|
||||
|
||||
// Return emits an exit instruction.
|
||||
//
|
||||
// Requires a return value in R0.
|
||||
func Return() Instruction {
|
||||
return Instruction{
|
||||
OpCode: OpCode(JumpClass).SetJumpOp(Exit),
|
||||
}
|
||||
}
|
||||
|
||||
// Op returns the OpCode for a given jump source.
|
||||
func (op JumpOp) Op(source Source) OpCode {
|
||||
return OpCode(JumpClass).SetJumpOp(op).SetSource(source)
|
||||
}
|
||||
|
||||
// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled.
|
||||
func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
|
||||
if op == Exit || op == Call || op == Ja {
|
||||
return Instruction{OpCode: InvalidOpCode}
|
||||
}
|
||||
|
||||
return Instruction{
|
||||
OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource),
|
||||
Dst: dst,
|
||||
Offset: -1,
|
||||
Constant: int64(value),
|
||||
Reference: label,
|
||||
}
|
||||
}
|
||||
|
||||
// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled.
|
||||
func (op JumpOp) Reg(dst, src Register, label string) Instruction {
|
||||
if op == Exit || op == Call || op == Ja {
|
||||
return Instruction{OpCode: InvalidOpCode}
|
||||
}
|
||||
|
||||
return Instruction{
|
||||
OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource),
|
||||
Dst: dst,
|
||||
Src: src,
|
||||
Offset: -1,
|
||||
Reference: label,
|
||||
}
|
||||
}
|
||||
|
||||
// Label adjusts PC to the address of the label.
|
||||
func (op JumpOp) Label(label string) Instruction {
|
||||
if op == Call {
|
||||
return Instruction{
|
||||
OpCode: OpCode(JumpClass).SetJumpOp(Call),
|
||||
Src: PseudoCall,
|
||||
Constant: -1,
|
||||
Reference: label,
|
||||
}
|
||||
}
|
||||
|
||||
return Instruction{
|
||||
OpCode: OpCode(JumpClass).SetJumpOp(op),
|
||||
Offset: -1,
|
||||
Reference: label,
|
||||
}
|
||||
}
|
||||
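Editor's note: a sketch of how the jump constructors and Sym combine into a tiny branch; the label name is a placeholder, and resolving it to a real offset happens later when the instructions are assembled and loaded.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	prog := asm.Instructions{
		asm.LoadImm(asm.R0, 1, asm.DWord),
		asm.JEq.Imm(asm.R1, 0, "out"), // if r1 == 0, jump to the "out" label
		asm.LoadImm(asm.R0, 2, asm.DWord),
		asm.Return().Sym("out"),
	}
	fmt.Printf("%v\n", prog)
}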
53 src/runtime/vendor/github.com/cilium/ebpf/asm/jump_string.go (generated, vendored, normal file)
@@ -0,0 +1,53 @@
|
||||
// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT.
|
||||
|
||||
package asm
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidJumpOp-255]
|
||||
_ = x[Ja-0]
|
||||
_ = x[JEq-16]
|
||||
_ = x[JGT-32]
|
||||
_ = x[JGE-48]
|
||||
_ = x[JSet-64]
|
||||
_ = x[JNE-80]
|
||||
_ = x[JSGT-96]
|
||||
_ = x[JSGE-112]
|
||||
_ = x[Call-128]
|
||||
_ = x[Exit-144]
|
||||
_ = x[JLT-160]
|
||||
_ = x[JLE-176]
|
||||
_ = x[JSLT-192]
|
||||
_ = x[JSLE-208]
|
||||
}
|
||||
|
||||
const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp"
|
||||
|
||||
var _JumpOp_map = map[JumpOp]string{
|
||||
0: _JumpOp_name[0:2],
|
||||
16: _JumpOp_name[2:5],
|
||||
32: _JumpOp_name[5:8],
|
||||
48: _JumpOp_name[8:11],
|
||||
64: _JumpOp_name[11:15],
|
||||
80: _JumpOp_name[15:18],
|
||||
96: _JumpOp_name[18:22],
|
||||
112: _JumpOp_name[22:26],
|
||||
128: _JumpOp_name[26:30],
|
||||
144: _JumpOp_name[30:34],
|
||||
160: _JumpOp_name[34:37],
|
||||
176: _JumpOp_name[37:40],
|
||||
192: _JumpOp_name[40:44],
|
||||
208: _JumpOp_name[44:48],
|
||||
255: _JumpOp_name[48:61],
|
||||
}
|
||||
|
||||
func (i JumpOp) String() string {
|
||||
if str, ok := _JumpOp_map[i]; ok {
|
||||
return str
|
||||
}
|
||||
return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
204 src/runtime/vendor/github.com/cilium/ebpf/asm/load_store.go (generated, vendored, normal file)
@@ -0,0 +1,204 @@
|
||||
package asm
|
||||
|
||||
//go:generate stringer -output load_store_string.go -type=Mode,Size
|
||||
|
||||
// Mode for load and store operations
|
||||
//
|
||||
// msb lsb
|
||||
// +---+--+---+
|
||||
// |MDE|sz|cls|
|
||||
// +---+--+---+
|
||||
type Mode uint8
|
||||
|
||||
const modeMask OpCode = 0xe0
|
||||
|
||||
const (
|
||||
// InvalidMode is returned by getters when invoked
|
||||
// on non load / store OpCodes
|
||||
InvalidMode Mode = 0xff
|
||||
// ImmMode - immediate value
|
||||
ImmMode Mode = 0x00
|
||||
// AbsMode - immediate value + offset
|
||||
AbsMode Mode = 0x20
|
||||
// IndMode - indirect (imm+src)
|
||||
IndMode Mode = 0x40
|
||||
// MemMode - load from memory
|
||||
MemMode Mode = 0x60
|
||||
// XAddMode - add atomically across processors.
|
||||
XAddMode Mode = 0xc0
|
||||
)
|
||||
|
||||
// Size of load and store operations
|
||||
//
|
||||
// msb lsb
|
||||
// +---+--+---+
|
||||
// |mde|SZ|cls|
|
||||
// +---+--+---+
|
||||
type Size uint8
|
||||
|
||||
const sizeMask OpCode = 0x18
|
||||
|
||||
const (
|
||||
// InvalidSize is returned by getters when invoked
|
||||
// on non load / store OpCodes
|
||||
InvalidSize Size = 0xff
|
||||
// DWord - double word; 64 bits
|
||||
DWord Size = 0x18
|
||||
// Word - word; 32 bits
|
||||
Word Size = 0x00
|
||||
// Half - half-word; 16 bits
|
||||
Half Size = 0x08
|
||||
// Byte - byte; 8 bits
|
||||
Byte Size = 0x10
|
||||
)
|
||||
|
||||
// Sizeof returns the size in bytes.
|
||||
func (s Size) Sizeof() int {
|
||||
switch s {
|
||||
case DWord:
|
||||
return 8
|
||||
case Word:
|
||||
return 4
|
||||
case Half:
|
||||
return 2
|
||||
case Byte:
|
||||
return 1
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// LoadMemOp returns the OpCode to load a value of given size from memory.
|
||||
func LoadMemOp(size Size) OpCode {
|
||||
return OpCode(LdXClass).SetMode(MemMode).SetSize(size)
|
||||
}
|
||||
|
||||
// LoadMem emits `dst = *(size *)(src + offset)`.
|
||||
func LoadMem(dst, src Register, offset int16, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: LoadMemOp(size),
|
||||
Dst: dst,
|
||||
Src: src,
|
||||
Offset: offset,
|
||||
}
|
||||
}
|
||||
|
||||
// LoadImmOp returns the OpCode to load an immediate of given size.
|
||||
//
|
||||
// As of kernel 4.20, only DWord size is accepted.
|
||||
func LoadImmOp(size Size) OpCode {
|
||||
return OpCode(LdClass).SetMode(ImmMode).SetSize(size)
|
||||
}
|
||||
|
||||
// LoadImm emits `dst = (size)value`.
|
||||
//
|
||||
// As of kernel 4.20, only DWord size is accepted.
|
||||
func LoadImm(dst Register, value int64, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: LoadImmOp(size),
|
||||
Dst: dst,
|
||||
Constant: value,
|
||||
}
|
||||
}
|
||||
|
||||
// LoadMapPtr stores a pointer to a map in dst.
|
||||
func LoadMapPtr(dst Register, fd int) Instruction {
|
||||
if fd < 0 {
|
||||
return Instruction{OpCode: InvalidOpCode}
|
||||
}
|
||||
|
||||
return Instruction{
|
||||
OpCode: LoadImmOp(DWord),
|
||||
Dst: dst,
|
||||
Src: PseudoMapFD,
|
||||
Constant: int64(uint32(fd)),
|
||||
}
|
||||
}
|
||||
|
||||
// LoadMapValue stores a pointer to the value at a certain offset of a map.
|
||||
func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
|
||||
if fd < 0 {
|
||||
return Instruction{OpCode: InvalidOpCode}
|
||||
}
|
||||
|
||||
fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
|
||||
return Instruction{
|
||||
OpCode: LoadImmOp(DWord),
|
||||
Dst: dst,
|
||||
Src: PseudoMapValue,
|
||||
Constant: int64(fdAndOffset),
|
||||
}
|
||||
}
|
||||
|
||||
// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
|
||||
func LoadIndOp(size Size) OpCode {
|
||||
return OpCode(LdClass).SetMode(IndMode).SetSize(size)
|
||||
}
|
||||
|
||||
// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`.
|
||||
func LoadInd(dst, src Register, offset int32, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: LoadIndOp(size),
|
||||
Dst: dst,
|
||||
Src: src,
|
||||
Constant: int64(offset),
|
||||
}
|
||||
}
|
||||
|
||||
// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff.
|
||||
func LoadAbsOp(size Size) OpCode {
|
||||
return OpCode(LdClass).SetMode(AbsMode).SetSize(size)
|
||||
}
|
||||
|
||||
// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`.
|
||||
func LoadAbs(offset int32, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: LoadAbsOp(size),
|
||||
Dst: R0,
|
||||
Constant: int64(offset),
|
||||
}
|
||||
}
|
||||
|
||||
// StoreMemOp returns the OpCode for storing a register of given size in memory.
|
||||
func StoreMemOp(size Size) OpCode {
|
||||
return OpCode(StXClass).SetMode(MemMode).SetSize(size)
|
||||
}
|
||||
|
||||
// StoreMem emits `*(size *)(dst + offset) = src`
|
||||
func StoreMem(dst Register, offset int16, src Register, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: StoreMemOp(size),
|
||||
Dst: dst,
|
||||
Src: src,
|
||||
Offset: offset,
|
||||
}
|
||||
}
|
||||
|
||||
// StoreImmOp returns the OpCode for storing an immediate of given size in memory.
|
||||
func StoreImmOp(size Size) OpCode {
|
||||
return OpCode(StClass).SetMode(MemMode).SetSize(size)
|
||||
}
|
||||
|
||||
// StoreImm emits `*(size *)(dst + offset) = value`.
|
||||
func StoreImm(dst Register, offset int16, value int64, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: StoreImmOp(size),
|
||||
Dst: dst,
|
||||
Offset: offset,
|
||||
Constant: value,
|
||||
}
|
||||
}
|
||||
|
||||
// StoreXAddOp returns the OpCode to atomically add a register to a value in memory.
|
||||
func StoreXAddOp(size Size) OpCode {
|
||||
return OpCode(StXClass).SetMode(XAddMode).SetSize(size)
|
||||
}
|
||||
|
||||
// StoreXAdd atomically adds src to *dst.
|
||||
func StoreXAdd(dst, src Register, size Size) Instruction {
|
||||
return Instruction{
|
||||
OpCode: StoreXAddOp(size),
|
||||
Dst: dst,
|
||||
Src: src,
|
||||
}
|
||||
}
|
||||
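Editor's note: a sketch combining the load/store constructors above into a few instructions; the registers and offsets are arbitrary placeholders.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadMem(asm.R0, asm.R1, 0, asm.Word),     // r0 = *(u32 *)(r1 + 0)
		asm.StoreMem(asm.RFP, -8, asm.R0, asm.DWord), // *(u64 *)(rfp - 8) = r0
		asm.StoreImm(asm.RFP, -16, 0, asm.DWord),     // *(u64 *)(rfp - 16) = 0
		asm.Return(),
	}
	fmt.Printf("%v\n", insns)
}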
80 src/runtime/vendor/github.com/cilium/ebpf/asm/load_store_string.go (generated, vendored, normal file)
@@ -0,0 +1,80 @@
|
||||
// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT.
|
||||
|
||||
package asm
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidMode-255]
|
||||
_ = x[ImmMode-0]
|
||||
_ = x[AbsMode-32]
|
||||
_ = x[IndMode-64]
|
||||
_ = x[MemMode-96]
|
||||
_ = x[XAddMode-192]
|
||||
}
|
||||
|
||||
const (
|
||||
_Mode_name_0 = "ImmMode"
|
||||
_Mode_name_1 = "AbsMode"
|
||||
_Mode_name_2 = "IndMode"
|
||||
_Mode_name_3 = "MemMode"
|
||||
_Mode_name_4 = "XAddMode"
|
||||
_Mode_name_5 = "InvalidMode"
|
||||
)
|
||||
|
||||
func (i Mode) String() string {
|
||||
switch {
|
||||
case i == 0:
|
||||
return _Mode_name_0
|
||||
case i == 32:
|
||||
return _Mode_name_1
|
||||
case i == 64:
|
||||
return _Mode_name_2
|
||||
case i == 96:
|
||||
return _Mode_name_3
|
||||
case i == 192:
|
||||
return _Mode_name_4
|
||||
case i == 255:
|
||||
return _Mode_name_5
|
||||
default:
|
||||
return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidSize-255]
|
||||
_ = x[DWord-24]
|
||||
_ = x[Word-0]
|
||||
_ = x[Half-8]
|
||||
_ = x[Byte-16]
|
||||
}
|
||||
|
||||
const (
|
||||
_Size_name_0 = "Word"
|
||||
_Size_name_1 = "Half"
|
||||
_Size_name_2 = "Byte"
|
||||
_Size_name_3 = "DWord"
|
||||
_Size_name_4 = "InvalidSize"
|
||||
)
|
||||
|
||||
func (i Size) String() string {
|
||||
switch {
|
||||
case i == 0:
|
||||
return _Size_name_0
|
||||
case i == 8:
|
||||
return _Size_name_1
|
||||
case i == 16:
|
||||
return _Size_name_2
|
||||
case i == 24:
|
||||
return _Size_name_3
|
||||
case i == 255:
|
||||
return _Size_name_4
|
||||
default:
|
||||
return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
||||
237 src/runtime/vendor/github.com/cilium/ebpf/asm/opcode.go (generated, vendored, normal file)
@@ -0,0 +1,237 @@
|
||||
package asm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//go:generate stringer -output opcode_string.go -type=Class
|
||||
|
||||
type encoding int
|
||||
|
||||
const (
|
||||
unknownEncoding encoding = iota
|
||||
loadOrStore
|
||||
jumpOrALU
|
||||
)
|
||||
|
||||
// Class of operations
|
||||
//
|
||||
// msb lsb
|
||||
// +---+--+---+
|
||||
// | ?? |CLS|
|
||||
// +---+--+---+
|
||||
type Class uint8
|
||||
|
||||
const classMask OpCode = 0x07
|
||||
|
||||
const (
|
||||
// LdClass load memory
|
||||
LdClass Class = 0x00
|
||||
// LdXClass load memory from constant
|
||||
LdXClass Class = 0x01
|
||||
// StClass load register from memory
|
||||
StClass Class = 0x02
|
||||
// StXClass load register from constant
|
||||
StXClass Class = 0x03
|
||||
// ALUClass arithmetic operators
|
||||
ALUClass Class = 0x04
|
||||
// JumpClass jump operators
|
||||
JumpClass Class = 0x05
|
||||
// ALU64Class arithmetic in 64 bit mode
|
||||
ALU64Class Class = 0x07
|
||||
)
|
||||
|
||||
func (cls Class) encoding() encoding {
|
||||
switch cls {
|
||||
case LdClass, LdXClass, StClass, StXClass:
|
||||
return loadOrStore
|
||||
case ALU64Class, ALUClass, JumpClass:
|
||||
return jumpOrALU
|
||||
default:
|
||||
return unknownEncoding
|
||||
}
|
||||
}
|
||||
|
||||
// OpCode is a packed eBPF opcode.
|
||||
//
|
||||
// Its encoding is defined by a Class value:
|
||||
//
|
||||
// msb lsb
|
||||
// +----+-+---+
|
||||
// | ???? |CLS|
|
||||
// +----+-+---+
|
||||
type OpCode uint8
|
||||
|
||||
// InvalidOpCode is returned by setters on OpCode
|
||||
const InvalidOpCode OpCode = 0xff
|
||||
|
||||
// rawInstructions returns the number of BPF instructions required
|
||||
// to encode this opcode.
|
||||
func (op OpCode) rawInstructions() int {
|
||||
if op.IsDWordLoad() {
|
||||
return 2
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (op OpCode) IsDWordLoad() bool {
|
||||
return op == LoadImmOp(DWord)
|
||||
}
|
||||
|
||||
// Class returns the class of operation.
|
||||
func (op OpCode) Class() Class {
|
||||
return Class(op & classMask)
|
||||
}
|
||||
|
||||
// Mode returns the mode for load and store operations.
|
||||
func (op OpCode) Mode() Mode {
|
||||
if op.Class().encoding() != loadOrStore {
|
||||
return InvalidMode
|
||||
}
|
||||
return Mode(op & modeMask)
|
||||
}
|
||||
|
||||
// Size returns the size for load and store operations.
|
||||
func (op OpCode) Size() Size {
|
||||
if op.Class().encoding() != loadOrStore {
|
||||
return InvalidSize
|
||||
}
|
||||
return Size(op & sizeMask)
|
||||
}
|
||||
|
||||
// Source returns the source for branch and ALU operations.
|
||||
func (op OpCode) Source() Source {
|
||||
if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap {
|
||||
return InvalidSource
|
||||
}
|
||||
return Source(op & sourceMask)
|
||||
}
|
||||
|
||||
// ALUOp returns the ALUOp.
|
||||
func (op OpCode) ALUOp() ALUOp {
|
||||
if op.Class().encoding() != jumpOrALU {
|
||||
return InvalidALUOp
|
||||
}
|
||||
return ALUOp(op & aluMask)
|
||||
}
|
||||
|
||||
// Endianness returns the Endianness for a byte swap instruction.
|
||||
func (op OpCode) Endianness() Endianness {
|
||||
if op.ALUOp() != Swap {
|
||||
return InvalidEndian
|
||||
}
|
||||
return Endianness(op & endianMask)
|
||||
}
|
||||
|
||||
// JumpOp returns the JumpOp.
|
||||
func (op OpCode) JumpOp() JumpOp {
|
||||
if op.Class().encoding() != jumpOrALU {
|
||||
return InvalidJumpOp
|
||||
}
|
||||
return JumpOp(op & jumpMask)
|
||||
}
|
||||
|
||||
// SetMode sets the mode on load and store operations.
|
||||
//
|
||||
// Returns InvalidOpCode if op is of the wrong class.
|
||||
func (op OpCode) SetMode(mode Mode) OpCode {
|
||||
if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) {
|
||||
return InvalidOpCode
|
||||
}
|
||||
return (op & ^modeMask) | OpCode(mode)
|
||||
}
|
||||
|
||||
// SetSize sets the size on load and store operations.
|
||||
//
|
||||
// Returns InvalidOpCode if op is of the wrong class.
|
||||
func (op OpCode) SetSize(size Size) OpCode {
|
||||
if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) {
|
||||
return InvalidOpCode
|
||||
}
|
||||
return (op & ^sizeMask) | OpCode(size)
|
||||
}
|
||||
|
||||
// SetSource sets the source on jump and ALU operations.
|
||||
//
|
||||
// Returns InvalidOpCode if op is of the wrong class.
|
||||
func (op OpCode) SetSource(source Source) OpCode {
|
||||
if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) {
|
||||
return InvalidOpCode
|
||||
}
|
||||
return (op & ^sourceMask) | OpCode(source)
|
||||
}
|
||||
|
||||
// SetALUOp sets the ALUOp on ALU operations.
|
||||
//
|
||||
// Returns InvalidOpCode if op is of the wrong class.
|
||||
func (op OpCode) SetALUOp(alu ALUOp) OpCode {
|
||||
class := op.Class()
|
||||
if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) {
|
||||
return InvalidOpCode
|
||||
}
|
||||
return (op & ^aluMask) | OpCode(alu)
|
||||
}
|
||||
|
||||
// SetJumpOp sets the JumpOp on jump operations.
|
||||
//
|
||||
// Returns InvalidOpCode if op is of the wrong class.
|
||||
func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
|
||||
if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) {
|
||||
return InvalidOpCode
|
||||
}
|
||||
return (op & ^jumpMask) | OpCode(jump)
|
||||
}
|
||||
|
||||
func (op OpCode) String() string {
|
||||
var f strings.Builder
|
||||
|
||||
switch class := op.Class(); class {
|
||||
case LdClass, LdXClass, StClass, StXClass:
|
||||
f.WriteString(strings.TrimSuffix(class.String(), "Class"))
|
||||
|
||||
mode := op.Mode()
|
||||
f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))
|
||||
|
||||
switch op.Size() {
|
||||
case DWord:
|
||||
f.WriteString("DW")
|
||||
case Word:
|
||||
f.WriteString("W")
|
||||
case Half:
|
||||
f.WriteString("H")
|
||||
case Byte:
|
||||
f.WriteString("B")
|
||||
}
|
||||
|
||||
case ALU64Class, ALUClass:
|
||||
f.WriteString(op.ALUOp().String())
|
||||
|
||||
if op.ALUOp() == Swap {
|
||||
// Width for Endian is controlled by Constant
|
||||
f.WriteString(op.Endianness().String())
|
||||
} else {
|
||||
if class == ALUClass {
|
||||
f.WriteString("32")
|
||||
}
|
||||
|
||||
f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
|
||||
}
|
||||
|
||||
case JumpClass:
|
||||
f.WriteString(op.JumpOp().String())
|
||||
if jop := op.JumpOp(); jop != Exit && jop != Call {
|
||||
f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
|
||||
}
|
||||
|
||||
default:
|
||||
fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
|
||||
}
|
||||
|
||||
return f.String()
|
||||
}
|
||||
|
||||
// valid returns true if all bits in value are covered by mask.
|
||||
func valid(value, mask OpCode) bool {
|
||||
return value & ^mask == 0
|
||||
}
|
||||
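Editor's note: a sketch of composing an OpCode with the setters above and of the InvalidOpCode guard; the exact String output is illustrative.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	op := asm.OpCode(asm.JumpClass).SetJumpOp(asm.JEq).SetSource(asm.RegSource)
	fmt.Println(op)          // prints something like "JEqReg"
	fmt.Println(op.JumpOp()) // JEq

	// Setters guard against the wrong class and return InvalidOpCode.
	bad := asm.OpCode(asm.ALUClass).SetJumpOp(asm.JEq)
	fmt.Println(bad == asm.InvalidOpCode) // true
}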
38 src/runtime/vendor/github.com/cilium/ebpf/asm/opcode_string.go (generated, vendored, normal file)
@@ -0,0 +1,38 @@
|
||||
// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.
|
||||
|
||||
package asm
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[LdClass-0]
|
||||
_ = x[LdXClass-1]
|
||||
_ = x[StClass-2]
|
||||
_ = x[StXClass-3]
|
||||
_ = x[ALUClass-4]
|
||||
_ = x[JumpClass-5]
|
||||
_ = x[ALU64Class-7]
|
||||
}
|
||||
|
||||
const (
|
||||
_Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass"
|
||||
_Class_name_1 = "ALU64Class"
|
||||
)
|
||||
|
||||
var (
|
||||
_Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47}
|
||||
)
|
||||
|
||||
func (i Class) String() string {
|
||||
switch {
|
||||
case 0 <= i && i <= 5:
|
||||
return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]]
|
||||
case i == 7:
|
||||
return _Class_name_1
|
||||
default:
|
||||
return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
||||
49 src/runtime/vendor/github.com/cilium/ebpf/asm/register.go (generated, vendored, normal file)
@@ -0,0 +1,49 @@
|
||||
package asm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Register is the source or destination of most operations.
|
||||
type Register uint8
|
||||
|
||||
// R0 contains return values.
|
||||
const R0 Register = 0
|
||||
|
||||
// Registers for function arguments.
|
||||
const (
|
||||
R1 Register = R0 + 1 + iota
|
||||
R2
|
||||
R3
|
||||
R4
|
||||
R5
|
||||
)
|
||||
|
||||
// Callee saved registers preserved by function calls.
|
||||
const (
|
||||
R6 Register = R5 + 1 + iota
|
||||
R7
|
||||
R8
|
||||
R9
|
||||
)
|
||||
|
||||
// Read-only frame pointer to access stack.
|
||||
const (
|
||||
R10 Register = R9 + 1
|
||||
RFP = R10
|
||||
)
|
||||
|
||||
// Pseudo registers used by 64bit loads and jumps
|
||||
const (
|
||||
PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
|
||||
PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
|
||||
PseudoCall = R1 // BPF_PSEUDO_CALL
|
||||
)
|
||||
|
||||
func (r Register) String() string {
|
||||
v := uint8(r)
|
||||
if v == 10 {
|
||||
return "rfp"
|
||||
}
|
||||
return fmt.Sprintf("r%d", v)
|
||||
}
|
||||
616 src/runtime/vendor/github.com/cilium/ebpf/collection.go (generated, vendored, normal file)
@@ -0,0 +1,616 @@
|
||||
package ebpf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/cilium/ebpf/asm"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"github.com/cilium/ebpf/internal/btf"
|
||||
)
|
||||
|
||||
// CollectionOptions control loading a collection into the kernel.
|
||||
//
|
||||
// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions.
|
||||
type CollectionOptions struct {
|
||||
Maps MapOptions
|
||||
Programs ProgramOptions
|
||||
}
|
||||
|
||||
// CollectionSpec describes a collection.
|
||||
type CollectionSpec struct {
|
||||
Maps map[string]*MapSpec
|
||||
Programs map[string]*ProgramSpec
|
||||
}
|
||||
|
||||
// Copy returns a recursive copy of the spec.
|
||||
func (cs *CollectionSpec) Copy() *CollectionSpec {
|
||||
if cs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cpy := CollectionSpec{
|
||||
Maps: make(map[string]*MapSpec, len(cs.Maps)),
|
||||
Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
|
||||
}
|
||||
|
||||
for name, spec := range cs.Maps {
|
||||
cpy.Maps[name] = spec.Copy()
|
||||
}
|
||||
|
||||
for name, spec := range cs.Programs {
|
||||
cpy.Programs[name] = spec.Copy()
|
||||
}
|
||||
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// RewriteMaps replaces all references to specific maps.
|
||||
//
|
||||
// Use this function to use pre-existing maps instead of creating new ones
|
||||
// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
|
||||
//
|
||||
// Returns an error if a named map isn't used in at least one program.
|
||||
func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
|
||||
for symbol, m := range maps {
|
||||
// have we seen a program that uses this symbol / map
|
||||
seen := false
|
||||
fd := m.FD()
|
||||
for progName, progSpec := range cs.Programs {
|
||||
err := progSpec.Instructions.RewriteMapPtr(symbol, fd)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
seen = true
|
||||
|
||||
case asm.IsUnreferencedSymbol(err):
|
||||
// Not all programs need to use the map
|
||||
|
||||
default:
|
||||
return fmt.Errorf("program %s: %w", progName, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !seen {
|
||||
return fmt.Errorf("map %s not referenced by any programs", symbol)
|
||||
}
|
||||
|
||||
// Prevent NewCollection from creating rewritten maps
|
||||
delete(cs.Maps, symbol)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
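Editor's note: a hedged sketch of sharing a pre-existing map via RewriteMaps; how m was obtained (pinned, created earlier, ...) is outside this snippet, and "events" is a placeholder symbol name.

package example

import "github.com/cilium/ebpf"

// reuseEventsMap wires a map obtained elsewhere into a spec before loading it,
// so NewCollection reuses it instead of creating a fresh "events" map.
func reuseEventsMap(spec *ebpf.CollectionSpec, m *ebpf.Map) (*ebpf.Collection, error) {
	if err := spec.RewriteMaps(map[string]*ebpf.Map{"events": m}); err != nil {
		return nil, err
	}
	return ebpf.NewCollection(spec)
}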
|
||||
// RewriteConstants replaces the value of multiple constants.
|
||||
//
|
||||
// The constant must be defined like so in the C program:
|
||||
//
|
||||
// volatile const type foobar;
|
||||
// volatile const type foobar = default;
|
||||
//
|
||||
// Replacement values must be of the same length as the C sizeof(type).
|
||||
// If necessary, they are marshalled according to the same rules as
|
||||
// map values.
|
||||
//
|
||||
// From Linux 5.5 the verifier will use constants to eliminate dead code.
|
||||
//
|
||||
// Returns an error if a constant doesn't exist.
|
||||
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
|
||||
rodata := cs.Maps[".rodata"]
|
||||
if rodata == nil {
|
||||
return errors.New("missing .rodata section")
|
||||
}
|
||||
|
||||
if rodata.BTF == nil {
|
||||
return errors.New(".rodata section has no BTF")
|
||||
}
|
||||
|
||||
if n := len(rodata.Contents); n != 1 {
|
||||
return fmt.Errorf("expected one key in .rodata, found %d", n)
|
||||
}
|
||||
|
||||
kv := rodata.Contents[0]
|
||||
value, ok := kv.Value.([]byte)
|
||||
if !ok {
|
||||
return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
|
||||
}
|
||||
|
||||
buf := make([]byte, len(value))
|
||||
copy(buf, value)
|
||||
|
||||
err := patchValue(buf, btf.MapValue(rodata.BTF), consts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rodata.Contents[0] = MapKV{kv.Key, buf}
|
||||
return nil
|
||||
}
|
||||
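Editor's note: a sketch of patching a `volatile const` before load; the constant name "debug_enabled" is a placeholder, and the Go value must match the C sizeof(type) as the comment above requires.

package example

import "github.com/cilium/ebpf"

// setDebugFlag rewrites a hypothetical debug_enabled constant in the .rodata section.
func setDebugFlag(spec *ebpf.CollectionSpec) error {
	return spec.RewriteConstants(map[string]interface{}{
		"debug_enabled": uint32(1),
	})
}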
|
||||
// Assign the contents of a CollectionSpec to a struct.
|
||||
//
|
||||
// This function is a short-cut to manually checking the presence
|
||||
// of maps and programs in a collection spec. Consider using bpf2go if this
|
||||
// sounds useful.
|
||||
//
|
||||
// The argument must be a pointer to a struct. A field of the
|
||||
// struct is updated with values from Programs or Maps if it
|
||||
// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
|
||||
// The tag gives the name of the program or map as found in
|
||||
// the CollectionSpec.
|
||||
//
|
||||
// struct {
|
||||
// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
|
||||
// Bar *ebpf.MapSpec `ebpf:"bar_map"`
|
||||
// Ignored int
|
||||
// }
|
||||
//
|
||||
// Returns an error if any of the fields can't be found, or
|
||||
// if the same map or program is assigned multiple times.
|
||||
func (cs *CollectionSpec) Assign(to interface{}) error {
|
||||
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
|
||||
switch typ {
|
||||
case reflect.TypeOf((*ProgramSpec)(nil)):
|
||||
p := cs.Programs[name]
|
||||
if p == nil {
|
||||
return reflect.Value{}, fmt.Errorf("missing program %q", name)
|
||||
}
|
||||
return reflect.ValueOf(p), nil
|
||||
case reflect.TypeOf((*MapSpec)(nil)):
|
||||
m := cs.Maps[name]
|
||||
if m == nil {
|
||||
return reflect.Value{}, fmt.Errorf("missing map %q", name)
|
||||
}
|
||||
return reflect.ValueOf(m), nil
|
||||
default:
|
||||
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
|
||||
}
|
||||
}
|
||||
|
||||
return assignValues(to, valueOf)
|
||||
}
|
||||
|
||||
// LoadAndAssign maps and programs into the kernel and assign them to a struct.
|
||||
//
|
||||
// This function is a short-cut to manually checking the presence
|
||||
// of maps and programs in a collection spec. Consider using bpf2go if this
|
||||
// sounds useful.
|
||||
//
|
||||
// The argument must be a pointer to a struct. A field of the
|
||||
// struct is updated with values from Programs or Maps if it
|
||||
// has an `ebpf` tag and its type is *Program or *Map.
|
||||
// The tag gives the name of the program or map as found in
|
||||
// the CollectionSpec.
|
||||
//
|
||||
// struct {
|
||||
// Foo *ebpf.Program `ebpf:"xdp_foo"`
|
||||
// Bar *ebpf.Map `ebpf:"bar_map"`
|
||||
// Ignored int
|
||||
// }
|
||||
//
|
||||
// opts may be nil.
|
||||
//
|
||||
// Returns an error if any of the fields can't be found, or
|
||||
// if the same map or program is assigned multiple times.
|
||||
func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
|
||||
if opts == nil {
|
||||
opts = &CollectionOptions{}
|
||||
}
|
||||
|
||||
loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts)
|
||||
defer cleanup()
|
||||
|
||||
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
|
||||
switch typ {
|
||||
case reflect.TypeOf((*Program)(nil)):
|
||||
p, err := loadProgram(name)
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
return reflect.ValueOf(p), nil
|
||||
case reflect.TypeOf((*Map)(nil)):
|
||||
m, err := loadMap(name)
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
return reflect.ValueOf(m), nil
|
||||
default:
|
||||
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
|
||||
}
|
||||
}
|
||||
|
||||
if err := assignValues(to, valueOf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done()
|
||||
return nil
|
||||
}
|
||||
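Editor's note: a sketch of the struct-tag convention documented above; the tag values "xdp_foo" and "bar_map" are taken straight from that doc comment and are placeholders for real program and map names.

package example

import "github.com/cilium/ebpf"

// objects follows the struct-tag convention from the comment above.
type objects struct {
	Foo *ebpf.Program `ebpf:"xdp_foo"`
	Bar *ebpf.Map     `ebpf:"bar_map"`
}

// loadObjects loads only what the struct asks for and hands back the handles.
func loadObjects(spec *ebpf.CollectionSpec) (*objects, error) {
	var objs objects
	if err := spec.LoadAndAssign(&objs, nil); err != nil { // nil options are allowed
		return nil, err
	}
	return &objs, nil
}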
|
||||
// Collection is a collection of Programs and Maps associated
|
||||
// with their symbols
|
||||
type Collection struct {
|
||||
Programs map[string]*Program
|
||||
Maps map[string]*Map
|
||||
}
|
||||
|
||||
// NewCollection creates a Collection from a specification.
|
||||
func NewCollection(spec *CollectionSpec) (*Collection, error) {
|
||||
return NewCollectionWithOptions(spec, CollectionOptions{})
|
||||
}
|
||||
|
||||
// NewCollectionWithOptions creates a Collection from a specification.
|
||||
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
|
||||
loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts)
|
||||
defer cleanup()
|
||||
|
||||
for mapName := range spec.Maps {
|
||||
_, err := loadMap(mapName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for progName := range spec.Programs {
|
||||
_, err := loadProgram(progName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
maps, progs := done()
|
||||
return &Collection{
|
||||
progs,
|
||||
maps,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type handleCache struct {
|
||||
btfHandles map[*btf.Spec]*btf.Handle
|
||||
btfSpecs map[io.ReaderAt]*btf.Spec
|
||||
}
|
||||
|
||||
func newHandleCache() *handleCache {
|
||||
return &handleCache{
|
||||
btfHandles: make(map[*btf.Spec]*btf.Handle),
|
||||
btfSpecs: make(map[io.ReaderAt]*btf.Spec),
|
||||
}
|
||||
}
|
||||
|
||||
func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
|
||||
if hc.btfHandles[spec] != nil {
|
||||
return hc.btfHandles[spec], nil
|
||||
}
|
||||
|
||||
handle, err := btf.NewHandle(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hc.btfHandles[spec] = handle
|
||||
return handle, nil
|
||||
}
|
||||
|
||||
func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) {
|
||||
if hc.btfSpecs[rd] != nil {
|
||||
return hc.btfSpecs[rd], nil
|
||||
}
|
||||
|
||||
spec, err := btf.LoadSpecFromReader(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hc.btfSpecs[rd] = spec
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func (hc handleCache) close() {
|
||||
for _, handle := range hc.btfHandles {
|
||||
handle.Close()
|
||||
}
|
||||
hc.btfHandles = nil
|
||||
hc.btfSpecs = nil
|
||||
}
|
||||
|
||||
func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
|
||||
loadMap func(string) (*Map, error),
|
||||
loadProgram func(string) (*Program, error),
|
||||
done func() (map[string]*Map, map[string]*Program),
|
||||
cleanup func(),
|
||||
) {
|
||||
var (
|
||||
maps = make(map[string]*Map)
|
||||
progs = make(map[string]*Program)
|
||||
handles = newHandleCache()
|
||||
skipMapsAndProgs = false
|
||||
)
|
||||
|
||||
cleanup = func() {
|
||||
handles.close()
|
||||
|
||||
if skipMapsAndProgs {
|
||||
return
|
||||
}
|
||||
|
||||
for _, m := range maps {
|
||||
m.Close()
|
||||
}
|
||||
|
||||
for _, p := range progs {
|
||||
p.Close()
|
||||
}
|
||||
}
|
||||
|
||||
done = func() (map[string]*Map, map[string]*Program) {
|
||||
skipMapsAndProgs = true
|
||||
return maps, progs
|
||||
}
|
||||
|
||||
loadMap = func(mapName string) (*Map, error) {
|
||||
if m := maps[mapName]; m != nil {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
mapSpec := coll.Maps[mapName]
|
||||
if mapSpec == nil {
|
||||
return nil, fmt.Errorf("missing map %s", mapName)
|
||||
}
|
||||
|
||||
m, err := newMapWithOptions(mapSpec, opts.Maps, handles)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("map %s: %w", mapName, err)
|
||||
}
|
||||
|
||||
maps[mapName] = m
|
||||
return m, nil
|
||||
}
|
||||
|
||||
loadProgram = func(progName string) (*Program, error) {
|
||||
if prog := progs[progName]; prog != nil {
|
||||
return prog, nil
|
||||
}
|
||||
|
||||
progSpec := coll.Programs[progName]
|
||||
if progSpec == nil {
|
||||
return nil, fmt.Errorf("unknown program %s", progName)
|
||||
}
|
||||
|
||||
progSpec = progSpec.Copy()
|
||||
|
||||
// Rewrite any reference to a valid map.
|
||||
for i := range progSpec.Instructions {
|
||||
ins := &progSpec.Instructions[i]
|
||||
|
||||
if !ins.IsLoadFromMap() || ins.Reference == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if uint32(ins.Constant) != math.MaxUint32 {
|
||||
// Don't overwrite maps already rewritten, users can
|
||||
// rewrite programs in the spec themselves
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := loadMap(ins.Reference)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("program %s: %w", progName, err)
|
||||
}
|
||||
|
||||
fd := m.FD()
|
||||
if fd < 0 {
|
||||
return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
|
||||
}
|
||||
if err := ins.RewriteMapPtr(m.FD()); err != nil {
|
||||
return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
|
||||
}
|
||||
}
|
||||
|
||||
prog, err := newProgramWithOptions(progSpec, opts.Programs, handles)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("program %s: %w", progName, err)
|
||||
}
|
||||
|
||||
progs[progName] = prog
|
||||
return prog, nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// LoadCollection parses an object file and converts it to a collection.
|
||||
func LoadCollection(file string) (*Collection, error) {
|
||||
spec, err := LoadCollectionSpec(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewCollection(spec)
|
||||
}
|
||||
|
||||
// Close frees all maps and programs associated with the collection.
|
||||
//
|
||||
// The collection mustn't be used afterwards.
|
||||
func (coll *Collection) Close() {
|
||||
for _, prog := range coll.Programs {
|
||||
prog.Close()
|
||||
}
|
||||
for _, m := range coll.Maps {
|
||||
m.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// DetachMap removes the named map from the Collection.
|
||||
//
|
||||
// This means that a later call to Close() will not affect this map.
|
||||
//
|
||||
// Returns nil if no map of that name exists.
|
||||
func (coll *Collection) DetachMap(name string) *Map {
|
||||
m := coll.Maps[name]
|
||||
delete(coll.Maps, name)
|
||||
return m
|
||||
}
|
||||
|
||||
// DetachProgram removes the named program from the Collection.
|
||||
//
|
||||
// This means that a later call to Close() will not affect this program.
|
||||
//
|
||||
// Returns nil if no program of that name exists.
|
||||
func (coll *Collection) DetachProgram(name string) *Program {
|
||||
p := coll.Programs[name]
|
||||
delete(coll.Programs, name)
|
||||
return p
|
||||
}
|
||||
|
||||
// Assign the contents of a collection to a struct.
|
||||
//
|
||||
// Deprecated: use CollectionSpec.Assign instead. It provides the same
|
||||
// functionality but creates only the maps and programs requested.
|
||||
func (coll *Collection) Assign(to interface{}) error {
|
||||
assignedMaps := make(map[string]struct{})
|
||||
assignedPrograms := make(map[string]struct{})
|
||||
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
|
||||
switch typ {
|
||||
case reflect.TypeOf((*Program)(nil)):
|
||||
p := coll.Programs[name]
|
||||
if p == nil {
|
||||
return reflect.Value{}, fmt.Errorf("missing program %q", name)
|
||||
}
|
||||
assignedPrograms[name] = struct{}{}
|
||||
return reflect.ValueOf(p), nil
|
||||
case reflect.TypeOf((*Map)(nil)):
|
||||
m := coll.Maps[name]
|
||||
if m == nil {
|
||||
return reflect.Value{}, fmt.Errorf("missing map %q", name)
|
||||
}
|
||||
assignedMaps[name] = struct{}{}
|
||||
return reflect.ValueOf(m), nil
|
||||
default:
|
||||
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
|
||||
}
|
||||
}
|
||||
|
||||
if err := assignValues(to, valueOf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for name := range assignedPrograms {
|
||||
coll.DetachProgram(name)
|
||||
}
|
||||
|
||||
for name := range assignedMaps {
|
||||
coll.DetachMap(name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error {
|
||||
type structField struct {
|
||||
reflect.StructField
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
var (
|
||||
fields []structField
|
||||
visitedTypes = make(map[reflect.Type]bool)
|
||||
flattenStruct func(reflect.Value) error
|
||||
)
|
||||
|
||||
flattenStruct = func(structVal reflect.Value) error {
|
||||
structType := structVal.Type()
|
||||
if structType.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("%s is not a struct", structType)
|
||||
}
|
||||
|
||||
if visitedTypes[structType] {
|
||||
return fmt.Errorf("recursion on type %s", structType)
|
||||
}
|
||||
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
field := structField{structType.Field(i), structVal.Field(i)}
|
||||
|
||||
name := field.Tag.Get("ebpf")
|
||||
if name != "" {
|
||||
fields = append(fields, field)
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
switch field.Type.Kind() {
|
||||
case reflect.Ptr:
|
||||
if field.Type.Elem().Kind() != reflect.Struct {
|
||||
continue
|
||||
}
|
||||
|
||||
if field.value.IsNil() {
|
||||
return fmt.Errorf("nil pointer to %s", structType)
|
||||
}
|
||||
|
||||
err = flattenStruct(field.value.Elem())
|
||||
|
||||
case reflect.Struct:
|
||||
err = flattenStruct(field.value)
|
||||
|
||||
default:
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("field %s: %w", field.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
toValue := reflect.ValueOf(to)
|
||||
if toValue.Type().Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("%T is not a pointer to struct", to)
|
||||
}
|
||||
|
||||
if toValue.IsNil() {
|
||||
return fmt.Errorf("nil pointer to %T", to)
|
||||
}
|
||||
|
||||
if err := flattenStruct(toValue.Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
type elem struct {
|
||||
// Either *Map or *Program
|
||||
typ reflect.Type
|
||||
name string
|
||||
}
|
||||
|
||||
assignedTo := make(map[elem]string)
|
||||
for _, field := range fields {
|
||||
name := field.Tag.Get("ebpf")
|
||||
if strings.Contains(name, ",") {
|
||||
return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
|
||||
}
|
||||
|
||||
e := elem{field.Type, name}
|
||||
if assignedField := assignedTo[e]; assignedField != "" {
|
||||
return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField)
|
||||
}
|
||||
|
||||
value, err := valueOf(field.Type, name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("field %s: %w", field.Name, err)
|
||||
}
|
||||
|
||||
if !field.value.CanSet() {
|
||||
return fmt.Errorf("field %s: can't set value", field.Name)
|
||||
}
|
||||
|
||||
field.value.Set(value)
|
||||
assignedTo[e] = field.Name
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
16
src/runtime/vendor/github.com/cilium/ebpf/doc.go
generated
vendored
Normal file
16
src/runtime/vendor/github.com/cilium/ebpf/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Package ebpf is a toolkit for working with eBPF programs.
|
||||
//
|
||||
// eBPF programs are small snippets of code which are executed directly
|
||||
// in a VM in the Linux kernel, which makes them very fast and flexible.
|
||||
// Many Linux subsystems now accept eBPF programs. This makes it possible
|
||||
// to implement highly application specific logic inside the kernel,
|
||||
// without having to modify the actual kernel itself.
|
||||
//
|
||||
// This package is designed for long-running processes which
|
||||
// want to use eBPF to implement part of their application logic. It has no
|
||||
// run-time dependencies outside of the library and the Linux kernel itself.
|
||||
// eBPF code should be compiled ahead of time using clang, and shipped with
|
||||
// your application as any other resource.
|
||||
//
|
||||
// Use the link subpackage to attach a loaded program to a hook in the kernel.
|
||||
package ebpf
|
||||
953
src/runtime/vendor/github.com/cilium/ebpf/elf_reader.go
generated
vendored
Normal file
953
src/runtime/vendor/github.com/cilium/ebpf/elf_reader.go
generated
vendored
Normal file
@@ -0,0 +1,953 @@
|
||||
package ebpf
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"debug/elf"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/cilium/ebpf/asm"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"github.com/cilium/ebpf/internal/btf"
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
// elfCode is a convenience to reduce the amount of arguments that have to
|
||||
// be passed around explicitly. You should treat it's contents as immutable.
|
||||
type elfCode struct {
|
||||
*internal.SafeELFFile
|
||||
sections map[elf.SectionIndex]*elfSection
|
||||
license string
|
||||
version uint32
|
||||
btf *btf.Spec
|
||||
}
|
||||
|
||||
// LoadCollectionSpec parses an ELF file into a CollectionSpec.
|
||||
func LoadCollectionSpec(file string) (*CollectionSpec, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
spec, err := LoadCollectionSpecFromReader(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("file %s: %w", file, err)
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
|
||||
func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
||||
f, err := internal.NewSafeELFFile(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var (
|
||||
licenseSection *elf.Section
|
||||
versionSection *elf.Section
|
||||
sections = make(map[elf.SectionIndex]*elfSection)
|
||||
relSections = make(map[elf.SectionIndex]*elf.Section)
|
||||
)
|
||||
|
||||
// This is the target of relocations generated by inline assembly.
|
||||
sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection)
|
||||
|
||||
// Collect all the sections we're interested in. This includes relocations
|
||||
// which we parse later.
|
||||
for i, sec := range f.Sections {
|
||||
idx := elf.SectionIndex(i)
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(sec.Name, "license"):
|
||||
licenseSection = sec
|
||||
case strings.HasPrefix(sec.Name, "version"):
|
||||
versionSection = sec
|
||||
case strings.HasPrefix(sec.Name, "maps"):
|
||||
sections[idx] = newElfSection(sec, mapSection)
|
||||
case sec.Name == ".maps":
|
||||
sections[idx] = newElfSection(sec, btfMapSection)
|
||||
case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"):
|
||||
sections[idx] = newElfSection(sec, dataSection)
|
||||
case sec.Type == elf.SHT_REL:
|
||||
// Store relocations under the section index of the target
|
||||
relSections[elf.SectionIndex(sec.Info)] = sec
|
||||
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
|
||||
sections[idx] = newElfSection(sec, programSection)
|
||||
}
|
||||
}
|
||||
|
||||
license, err := loadLicense(licenseSection)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load license: %w", err)
|
||||
}
|
||||
|
||||
version, err := loadVersion(versionSection, f.ByteOrder)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load version: %w", err)
|
||||
}
|
||||
|
||||
btfSpec, err := btf.LoadSpecFromReader(rd)
|
||||
if err != nil && !errors.Is(err, btf.ErrNotFound) {
|
||||
return nil, fmt.Errorf("load BTF: %w", err)
|
||||
}
|
||||
|
||||
// Assign symbols to all the sections we're interested in.
|
||||
symbols, err := f.Symbols()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load symbols: %v", err)
|
||||
}
|
||||
|
||||
for _, symbol := range symbols {
|
||||
idx := symbol.Section
|
||||
symType := elf.ST_TYPE(symbol.Info)
|
||||
|
||||
section := sections[idx]
|
||||
if section == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Older versions of LLVM don't tag symbols correctly, so keep
|
||||
// all NOTYPE ones.
|
||||
keep := symType == elf.STT_NOTYPE
|
||||
switch section.kind {
|
||||
case mapSection, btfMapSection, dataSection:
|
||||
keep = keep || symType == elf.STT_OBJECT
|
||||
case programSection:
|
||||
keep = keep || symType == elf.STT_FUNC
|
||||
}
|
||||
if !keep || symbol.Name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
section.symbols[symbol.Value] = symbol
|
||||
}
|
||||
|
||||
ec := &elfCode{
|
||||
SafeELFFile: f,
|
||||
sections: sections,
|
||||
license: license,
|
||||
version: version,
|
||||
btf: btfSpec,
|
||||
}
|
||||
|
||||
// Go through relocation sections, and parse the ones for sections we're
|
||||
// interested in. Make sure that relocations point at valid sections.
|
||||
for idx, relSection := range relSections {
|
||||
section := sections[idx]
|
||||
if section == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
rels, err := ec.loadRelocations(relSection, symbols)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err)
|
||||
}
|
||||
|
||||
for _, rel := range rels {
|
||||
target := sections[rel.Section]
|
||||
if target == nil {
|
||||
return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
|
||||
}
|
||||
|
||||
if target.Flags&elf.SHF_STRINGS > 0 {
|
||||
return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
|
||||
}
|
||||
|
||||
target.references++
|
||||
}
|
||||
|
||||
section.relocations = rels
|
||||
}
|
||||
|
||||
// Collect all the various ways to define maps.
|
||||
maps := make(map[string]*MapSpec)
|
||||
if err := ec.loadMaps(maps); err != nil {
|
||||
return nil, fmt.Errorf("load maps: %w", err)
|
||||
}
|
||||
|
||||
if err := ec.loadBTFMaps(maps); err != nil {
|
||||
return nil, fmt.Errorf("load BTF maps: %w", err)
|
||||
}
|
||||
|
||||
if err := ec.loadDataSections(maps); err != nil {
|
||||
return nil, fmt.Errorf("load data sections: %w", err)
|
||||
}
|
||||
|
||||
// Finally, collect programs and link them.
|
||||
progs, err := ec.loadPrograms()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load programs: %w", err)
|
||||
}
|
||||
|
||||
return &CollectionSpec{maps, progs}, nil
|
||||
}
|
||||
|
||||
func loadLicense(sec *elf.Section) (string, error) {
|
||||
if sec == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
data, err := sec.Data()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("section %s: %v", sec.Name, err)
|
||||
}
|
||||
return string(bytes.TrimRight(data, "\000")), nil
|
||||
}
|
||||
|
||||
func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
|
||||
if sec == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var version uint32
|
||||
if err := binary.Read(sec.Open(), bo, &version); err != nil {
|
||||
return 0, fmt.Errorf("section %s: %v", sec.Name, err)
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
type elfSectionKind int
|
||||
|
||||
const (
|
||||
undefSection elfSectionKind = iota
|
||||
mapSection
|
||||
btfMapSection
|
||||
programSection
|
||||
dataSection
|
||||
)
|
||||
|
||||
type elfSection struct {
|
||||
*elf.Section
|
||||
kind elfSectionKind
|
||||
// Offset from the start of the section to a symbol
|
||||
symbols map[uint64]elf.Symbol
|
||||
// Offset from the start of the section to a relocation, which points at
|
||||
// a symbol in another section.
|
||||
relocations map[uint64]elf.Symbol
|
||||
// The number of relocations pointing at this section.
|
||||
references int
|
||||
}
|
||||
|
||||
func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection {
|
||||
return &elfSection{
|
||||
section,
|
||||
kind,
|
||||
make(map[uint64]elf.Symbol),
|
||||
make(map[uint64]elf.Symbol),
|
||||
0,
|
||||
}
|
||||
}
|
||||
|
||||
func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) {
|
||||
var (
|
||||
progs []*ProgramSpec
|
||||
libs []*ProgramSpec
|
||||
)
|
||||
|
||||
for _, sec := range ec.sections {
|
||||
if sec.kind != programSection {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(sec.symbols) == 0 {
|
||||
return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
|
||||
}
|
||||
|
||||
funcSym, ok := sec.symbols[0]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("section %v: no label at start", sec.Name)
|
||||
}
|
||||
|
||||
insns, length, err := ec.loadInstructions(sec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
|
||||
}
|
||||
|
||||
progType, attachType, progFlags, attachTo := getProgType(sec.Name)
|
||||
|
||||
spec := &ProgramSpec{
|
||||
Name: funcSym.Name,
|
||||
Type: progType,
|
||||
Flags: progFlags,
|
||||
AttachType: attachType,
|
||||
AttachTo: attachTo,
|
||||
License: ec.license,
|
||||
KernelVersion: ec.version,
|
||||
Instructions: insns,
|
||||
ByteOrder: ec.ByteOrder,
|
||||
}
|
||||
|
||||
if ec.btf != nil {
|
||||
spec.BTF, err = ec.btf.Program(sec.Name, length)
|
||||
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
|
||||
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if spec.Type == UnspecifiedProgram {
|
||||
// There is no single name we can use for "library" sections,
|
||||
// since they may contain multiple functions. We'll decode the
|
||||
// labels they contain later on, and then link sections that way.
|
||||
libs = append(libs, spec)
|
||||
} else {
|
||||
progs = append(progs, spec)
|
||||
}
|
||||
}
|
||||
|
||||
res := make(map[string]*ProgramSpec, len(progs))
|
||||
for _, prog := range progs {
|
||||
err := link(prog, libs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("program %s: %w", prog.Name, err)
|
||||
}
|
||||
res[prog.Name] = prog
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) {
|
||||
var (
|
||||
r = bufio.NewReader(section.Open())
|
||||
insns asm.Instructions
|
||||
offset uint64
|
||||
)
|
||||
for {
|
||||
var ins asm.Instruction
|
||||
n, err := ins.Unmarshal(r, ec.ByteOrder)
|
||||
if err == io.EOF {
|
||||
return insns, offset, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
|
||||
}
|
||||
|
||||
ins.Symbol = section.symbols[offset].Name
|
||||
|
||||
if rel, ok := section.relocations[offset]; ok {
|
||||
if err = ec.relocateInstruction(&ins, rel); err != nil {
|
||||
return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err)
|
||||
}
|
||||
}
|
||||
|
||||
insns = append(insns, ins)
|
||||
offset += n
|
||||
}
|
||||
}
|
||||
|
||||
func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
|
||||
var (
|
||||
typ = elf.ST_TYPE(rel.Info)
|
||||
bind = elf.ST_BIND(rel.Info)
|
||||
name = rel.Name
|
||||
)
|
||||
|
||||
target := ec.sections[rel.Section]
|
||||
|
||||
switch target.kind {
|
||||
case mapSection, btfMapSection:
|
||||
if bind != elf.STB_GLOBAL {
|
||||
return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name)
|
||||
}
|
||||
|
||||
if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
|
||||
// STT_NOTYPE is generated on clang < 8 which doesn't tag
|
||||
// relocations appropriately.
|
||||
return fmt.Errorf("map load: incorrect relocation type %v", typ)
|
||||
}
|
||||
|
||||
ins.Src = asm.PseudoMapFD
|
||||
|
||||
// Mark the instruction as needing an update when creating the
|
||||
// collection.
|
||||
if err := ins.RewriteMapPtr(-1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case dataSection:
|
||||
var offset uint32
|
||||
switch typ {
|
||||
case elf.STT_SECTION:
|
||||
if bind != elf.STB_LOCAL {
|
||||
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
|
||||
}
|
||||
|
||||
// This is really a reference to a static symbol, which clang doesn't
|
||||
// emit a symbol table entry for. Instead it encodes the offset in
|
||||
// the instruction itself.
|
||||
offset = uint32(uint64(ins.Constant))
|
||||
|
||||
case elf.STT_OBJECT:
|
||||
if bind != elf.STB_GLOBAL {
|
||||
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
|
||||
}
|
||||
|
||||
offset = uint32(rel.Value)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
|
||||
}
|
||||
|
||||
// We rely on using the name of the data section as the reference. It
|
||||
// would be nicer to keep the real name in case of an STT_OBJECT, but
|
||||
// it's not clear how to encode that into Instruction.
|
||||
name = target.Name
|
||||
|
||||
// The kernel expects the offset in the second basic BPF instruction.
|
||||
ins.Constant = int64(uint64(offset) << 32)
|
||||
ins.Src = asm.PseudoMapValue
|
||||
|
||||
// Mark the instruction as needing an update when creating the
|
||||
// collection.
|
||||
if err := ins.RewriteMapPtr(-1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case programSection:
|
||||
if ins.OpCode.JumpOp() != asm.Call {
|
||||
return fmt.Errorf("not a call instruction: %s", ins)
|
||||
}
|
||||
|
||||
if ins.Src != asm.PseudoCall {
|
||||
return fmt.Errorf("call: %s: incorrect source register", name)
|
||||
}
|
||||
|
||||
switch typ {
|
||||
case elf.STT_NOTYPE, elf.STT_FUNC:
|
||||
if bind != elf.STB_GLOBAL {
|
||||
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
|
||||
}
|
||||
|
||||
case elf.STT_SECTION:
|
||||
if bind != elf.STB_LOCAL {
|
||||
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
|
||||
}
|
||||
|
||||
// The function we want to call is in the indicated section,
|
||||
// at the offset encoded in the instruction itself. Reverse
|
||||
// the calculation to find the real function we're looking for.
|
||||
// A value of -1 references the first instruction in the section.
|
||||
offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
|
||||
if offset < 0 {
|
||||
return fmt.Errorf("call: %s: invalid offset %d", name, offset)
|
||||
}
|
||||
|
||||
sym, ok := target.symbols[uint64(offset)]
|
||||
if !ok {
|
||||
return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
|
||||
}
|
||||
|
||||
ins.Constant = -1
|
||||
name = sym.Name
|
||||
|
||||
default:
|
||||
return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
|
||||
}
|
||||
|
||||
case undefSection:
|
||||
if bind != elf.STB_GLOBAL {
|
||||
return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
|
||||
}
|
||||
|
||||
if typ != elf.STT_NOTYPE {
|
||||
return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
|
||||
}
|
||||
|
||||
// There is nothing to do here but set ins.Reference.
|
||||
|
||||
default:
|
||||
return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
|
||||
}
|
||||
|
||||
ins.Reference = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
|
||||
for _, sec := range ec.sections {
|
||||
if sec.kind != mapSection {
|
||||
continue
|
||||
}
|
||||
|
||||
nSym := len(sec.symbols)
|
||||
if nSym == 0 {
|
||||
return fmt.Errorf("section %v: no symbols", sec.Name)
|
||||
}
|
||||
|
||||
if sec.Size%uint64(nSym) != 0 {
|
||||
return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
|
||||
}
|
||||
|
||||
var (
|
||||
r = bufio.NewReader(sec.Open())
|
||||
size = sec.Size / uint64(nSym)
|
||||
)
|
||||
for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size {
|
||||
mapSym, ok := sec.symbols[offset]
|
||||
if !ok {
|
||||
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
|
||||
}
|
||||
|
||||
mapName := mapSym.Name
|
||||
if maps[mapName] != nil {
|
||||
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
|
||||
}
|
||||
|
||||
lr := io.LimitReader(r, int64(size))
|
||||
|
||||
spec := MapSpec{
|
||||
Name: SanitizeName(mapName, -1),
|
||||
}
|
||||
switch {
|
||||
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
|
||||
return fmt.Errorf("map %s: missing type", mapName)
|
||||
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
|
||||
return fmt.Errorf("map %s: missing key size", mapName)
|
||||
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
|
||||
return fmt.Errorf("map %s: missing value size", mapName)
|
||||
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
|
||||
return fmt.Errorf("map %s: missing max entries", mapName)
|
||||
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
|
||||
return fmt.Errorf("map %s: missing flags", mapName)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
|
||||
return fmt.Errorf("map %s: unknown and non-zero fields in definition", mapName)
|
||||
}
|
||||
|
||||
if err := spec.clampPerfEventArraySize(); err != nil {
|
||||
return fmt.Errorf("map %s: %w", mapName, err)
|
||||
}
|
||||
|
||||
maps[mapName] = &spec
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
|
||||
for _, sec := range ec.sections {
|
||||
if sec.kind != btfMapSection {
|
||||
continue
|
||||
}
|
||||
|
||||
if ec.btf == nil {
|
||||
return fmt.Errorf("missing BTF")
|
||||
}
|
||||
|
||||
_, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open()))
|
||||
if err != nil {
|
||||
return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported)
|
||||
}
|
||||
|
||||
var ds btf.Datasec
|
||||
if err := ec.btf.FindType(sec.Name, &ds); err != nil {
|
||||
return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err)
|
||||
}
|
||||
|
||||
for _, vs := range ds.Vars {
|
||||
v, ok := vs.Type.(*btf.Var)
|
||||
if !ok {
|
||||
return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type)
|
||||
}
|
||||
name := string(v.Name)
|
||||
|
||||
if maps[name] != nil {
|
||||
return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
|
||||
}
|
||||
|
||||
mapStruct, ok := v.Type.(*btf.Struct)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected struct, got %s", v.Type)
|
||||
}
|
||||
|
||||
mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("map %v: %w", name, err)
|
||||
}
|
||||
|
||||
if err := mapSpec.clampPerfEventArraySize(); err != nil {
|
||||
return fmt.Errorf("map %v: %w", name, err)
|
||||
}
|
||||
|
||||
maps[name] = mapSpec
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
|
||||
// a BTF map definition. The name and spec arguments will be copied to the
|
||||
// resulting MapSpec, and inner must be true on any resursive invocations.
|
||||
func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) {
|
||||
|
||||
var (
|
||||
key, value btf.Type
|
||||
keySize, valueSize uint32
|
||||
mapType, flags, maxEntries uint32
|
||||
pinType PinType
|
||||
innerMapSpec *MapSpec
|
||||
err error
|
||||
)
|
||||
|
||||
for i, member := range def.Members {
|
||||
switch member.Name {
|
||||
case "type":
|
||||
mapType, err = uintFromBTF(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get type: %w", err)
|
||||
}
|
||||
|
||||
case "map_flags":
|
||||
flags, err = uintFromBTF(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get BTF map flags: %w", err)
|
||||
}
|
||||
|
||||
case "max_entries":
|
||||
maxEntries, err = uintFromBTF(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
|
||||
}
|
||||
|
||||
case "key":
|
||||
if keySize != 0 {
|
||||
return nil, errors.New("both key and key_size given")
|
||||
}
|
||||
|
||||
pk, ok := member.Type.(*btf.Pointer)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("key type is not a pointer: %T", member.Type)
|
||||
}
|
||||
|
||||
key = pk.Target
|
||||
|
||||
size, err := btf.Sizeof(pk.Target)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get size of BTF key: %w", err)
|
||||
}
|
||||
|
||||
keySize = uint32(size)
|
||||
|
||||
case "value":
|
||||
if valueSize != 0 {
|
||||
return nil, errors.New("both value and value_size given")
|
||||
}
|
||||
|
||||
vk, ok := member.Type.(*btf.Pointer)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("value type is not a pointer: %T", member.Type)
|
||||
}
|
||||
|
||||
value = vk.Target
|
||||
|
||||
size, err := btf.Sizeof(vk.Target)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get size of BTF value: %w", err)
|
||||
}
|
||||
|
||||
valueSize = uint32(size)
|
||||
|
||||
case "key_size":
|
||||
// Key needs to be nil and keySize needs to be 0 for key_size to be
|
||||
// considered a valid member.
|
||||
if key != nil || keySize != 0 {
|
||||
return nil, errors.New("both key and key_size given")
|
||||
}
|
||||
|
||||
keySize, err = uintFromBTF(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get BTF key size: %w", err)
|
||||
}
|
||||
|
||||
case "value_size":
|
||||
// Value needs to be nil and valueSize needs to be 0 for value_size to be
|
||||
// considered a valid member.
|
||||
if value != nil || valueSize != 0 {
|
||||
return nil, errors.New("both value and value_size given")
|
||||
}
|
||||
|
||||
valueSize, err = uintFromBTF(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get BTF value size: %w", err)
|
||||
}
|
||||
|
||||
case "pinning":
|
||||
if inner {
|
||||
return nil, errors.New("inner maps can't be pinned")
|
||||
}
|
||||
|
||||
pinning, err := uintFromBTF(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get pinning: %w", err)
|
||||
}
|
||||
|
||||
pinType = PinType(pinning)
|
||||
|
||||
case "values":
|
||||
// The 'values' field in BTF map definitions is used for declaring map
|
||||
// value types that are references to other BPF objects, like other maps
|
||||
// or programs. It is always expected to be an array of pointers.
|
||||
if i != len(def.Members)-1 {
|
||||
return nil, errors.New("'values' must be the last member in a BTF map definition")
|
||||
}
|
||||
|
||||
if valueSize != 0 && valueSize != 4 {
|
||||
return nil, errors.New("value_size must be 0 or 4")
|
||||
}
|
||||
valueSize = 4
|
||||
|
||||
valueType, err := resolveBTFArrayMacro(member.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't resolve type of member 'values': %w", err)
|
||||
}
|
||||
|
||||
switch t := valueType.(type) {
|
||||
case *btf.Struct:
|
||||
// The values member pointing to an array of structs means we're expecting
|
||||
// a map-in-map declaration.
|
||||
if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps {
|
||||
return nil, errors.New("outer map needs to be an array or a hash of maps")
|
||||
}
|
||||
if inner {
|
||||
return nil, fmt.Errorf("nested inner maps are not supported")
|
||||
}
|
||||
|
||||
// This inner map spec is used as a map template, but it needs to be
|
||||
// created as a traditional map before it can be used to do so.
|
||||
// libbpf names the inner map template '<outer_name>.inner', but we
|
||||
// opted for _inner to simplify validation logic. (dots only supported
|
||||
// on kernels 5.2 and up)
|
||||
// Pass the BTF spec from the parent object, since both parent and
|
||||
// child must be created from the same BTF blob (on kernels that support BTF).
|
||||
innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
|
||||
}
|
||||
}
|
||||
|
||||
bm := btf.NewMap(spec, key, value)
|
||||
|
||||
return &MapSpec{
|
||||
Name: SanitizeName(name, -1),
|
||||
Type: MapType(mapType),
|
||||
KeySize: keySize,
|
||||
ValueSize: valueSize,
|
||||
MaxEntries: maxEntries,
|
||||
Flags: flags,
|
||||
BTF: &bm,
|
||||
Pinning: pinType,
|
||||
InnerMap: innerMapSpec,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// uintFromBTF resolves the __uint macro, which is a pointer to a sized
|
||||
// array, e.g. for int (*foo)[10], this function will return 10.
|
||||
func uintFromBTF(typ btf.Type) (uint32, error) {
|
||||
ptr, ok := typ.(*btf.Pointer)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("not a pointer: %v", typ)
|
||||
}
|
||||
|
||||
arr, ok := ptr.Target.(*btf.Array)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("not a pointer to array: %v", typ)
|
||||
}
|
||||
|
||||
return arr.Nelems, nil
|
||||
}
|
||||
|
||||
// resolveBTFArrayMacro resolves the __array macro, which declares an array
|
||||
// of pointers to a given type. This function returns the target Type of
|
||||
// the pointers in the array.
|
||||
func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
|
||||
arr, ok := typ.(*btf.Array)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not an array: %v", typ)
|
||||
}
|
||||
|
||||
ptr, ok := arr.Type.(*btf.Pointer)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not an array of pointers: %v", typ)
|
||||
}
|
||||
|
||||
return ptr.Target, nil
|
||||
}
|
||||
|
||||
func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
|
||||
for _, sec := range ec.sections {
|
||||
if sec.kind != dataSection {
|
||||
continue
|
||||
}
|
||||
|
||||
if sec.references == 0 {
|
||||
// Prune data sections which are not referenced by any
|
||||
// instructions.
|
||||
continue
|
||||
}
|
||||
|
||||
if ec.btf == nil {
|
||||
return errors.New("data sections require BTF, make sure all consts are marked as static")
|
||||
}
|
||||
|
||||
btfMap, err := ec.btf.Datasec(sec.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := sec.Data()
|
||||
if err != nil {
|
||||
return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
|
||||
}
|
||||
|
||||
if uint64(len(data)) > math.MaxUint32 {
|
||||
return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
|
||||
}
|
||||
|
||||
mapSpec := &MapSpec{
|
||||
Name: SanitizeName(sec.Name, -1),
|
||||
Type: Array,
|
||||
KeySize: 4,
|
||||
ValueSize: uint32(len(data)),
|
||||
MaxEntries: 1,
|
||||
Contents: []MapKV{{uint32(0), data}},
|
||||
BTF: btfMap,
|
||||
}
|
||||
|
||||
switch sec.Name {
|
||||
case ".rodata":
|
||||
mapSpec.Flags = unix.BPF_F_RDONLY_PROG
|
||||
mapSpec.Freeze = true
|
||||
case ".bss":
|
||||
// The kernel already zero-initializes the map
|
||||
mapSpec.Contents = nil
|
||||
}
|
||||
|
||||
maps[sec.Name] = mapSpec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
|
||||
types := map[string]struct {
|
||||
progType ProgramType
|
||||
attachType AttachType
|
||||
progFlags uint32
|
||||
}{
|
||||
// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
|
||||
"socket": {SocketFilter, AttachNone, 0},
|
||||
"seccomp": {SocketFilter, AttachNone, 0},
|
||||
"kprobe/": {Kprobe, AttachNone, 0},
|
||||
"uprobe/": {Kprobe, AttachNone, 0},
|
||||
"kretprobe/": {Kprobe, AttachNone, 0},
|
||||
"uretprobe/": {Kprobe, AttachNone, 0},
|
||||
"tracepoint/": {TracePoint, AttachNone, 0},
|
||||
"raw_tracepoint/": {RawTracepoint, AttachNone, 0},
|
||||
"raw_tp/": {RawTracepoint, AttachNone, 0},
|
||||
"tp_btf/": {Tracing, AttachTraceRawTp, 0},
|
||||
"xdp": {XDP, AttachNone, 0},
|
||||
"perf_event": {PerfEvent, AttachNone, 0},
|
||||
"lwt_in": {LWTIn, AttachNone, 0},
|
||||
"lwt_out": {LWTOut, AttachNone, 0},
|
||||
"lwt_xmit": {LWTXmit, AttachNone, 0},
|
||||
"lwt_seg6local": {LWTSeg6Local, AttachNone, 0},
|
||||
"sockops": {SockOps, AttachCGroupSockOps, 0},
|
||||
"sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0},
|
||||
"sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser, 0},
|
||||
"sk_msg": {SkMsg, AttachSkSKBStreamVerdict, 0},
|
||||
"lirc_mode2": {LircMode2, AttachLircMode2, 0},
|
||||
"flow_dissector": {FlowDissector, AttachFlowDissector, 0},
|
||||
"iter/": {Tracing, AttachTraceIter, 0},
|
||||
"fentry/": {Tracing, AttachTraceFEntry, 0},
|
||||
"fmod_ret/": {Tracing, AttachModifyReturn, 0},
|
||||
"fexit/": {Tracing, AttachTraceFExit, 0},
|
||||
"fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
|
||||
"fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
|
||||
"fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},
|
||||
"sk_lookup/": {SkLookup, AttachSkLookup, 0},
|
||||
"lsm/": {LSM, AttachLSMMac, 0},
|
||||
"lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
|
||||
|
||||
"cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0},
|
||||
"cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0},
|
||||
"cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0},
|
||||
"cgroup/skb": {CGroupSKB, AttachNone, 0},
|
||||
"cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate, 0},
|
||||
"cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0},
|
||||
"cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0},
|
||||
"cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0},
|
||||
"cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0},
|
||||
"cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0},
|
||||
"cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0},
|
||||
"cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0},
|
||||
"cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0},
|
||||
"cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0},
|
||||
"cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0},
|
||||
"cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0},
|
||||
"cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0},
|
||||
"cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0},
|
||||
"classifier": {SchedCLS, AttachNone, 0},
|
||||
"action": {SchedACT, AttachNone, 0},
|
||||
}
|
||||
|
||||
for prefix, t := range types {
|
||||
if !strings.HasPrefix(sectionName, prefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(prefix, "/") {
|
||||
return t.progType, t.attachType, t.progFlags, ""
|
||||
}
|
||||
|
||||
return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):]
|
||||
}
|
||||
|
||||
return UnspecifiedProgram, AttachNone, 0, ""
|
||||
}
|
||||
|
||||
func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
|
||||
rels := make(map[uint64]elf.Symbol)
|
||||
|
||||
if sec.Entsize < 16 {
|
||||
return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
|
||||
}
|
||||
|
||||
r := bufio.NewReader(sec.Open())
|
||||
for off := uint64(0); off < sec.Size; off += sec.Entsize {
|
||||
ent := io.LimitReader(r, int64(sec.Entsize))
|
||||
|
||||
var rel elf.Rel64
|
||||
if binary.Read(ent, ec.ByteOrder, &rel) != nil {
|
||||
return nil, fmt.Errorf("can't parse relocation at offset %v", off)
|
||||
}
|
||||
|
||||
symNo := int(elf.R_SYM64(rel.Info) - 1)
|
||||
if symNo >= len(symbols) {
|
||||
return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo)
|
||||
}
|
||||
|
||||
symbol := symbols[symNo]
|
||||
rels[rel.Off] = symbol
|
||||
}
|
||||
|
||||
return rels, nil
|
||||
}
|
||||
21
src/runtime/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go
generated
vendored
Normal file
21
src/runtime/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
// +build gofuzz
|
||||
|
||||
// Use with https://github.com/dvyukov/go-fuzz
|
||||
|
||||
package ebpf
|
||||
|
||||
import "bytes"
|
||||
|
||||
func FuzzLoadCollectionSpec(data []byte) int {
|
||||
spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
if spec != nil {
|
||||
panic("spec is not nil")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if spec == nil {
|
||||
panic("spec is nil")
|
||||
}
|
||||
return 1
|
||||
}
|
||||
9
src/runtime/vendor/github.com/cilium/ebpf/go.mod
generated
vendored
Normal file
9
src/runtime/vendor/github.com/cilium/ebpf/go.mod
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
module github.com/cilium/ebpf
|
||||
|
||||
go 1.15
|
||||
|
||||
require (
|
||||
github.com/frankban/quicktest v1.11.3
|
||||
github.com/google/go-cmp v0.5.4
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
|
||||
)
|
||||
13
src/runtime/vendor/github.com/cilium/ebpf/go.sum
generated
vendored
Normal file
13
src/runtime/vendor/github.com/cilium/ebpf/go.sum
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
239
src/runtime/vendor/github.com/cilium/ebpf/info.go
generated
vendored
Normal file
239
src/runtime/vendor/github.com/cilium/ebpf/info.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
package ebpf
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/cilium/ebpf/internal"
|
||||
)
|
||||
|
||||
// MapInfo describes a map.
|
||||
type MapInfo struct {
|
||||
Type MapType
|
||||
id MapID
|
||||
KeySize uint32
|
||||
ValueSize uint32
|
||||
MaxEntries uint32
|
||||
Flags uint32
|
||||
// Name as supplied by user space at load time.
|
||||
Name string
|
||||
}
|
||||
|
||||
func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
|
||||
info, err := bpfGetMapInfoByFD(fd)
|
||||
if errors.Is(err, syscall.EINVAL) {
|
||||
return newMapInfoFromProc(fd)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &MapInfo{
|
||||
MapType(info.map_type),
|
||||
MapID(info.id),
|
||||
info.key_size,
|
||||
info.value_size,
|
||||
info.max_entries,
|
||||
info.map_flags,
|
||||
// name is available from 4.15.
|
||||
internal.CString(info.name[:]),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) {
|
||||
var mi MapInfo
|
||||
err := scanFdInfo(fd, map[string]interface{}{
|
||||
"map_type": &mi.Type,
|
||||
"key_size": &mi.KeySize,
|
||||
"value_size": &mi.ValueSize,
|
||||
"max_entries": &mi.MaxEntries,
|
||||
"map_flags": &mi.Flags,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mi, nil
|
||||
}
|
||||
|
||||
// ID returns the map ID.
|
||||
//
|
||||
// Available from 4.13.
|
||||
//
|
||||
// The bool return value indicates whether this optional field is available.
|
||||
func (mi *MapInfo) ID() (MapID, bool) {
|
||||
return mi.id, mi.id > 0
|
||||
}
|
||||
|
||||
// programStats holds statistics of a program.
|
||||
type programStats struct {
|
||||
// Total accumulated runtime of the program ins ns.
|
||||
runtime time.Duration
|
||||
// Total number of times the program was called.
|
||||
runCount uint64
|
||||
}
|
||||
|
||||
// ProgramInfo describes a program.
|
||||
type ProgramInfo struct {
|
||||
Type ProgramType
|
||||
id ProgramID
|
||||
// Truncated hash of the BPF bytecode.
|
||||
Tag string
|
||||
// Name as supplied by user space at load time.
|
||||
Name string
|
||||
|
||||
stats *programStats
|
||||
}
|
||||
|
||||
func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
|
||||
info, err := bpfGetProgInfoByFD(fd)
|
||||
if errors.Is(err, syscall.EINVAL) {
|
||||
return newProgramInfoFromProc(fd)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ProgramInfo{
|
||||
Type: ProgramType(info.prog_type),
|
||||
id: ProgramID(info.id),
|
||||
// tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD.
|
||||
Tag: hex.EncodeToString(info.tag[:]),
|
||||
// name is available from 4.15.
|
||||
Name: internal.CString(info.name[:]),
|
||||
stats: &programStats{
|
||||
runtime: time.Duration(info.run_time_ns),
|
||||
runCount: info.run_cnt,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) {
|
||||
var info ProgramInfo
|
||||
err := scanFdInfo(fd, map[string]interface{}{
|
||||
"prog_type": &info.Type,
|
||||
"prog_tag": &info.Tag,
|
||||
})
|
||||
if errors.Is(err, errMissingFields) {
|
||||
return nil, &internal.UnsupportedFeatureError{
|
||||
Name: "reading program info from /proc/self/fdinfo",
|
||||
MinimumVersion: internal.Version{4, 10, 0},
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// ID returns the program ID.
|
||||
//
|
||||
// Available from 4.13.
|
||||
//
|
||||
// The bool return value indicates whether this optional field is available.
|
||||
func (pi *ProgramInfo) ID() (ProgramID, bool) {
|
||||
return pi.id, pi.id > 0
|
||||
}
|
||||
|
||||
// RunCount returns the total number of times the program was called.
|
||||
//
|
||||
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
|
||||
// The bool return value indicates whether this optional field is available.
|
||||
func (pi *ProgramInfo) RunCount() (uint64, bool) {
|
||||
if pi.stats != nil {
|
||||
return pi.stats.runCount, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Runtime returns the total accumulated runtime of the program.
|
||||
//
|
||||
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
|
||||
// The bool return value indicates whether this optional field is available.
|
||||
func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
|
||||
if pi.stats != nil {
|
||||
return pi.stats.runtime, true
|
||||
}
|
||||
return time.Duration(0), false
|
||||
}
|
||||
|
||||
func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
|
||||
raw, err := fd.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
if err := scanFdInfoReader(fh, fields); err != nil {
|
||||
return fmt.Errorf("%s: %w", fh.Name(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errMissingFields = errors.New("missing fields")
|
||||
|
||||
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
|
||||
var (
|
||||
scanner = bufio.NewScanner(r)
|
||||
scanned int
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
parts := strings.SplitN(scanner.Text(), "\t", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
name := strings.TrimSuffix(parts[0], ":")
|
||||
field, ok := fields[string(name)]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 {
|
||||
return fmt.Errorf("can't parse field %s: %v", name, err)
|
||||
}
|
||||
|
||||
scanned++
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if scanned != len(fields) {
|
||||
return errMissingFields
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnableStats starts the measuring of the runtime
|
||||
// and run counts of eBPF programs.
|
||||
//
|
||||
// Collecting statistics can have an impact on the performance.
|
||||
//
|
||||
// Requires at least 5.8.
|
||||
func EnableStats(which uint32) (io.Closer, error) {
|
||||
attr := internal.BPFEnableStatsAttr{
|
||||
StatsType: which,
|
||||
}
|
||||
|
||||
fd, err := internal.BPFEnableStats(&attr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
799
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/btf.go
generated
vendored
Normal file
799
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/btf.go
generated
vendored
Normal file
@@ -0,0 +1,799 @@
|
||||
package btf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"debug/elf"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
const btfMagic = 0xeB9F
|
||||
|
||||
// Errors returned by BTF functions.
|
||||
var (
|
||||
ErrNotSupported = internal.ErrNotSupported
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrNoExtendedInfo = errors.New("no extended info")
|
||||
)
|
||||
|
||||
// Spec represents decoded BTF.
|
||||
type Spec struct {
|
||||
rawTypes []rawType
|
||||
strings stringTable
|
||||
types []Type
|
||||
namedTypes map[string][]namedType
|
||||
funcInfos map[string]extInfo
|
||||
lineInfos map[string]extInfo
|
||||
coreRelos map[string]coreRelos
|
||||
byteOrder binary.ByteOrder
|
||||
}
|
||||
|
||||
type btfHeader struct {
|
||||
Magic uint16
|
||||
Version uint8
|
||||
Flags uint8
|
||||
HdrLen uint32
|
||||
|
||||
TypeOff uint32
|
||||
TypeLen uint32
|
||||
StringOff uint32
|
||||
StringLen uint32
|
||||
}
|
||||
|
||||
// LoadSpecFromReader reads BTF sections from an ELF.
|
||||
//
|
||||
// Returns ErrNotFound if the reader contains no BTF.
|
||||
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
|
||||
file, err := internal.NewSafeELFFile(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
btfSection, btfExtSection, sectionSizes, err := findBtfSections(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if btfSection == nil {
|
||||
return nil, fmt.Errorf("btf: %w", ErrNotFound)
|
||||
}
|
||||
|
||||
symbols, err := file.Symbols()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read symbols: %v", err)
|
||||
}
|
||||
|
||||
variableOffsets := make(map[variable]uint32)
|
||||
for _, symbol := range symbols {
|
||||
if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
|
||||
// Ignore things like SHN_ABS
|
||||
continue
|
||||
}
|
||||
|
||||
if int(symbol.Section) >= len(file.Sections) {
|
||||
return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
|
||||
}
|
||||
|
||||
secName := file.Sections[symbol.Section].Name
|
||||
if _, ok := sectionSizes[secName]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if symbol.Value > math.MaxUint32 {
|
||||
return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
|
||||
}
|
||||
|
||||
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
|
||||
}
|
||||
|
||||
spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if btfExtSection == nil {
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read ext info: %w", err)
|
||||
}
|
||||
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) {
|
||||
var (
|
||||
btfSection *elf.Section
|
||||
btfExtSection *elf.Section
|
||||
sectionSizes = make(map[string]uint32)
|
||||
)
|
||||
|
||||
for _, sec := range file.Sections {
|
||||
switch sec.Name {
|
||||
case ".BTF":
|
||||
btfSection = sec
|
||||
case ".BTF.ext":
|
||||
btfExtSection = sec
|
||||
default:
|
||||
if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
|
||||
break
|
||||
}
|
||||
|
||||
if sec.Size > math.MaxUint32 {
|
||||
return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
|
||||
}
|
||||
|
||||
sectionSizes[sec.Name] = uint32(sec.Size)
|
||||
}
|
||||
}
|
||||
return btfSection, btfExtSection, sectionSizes, nil
|
||||
}
|
||||
|
||||
func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) {
|
||||
file, err := internal.NewSafeELFFile(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
btfSection, _, _, err := findBtfSections(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(".BTF ELF section: %s", err)
|
||||
}
|
||||
if btfSection == nil {
|
||||
return nil, fmt.Errorf("unable to find .BTF ELF section")
|
||||
}
|
||||
return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil)
|
||||
}
|
||||
|
||||
func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
|
||||
rawTypes, rawStrings, err := parseBTF(btf, bo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Spec{
|
||||
rawTypes: rawTypes,
|
||||
namedTypes: typesByName,
|
||||
types: types,
|
||||
strings: rawStrings,
|
||||
byteOrder: bo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var kernelBTF struct {
|
||||
sync.Mutex
|
||||
*Spec
|
||||
}
|
||||
|
||||
// LoadKernelSpec returns the current kernel's BTF information.
//
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
// ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
	kernelBTF.Lock()
	defer kernelBTF.Unlock()

	if kernelBTF.Spec != nil {
		return kernelBTF.Spec, nil
	}

	var err error
	kernelBTF.Spec, err = loadKernelSpec()
	return kernelBTF.Spec, err
}

func loadKernelSpec() (*Spec, error) {
	release, err := unix.KernelRelease()
	if err != nil {
		return nil, fmt.Errorf("can't read kernel release number: %w", err)
	}

	fh, err := os.Open("/sys/kernel/btf/vmlinux")
	if err == nil {
		defer fh.Close()

		return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
	}

	// use same list of locations as libbpf
	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
	locations := []string{
		"/boot/vmlinux-%s",
		"/lib/modules/%s/vmlinux-%[1]s",
		"/lib/modules/%s/build/vmlinux",
		"/usr/lib/modules/%s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%s",
		"/usr/lib/debug/boot/vmlinux-%s.debug",
		"/usr/lib/debug/lib/modules/%s/vmlinux",
	}

	for _, loc := range locations {
		path := fmt.Sprintf(loc, release)

		fh, err := os.Open(path)
		if err != nil {
			continue
		}
		defer fh.Close()

		return loadSpecFromVmlinux(fh)
	}

	return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
}

func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
	rawBTF, err := ioutil.ReadAll(btf)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read BTF: %v", err)
	}

	rd := bytes.NewReader(rawBTF)

	var header btfHeader
	if err := binary.Read(rd, bo, &header); err != nil {
		return nil, nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, errors.New("header is too short")
	}

	if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
		return nil, nil, fmt.Errorf("header padding: %v", err)
	}

	if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
		return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
	}

	rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
	if err != nil {
		return nil, nil, fmt.Errorf("can't read type names: %w", err)
	}

	if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
		return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
	}

	rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read types: %w", err)
	}

	return rawTypes, rawStrings, nil
}

type variable struct {
	section string
	name    string
}

func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
	for i, rawType := range rawTypes {
		if rawType.Kind() != kindDatasec {
			continue
		}

		name, err := rawStrings.Lookup(rawType.NameOff)
		if err != nil {
			return err
		}

		if name == ".kconfig" || name == ".ksyms" {
			return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
		}

		if rawTypes[i].SizeType != 0 {
			continue
		}

		size, ok := sectionSizes[name]
		if !ok {
			return fmt.Errorf("data section %s: missing size", name)
		}

		rawTypes[i].SizeType = size

		secinfos := rawType.data.([]btfVarSecinfo)
		for j, secInfo := range secinfos {
			id := int(secInfo.Type - 1)
			if id >= len(rawTypes) {
				return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
			}

			varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
			if err != nil {
				return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
			}

			offset, ok := variableOffsets[variable{name, varName}]
			if !ok {
				return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
			}

			secinfos[j].Offset = offset
		}
	}

	return nil
}

type marshalOpts struct {
	ByteOrder        binary.ByteOrder
	StripFuncLinkage bool
}

func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
	var (
		buf       bytes.Buffer
		header    = new(btfHeader)
		headerLen = binary.Size(header)
	)

	// Reserve space for the header. We have to write it last since
	// we don't know the size of the type section yet.
	_, _ = buf.Write(make([]byte, headerLen))

	// Write type section, just after the header.
	for _, raw := range s.rawTypes {
		switch {
		case opts.StripFuncLinkage && raw.Kind() == kindFunc:
			raw.SetLinkage(StaticFunc)
		}

		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
			return nil, fmt.Errorf("can't marshal BTF: %w", err)
		}
	}

	typeLen := uint32(buf.Len() - headerLen)

	// Write string section after type section.
	_, _ = buf.Write(s.strings)

	// Fill out the header, and write it out.
	header = &btfHeader{
		Magic:     btfMagic,
		Version:   1,
		Flags:     0,
		HdrLen:    uint32(headerLen),
		TypeOff:   0,
		TypeLen:   typeLen,
		StringOff: typeLen,
		StringLen: uint32(len(s.strings)),
	}

	raw := buf.Bytes()
	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
	if err != nil {
		return nil, fmt.Errorf("can't write header: %v", err)
	}

	return raw, nil
}

type sliceWriter []byte

func (sw sliceWriter) Write(p []byte) (int, error) {
	if len(p) != len(sw) {
		return 0, errors.New("size doesn't match")
	}

	return copy(sw, p), nil
}

// Program finds the BTF for a specific section.
//
// Length is the number of bytes in the raw BPF instruction stream.
//
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
// contain extended BTF info.
func (s *Spec) Program(name string, length uint64) (*Program, error) {
	if length == 0 {
		return nil, errors.New("length must not be zero")
	}

	if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
		return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
	}

	funcInfos, funcOK := s.funcInfos[name]
	lineInfos, lineOK := s.lineInfos[name]
	relos, coreOK := s.coreRelos[name]

	if !funcOK && !lineOK && !coreOK {
		return nil, fmt.Errorf("no extended BTF info for section %s", name)
	}

	return &Program{s, length, funcInfos, lineInfos, relos}, nil
}

// Datasec returns the BTF required to create maps which represent data sections.
func (s *Spec) Datasec(name string) (*Map, error) {
	var datasec Datasec
	if err := s.FindType(name, &datasec); err != nil {
		return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
	}

	m := NewMap(s, &Void{}, &datasec)
	return &m, nil
}

// FindType searches for a type with a specific name.
//
// hint determines the type of the returned Type.
//
// Returns an error wrapping ErrNotFound if no matching
// type exists in spec.
func (s *Spec) FindType(name string, typ Type) error {
	var (
		wanted    = reflect.TypeOf(typ)
		candidate Type
	)

	for _, typ := range s.namedTypes[essentialName(name)] {
		if reflect.TypeOf(typ) != wanted {
			continue
		}

		// Match against the full name, not just the essential one.
		if typ.name() != name {
			continue
		}

		if candidate != nil {
			return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
		}

		candidate = typ
	}

	if candidate == nil {
		return fmt.Errorf("type %s: %w", name, ErrNotFound)
	}

	cpy, _ := copyType(candidate, nil)
	value := reflect.Indirect(reflect.ValueOf(cpy))
	reflect.Indirect(reflect.ValueOf(typ)).Set(value)
	return nil
}

// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
	fd *internal.FD
}

// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
	if err := haveBTF(); err != nil {
		return nil, err
	}

	if spec.byteOrder != internal.NativeEndian {
		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
	}

	btf, err := spec.marshal(marshalOpts{
		ByteOrder:        internal.NativeEndian,
		StripFuncLinkage: haveFuncLinkage() != nil,
	})
	if err != nil {
		return nil, fmt.Errorf("can't marshal BTF: %w", err)
	}

	if uint64(len(btf)) > math.MaxUint32 {
		return nil, errors.New("BTF exceeds the maximum size")
	}

	attr := &bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	}

	fd, err := bpfLoadBTF(attr)
	if err != nil {
		logBuf := make([]byte, 64*1024)
		attr.logBuf = internal.NewSlicePointer(logBuf)
		attr.btfLogSize = uint32(len(logBuf))
		attr.btfLogLevel = 1
		_, logErr := bpfLoadBTF(attr)
		return nil, internal.ErrorWithLog(err, logBuf, logErr)
	}

	return &Handle{fd}, nil
}

// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
	return h.fd.Close()
}

// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
	value, err := h.fd.Value()
	if err != nil {
		return -1
	}

	return int(value)
}

// Map is the BTF for a map.
type Map struct {
	spec       *Spec
	key, value Type
}

// NewMap returns a new Map containing the given values.
// The key and value arguments are initialized to Void if nil values are given.
func NewMap(spec *Spec, key Type, value Type) Map {
	if key == nil {
		key = &Void{}
	}
	if value == nil {
		value = &Void{}
	}

	return Map{
		spec:  spec,
		key:   key,
		value: value,
	}
}

// MapSpec should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapSpec(m *Map) *Spec {
	return m.spec
}

// MapKey should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapKey(m *Map) Type {
	return m.key
}

// MapValue should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapValue(m *Map) Type {
	return m.value
}

// Program is the BTF information for a stream of instructions.
type Program struct {
	spec                 *Spec
	length               uint64
	funcInfos, lineInfos extInfo
	coreRelos            coreRelos
}

// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramSpec(s *Program) *Spec {
	return s.spec
}

// ProgramAppend appends the information from other to the Program.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramAppend(s, other *Program) error {
	funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
	if err != nil {
		return fmt.Errorf("func infos: %w", err)
	}

	lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
	if err != nil {
		return fmt.Errorf("line infos: %w", err)
	}

	s.funcInfos = funcInfos
	s.lineInfos = lineInfos
	s.coreRelos = s.coreRelos.append(other.coreRelos, s.length)
	s.length += other.length
	return nil
}

// ProgramFuncInfos returns the binary form of BTF function infos.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
	bytes, err = s.funcInfos.MarshalBinary()
	if err != nil {
		return 0, nil, err
	}

	return s.funcInfos.recordSize, bytes, nil
}

// ProgramLineInfos returns the binary form of BTF line infos.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
	bytes, err = s.lineInfos.MarshalBinary()
	if err != nil {
		return 0, nil, err
	}

	return s.lineInfos.recordSize, bytes, nil
}

// ProgramFixups returns the changes required to adjust the program to the target.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramFixups(s *Program, target *Spec) (COREFixups, error) {
	if len(s.coreRelos) == 0 {
		return nil, nil
	}

	if target == nil {
		var err error
		target, err = LoadKernelSpec()
		if err != nil {
			return nil, err
		}
	}

	return coreRelocate(s.spec, target, s.coreRelos)
}

type bpfLoadBTFAttr struct {
	btf         internal.Pointer
	logBuf      internal.Pointer
	btfSize     uint32
	btfLogSize  uint32
	btfLogLevel uint32
}

func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
	fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	if err != nil {
		return nil, err
	}

	return internal.NewFD(uint32(fd)), nil
}

func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
	const minHeaderLength = 24

	typesLen := uint32(binary.Size(types))
	header := btfHeader{
		Magic:     btfMagic,
		Version:   1,
		HdrLen:    minHeaderLength,
		TypeOff:   0,
		TypeLen:   typesLen,
		StringOff: typesLen,
		StringLen: uint32(len(strings)),
	}

	buf := new(bytes.Buffer)
	_ = binary.Write(buf, bo, &header)
	_ = binary.Write(buf, bo, types)
	buf.Write(strings)

	return buf.Bytes()
}

var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
	var (
		types struct {
			Integer btfType
			Var     btfType
			btfVar  struct{ Linkage uint32 }
		}
		strings = []byte{0, 'a', 0}
	)

	// We use a BTF_KIND_VAR here, to make sure that
	// the kernel understands BTF at least as well as we
	// do. BTF_KIND_VAR was introduced ~5.1.
	types.Integer.SetKind(kindPointer)
	types.Var.NameOff = 1
	types.Var.SetKind(kindVar)
	types.Var.SizeType = 1

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		// Treat both EINVAL and EPERM as not supported: loading the program
		// might still succeed without BTF.
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})

var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
	if err := haveBTF(); err != nil {
		return err
	}

	var (
		types struct {
			FuncProto btfType
			Func      btfType
		}
		strings = []byte{0, 'a', 0}
	)

	types.FuncProto.SetKind(kindFuncProto)
	types.Func.SetKind(kindFunc)
	types.Func.SizeType = 1 // aka FuncProto
	types.Func.NameOff = 1
	types.Func.SetLinkage(GlobalFunc)

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})
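// Example (illustrative sketch, not part of the upstream file): a typical caller
// loads the running kernel's BTF once, hands it to the kernel via NewHandle and
// closes the handle when done. Only names defined above are used; error handling
// is deliberately minimal.
//
//	spec, err := LoadKernelSpec()
//	if err != nil {
//		// BTF is disabled or the kernel is too old; callers fall back gracefully.
//	}
//	if handle, err := NewHandle(spec); err == nil {
//		defer handle.Close()
//		_ = handle.FD() // raw fd, usable when loading programs or maps
//	}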
282
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go
generated
vendored
Normal file
@@ -0,0 +1,282 @@
package btf

import (
	"encoding/binary"
	"fmt"
	"io"
)

//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage

// btfKind describes a Type.
type btfKind uint8

// Equivalents of the BTF_KIND_* constants.
const (
	kindUnknown btfKind = iota
	kindInt
	kindPointer
	kindArray
	kindStruct
	kindUnion
	kindEnum
	kindForward
	kindTypedef
	kindVolatile
	kindConst
	kindRestrict
	// Added ~4.20
	kindFunc
	kindFuncProto
	// Added ~5.1
	kindVar
	kindDatasec
)

// FuncLinkage describes BTF function linkage metadata.
type FuncLinkage int

// Equivalent of enum btf_func_linkage.
const (
	StaticFunc FuncLinkage = iota // static
	GlobalFunc                    // global
	ExternFunc                    // extern
)

// VarLinkage describes BTF variable linkage metadata.
type VarLinkage int

const (
	StaticVar VarLinkage = iota // static
	GlobalVar                   // global
	ExternVar                   // extern
)

const (
	btfTypeKindShift     = 24
	btfTypeKindLen       = 4
	btfTypeVlenShift     = 0
	btfTypeVlenMask      = 16
	btfTypeKindFlagShift = 31
	btfTypeKindFlagMask  = 1
)

// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
	NameOff uint32
	/* "info" bits arrangement
	 * bits 0-15: vlen (e.g. # of struct's members), linkage
	 * bits 16-23: unused
	 * bits 24-27: kind (e.g. int, ptr, array...etc)
	 * bits 28-30: unused
	 * bit 31: kind_flag, currently used by
	 *         struct, union and fwd
	 */
	Info uint32
	/* "size" is used by INT, ENUM, STRUCT and UNION.
	 * "size" tells the size of the type it is describing.
	 *
	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
	 * FUNC and FUNC_PROTO.
	 * "type" is a type_id referring to another type.
	 */
	SizeType uint32
}

func (k btfKind) String() string {
	switch k {
	case kindUnknown:
		return "Unknown"
	case kindInt:
		return "Integer"
	case kindPointer:
		return "Pointer"
	case kindArray:
		return "Array"
	case kindStruct:
		return "Struct"
	case kindUnion:
		return "Union"
	case kindEnum:
		return "Enumeration"
	case kindForward:
		return "Forward"
	case kindTypedef:
		return "Typedef"
	case kindVolatile:
		return "Volatile"
	case kindConst:
		return "Const"
	case kindRestrict:
		return "Restrict"
	case kindFunc:
		return "Function"
	case kindFuncProto:
		return "Function Proto"
	case kindVar:
		return "Variable"
	case kindDatasec:
		return "Section"
	default:
		return fmt.Sprintf("Unknown (%d)", k)
	}
}

func mask(len uint32) uint32 {
	return (1 << len) - 1
}

func (bt *btfType) info(len, shift uint32) uint32 {
	return (bt.Info >> shift) & mask(len)
}

func (bt *btfType) setInfo(value, len, shift uint32) {
	bt.Info &^= mask(len) << shift
	bt.Info |= (value & mask(len)) << shift
}

func (bt *btfType) Kind() btfKind {
	return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}

func (bt *btfType) SetKind(kind btfKind) {
	bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}

func (bt *btfType) Vlen() int {
	return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetVlen(vlen int) {
	bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) KindFlag() bool {
	return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}

func (bt *btfType) Linkage() FuncLinkage {
	return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetLinkage(linkage FuncLinkage) {
	bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) Type() TypeID {
	// TODO: Panic here if wrong kind?
	return TypeID(bt.SizeType)
}

func (bt *btfType) Size() uint32 {
	// TODO: Panic here if wrong kind?
	return bt.SizeType
}

type rawType struct {
	btfType
	data interface{}
}

func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
	if err := binary.Write(w, bo, &rt.btfType); err != nil {
		return err
	}

	if rt.data == nil {
		return nil
	}

	return binary.Write(w, bo, rt.data)
}

type btfArray struct {
	Type      TypeID
	IndexType TypeID
	Nelems    uint32
}

type btfMember struct {
	NameOff uint32
	Type    TypeID
	Offset  uint32
}

type btfVarSecinfo struct {
	Type   TypeID
	Offset uint32
	Size   uint32
}

type btfVariable struct {
	Linkage uint32
}

type btfEnum struct {
	NameOff uint32
	Val     int32
}

type btfParam struct {
	NameOff uint32
	Type    TypeID
}

func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
	var (
		header btfType
		types  []rawType
	)

	for id := TypeID(1); ; id++ {
		if err := binary.Read(r, bo, &header); err == io.EOF {
			return types, nil
		} else if err != nil {
			return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
		}

		var data interface{}
		switch header.Kind() {
		case kindInt:
			data = new(uint32)
		case kindPointer:
		case kindArray:
			data = new(btfArray)
		case kindStruct:
			fallthrough
		case kindUnion:
			data = make([]btfMember, header.Vlen())
		case kindEnum:
			data = make([]btfEnum, header.Vlen())
		case kindForward:
		case kindTypedef:
		case kindVolatile:
		case kindConst:
		case kindRestrict:
		case kindFunc:
		case kindFuncProto:
			data = make([]btfParam, header.Vlen())
		case kindVar:
			data = new(btfVariable)
		case kindDatasec:
			data = make([]btfVarSecinfo, header.Vlen())
		default:
			return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
		}

		if data == nil {
			types = append(types, rawType{header, nil})
			continue
		}

		if err := binary.Read(r, bo, data); err != nil {
			return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
		}

		types = append(types, rawType{header, data})
	}
}

func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
	return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
}
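// Example (illustrative sketch, not part of the upstream file): the Info field
// packs kind, vlen and linkage into a single uint32, so the setters above can
// be combined on one btfType value and read back independently.
//
//	var bt btfType
//	bt.SetKind(kindStruct) // stored in bits 24-27
//	bt.SetVlen(2)          // number of members, stored in bits 0-15
//	_ = bt.Kind()          // kindStruct
//	_ = bt.Vlen()          // 2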
44
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.

package btf

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[StaticFunc-0]
	_ = x[GlobalFunc-1]
	_ = x[ExternFunc-2]
}

const _FuncLinkage_name = "staticglobalextern"

var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}

func (i FuncLinkage) String() string {
	if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
		return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[StaticVar-0]
	_ = x[GlobalVar-1]
	_ = x[ExternVar-2]
}

const _VarLinkage_name = "staticglobalextern"

var _VarLinkage_index = [...]uint8{0, 6, 12, 18}

func (i VarLinkage) String() string {
	if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
		return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}
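// Example (illustrative sketch, not part of the generated file): the
// -linecomment flag makes String return the lowercase names from the line
// comments on the constants, sliced out of the packed name strings above.
//
//	_ = GlobalFunc.String() // "global"
//	_ = ExternVar.String()  // "extern"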
887
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/core.go
generated
vendored
Normal file
@@ -0,0 +1,887 @@
package btf

import (
	"errors"
	"fmt"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/cilium/ebpf/asm"
)

// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.

// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
	Kind   COREKind
	Local  uint32
	Target uint32
	Poison bool
}

func (f COREFixup) equal(other COREFixup) bool {
	return f.Local == other.Local && f.Target == other.Target
}

func (f COREFixup) String() string {
	if f.Poison {
		return fmt.Sprintf("%s=poison", f.Kind)
	}
	return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target)
}

func (f COREFixup) apply(ins *asm.Instruction) error {
	if f.Poison {
		return errors.New("can't poison individual instruction")
	}

	switch class := ins.OpCode.Class(); class {
	case asm.LdXClass, asm.StClass, asm.StXClass:
		if want := int16(f.Local); want != ins.Offset {
			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
		}

		if f.Target > math.MaxInt16 {
			return fmt.Errorf("offset %d exceeds MaxInt16", f.Target)
		}

		ins.Offset = int16(f.Target)

	case asm.LdClass:
		if !ins.IsConstantLoad(asm.DWord) {
			return fmt.Errorf("not a dword-sized immediate load")
		}

		if want := int64(f.Local); want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
		}

		ins.Constant = int64(f.Target)

	case asm.ALUClass:
		if ins.OpCode.ALUOp() == asm.Swap {
			return fmt.Errorf("relocation against swap")
		}

		fallthrough

	case asm.ALU64Class:
		if src := ins.OpCode.Source(); src != asm.ImmSource {
			return fmt.Errorf("invalid source %s", src)
		}

		if want := int64(f.Local); want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
		}

		if f.Target > math.MaxInt32 {
			return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target)
		}

		ins.Constant = int64(f.Target)

	default:
		return fmt.Errorf("invalid class %s", class)
	}

	return nil
}

func (f COREFixup) isNonExistant() bool {
	return f.Kind.checksForExistence() && f.Target == 0
}

type COREFixups map[uint64]COREFixup

// Apply a set of CO-RE relocations to a BPF program.
func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
	if len(fs) == 0 {
		cpy := make(asm.Instructions, len(insns))
		copy(cpy, insns)
		return insns, nil
	}

	cpy := make(asm.Instructions, 0, len(insns))
	iter := insns.Iterate()
	for iter.Next() {
		fixup, ok := fs[iter.Offset.Bytes()]
		if !ok {
			cpy = append(cpy, *iter.Ins)
			continue
		}

		ins := *iter.Ins
		if fixup.Poison {
			const badRelo = asm.BuiltinFunc(0xbad2310)

			cpy = append(cpy, badRelo.Call())
			if ins.OpCode.IsDWordLoad() {
				// 64 bit constant loads occupy two raw bpf instructions, so
				// we need to add another instruction as padding.
				cpy = append(cpy, badRelo.Call())
			}

			continue
		}

		if err := fixup.apply(&ins); err != nil {
			return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
		}

		cpy = append(cpy, ins)
	}

	return cpy, nil
}
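// Example (illustrative sketch, not part of the upstream file): fixups computed
// by ProgramFixups are applied to a copy of the instruction stream; poisoned
// relocations are replaced with calls to an invalid builtin. Here prog is an
// assumed *Program and insns an assumed asm.Instructions slice.
//
//	fixups, err := ProgramFixups(prog, nil) // nil target means the running kernel
//	if err == nil {
//		insns, _ = fixups.Apply(insns)
//	}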
// COREKind is the type of CO-RE relocation
type COREKind uint32

const (
	reloFieldByteOffset COREKind = iota /* field byte offset */
	reloFieldByteSize                   /* field size in bytes */
	reloFieldExists                     /* field existence in target kernel */
	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
	reloTypeIDLocal                     /* type ID in local BPF object */
	reloTypeIDTarget                    /* type ID in target kernel */
	reloTypeExists                      /* type existence in target kernel */
	reloTypeSize                        /* type size in bytes */
	reloEnumvalExists                   /* enum value existence in target kernel */
	reloEnumvalValue                    /* enum value integer value */
)

func (k COREKind) String() string {
	switch k {
	case reloFieldByteOffset:
		return "byte_off"
	case reloFieldByteSize:
		return "byte_sz"
	case reloFieldExists:
		return "field_exists"
	case reloFieldSigned:
		return "signed"
	case reloFieldLShiftU64:
		return "lshift_u64"
	case reloFieldRShiftU64:
		return "rshift_u64"
	case reloTypeIDLocal:
		return "local_type_id"
	case reloTypeIDTarget:
		return "target_type_id"
	case reloTypeExists:
		return "type_exists"
	case reloTypeSize:
		return "type_size"
	case reloEnumvalExists:
		return "enumval_exists"
	case reloEnumvalValue:
		return "enumval_value"
	default:
		return "unknown"
	}
}

func (k COREKind) checksForExistence() bool {
	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}

func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) {
	if local.byteOrder != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
	}

	var ids []TypeID
	relosByID := make(map[TypeID]coreRelos)
	result := make(COREFixups, len(relos))
	for _, relo := range relos {
		if relo.kind == reloTypeIDLocal {
			// Filtering out reloTypeIDLocal here makes our lives a lot easier
			// down the line, since it doesn't have a target at all.
			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
			}

			result[uint64(relo.insnOff)] = COREFixup{
				relo.kind,
				uint32(relo.typeID),
				uint32(relo.typeID),
				false,
			}
			continue
		}

		relos, ok := relosByID[relo.typeID]
		if !ok {
			ids = append(ids, relo.typeID)
		}
		relosByID[relo.typeID] = append(relos, relo)
	}

	// Ensure we work on relocations in a deterministic order.
	sort.Slice(ids, func(i, j int) bool {
		return ids[i] < ids[j]
	})

	for _, id := range ids {
		if int(id) >= len(local.types) {
			return nil, fmt.Errorf("invalid type id %d", id)
		}

		localType := local.types[id]
		named, ok := localType.(namedType)
		if !ok || named.name() == "" {
			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
		}

		relos := relosByID[id]
		targets := target.namedTypes[named.essentialName()]
		fixups, err := coreCalculateFixups(localType, targets, relos)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", localType, err)
		}

		for i, relo := range relos {
			result[uint64(relo.insnOff)] = fixups[i]
		}
	}

	return result, nil
}

var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")

// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) {
	localID := local.ID()
	local, err := copyType(local, skipQualifierAndTypedef)
	if err != nil {
		return nil, err
	}

	bestScore := len(relos)
	var bestFixups []COREFixup
	for i := range targets {
		targetID := targets[i].ID()
		target, err := copyType(targets[i], skipQualifierAndTypedef)
		if err != nil {
			return nil, err
		}

		score := 0 // lower is better
		fixups := make([]COREFixup, 0, len(relos))
		for _, relo := range relos {
			fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
			if err != nil {
				return nil, fmt.Errorf("target %s: %w", target, err)
			}
			if fixup.Poison || fixup.isNonExistant() {
				score++
			}
			fixups = append(fixups, fixup)
		}

		if score > bestScore {
			// We have a better target already, ignore this one.
			continue
		}

		if score < bestScore {
			// This is the best target yet, use it.
			bestScore = score
			bestFixups = fixups
			continue
		}

		// Some other target has the same score as the current one. Make sure
		// the fixups agree with each other.
		for i, fixup := range bestFixups {
			if !fixup.equal(fixups[i]) {
				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation)
			}
		}
	}

	if bestFixups == nil {
		// Nothing at all matched, probably because there are no suitable
		// targets at all. Poison everything!
		bestFixups = make([]COREFixup, len(relos))
		for i, relo := range relos {
			bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
		}
	}

	return bestFixups, nil
}

// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) {
	fixup := func(local, target uint32) (COREFixup, error) {
		return COREFixup{relo.kind, local, target, false}, nil
	}
	poison := func() (COREFixup, error) {
		if relo.kind.checksForExistence() {
			return fixup(1, 0)
		}
		return COREFixup{relo.kind, 0, 0, true}, nil
	}
	zero := COREFixup{}

	switch relo.kind {
	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
		}

		err := coreAreTypesCompatible(local, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
		}

		switch relo.kind {
		case reloTypeExists:
			return fixup(1, 1)

		case reloTypeIDTarget:
			return fixup(uint32(localID), uint32(targetID))

		case reloTypeSize:
			localSize, err := Sizeof(local)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(target)
			if err != nil {
				return zero, err
			}

			return fixup(uint32(localSize), uint32(targetSize))
		}

	case reloEnumvalValue, reloEnumvalExists:
		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
		}

		switch relo.kind {
		case reloEnumvalExists:
			return fixup(1, 1)

		case reloEnumvalValue:
			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
		}

	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
		if _, ok := target.(*Fwd); ok {
			// We can't relocate fields using a forward declaration, so
			// skip it. If a non-forward declaration is present in the BTF
			// we'll find it in one of the other iterations.
			return poison()
		}

		localField, targetField, err := coreFindField(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("target %s: %w", target, err)
		}

		switch relo.kind {
		case reloFieldExists:
			return fixup(1, 1)

		case reloFieldByteOffset:
			return fixup(localField.offset/8, targetField.offset/8)

		case reloFieldByteSize:
			localSize, err := Sizeof(localField.Type)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(targetField.Type)
			if err != nil {
				return zero, err
			}

			return fixup(uint32(localSize), uint32(targetSize))

		}
	}

	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
}

/* coreAccessor contains a path through a struct. It contains at least one index.
 *
 * The interpretation depends on the kind of the relocation. The following is
 * taken from struct bpf_core_relo in libbpf_internal.h:
 *
 * - for field-based relocations, string encodes an accessed field using
 *   a sequence of field and array indices, separated by colon (:). It's
 *   conceptually very close to LLVM's getelementptr ([0]) instruction's
 *   arguments for identifying offset to a field.
 * - for type-based relocations, strings is expected to be just "0";
 * - for enum value-based relocations, string contains an index of enum
 *   value within its enum type;
 *
 * Example to provide a better feel.
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 */
type coreAccessor []int

func parseCoreAccessor(accessor string) (coreAccessor, error) {
	if accessor == "" {
		return nil, fmt.Errorf("empty accessor")
	}

	var result coreAccessor
	parts := strings.Split(accessor, ":")
	for _, part := range parts {
		// 31 bits to avoid overflowing int on 32 bit platforms.
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %s", part, err)
		}

		result = append(result, int(index))
	}

	return result, nil
}

func (ca coreAccessor) String() string {
	strs := make([]string, 0, len(ca))
	for _, i := range ca {
		strs = append(strs, strconv.Itoa(i))
	}
	return strings.Join(strs, ":")
}

func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
	e, ok := t.(*Enum)
	if !ok {
		return nil, fmt.Errorf("not an enum: %s", t)
	}

	if len(ca) > 1 {
		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
	}

	i := ca[0]
	if i >= len(e.Values) {
		return nil, fmt.Errorf("invalid index %d for %s", i, e)
	}

	return &e.Values[i], nil
}
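// Example (illustrative sketch, not part of the upstream file): accessors are
// parsed from the colon-separated strings emitted by the compiler, following
// the encoding described in the coreAccessor comment above.
//
//	acc, err := parseCoreAccessor("0:1:0:5")
//	if err == nil {
//		_ = acc.String() // "0:1:0:5", acc == coreAccessor{0, 1, 0, 5}
//	}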
type coreField struct {
	Type   Type
	offset uint32
}

func adjustOffset(base uint32, t Type, n int) (uint32, error) {
	size, err := Sizeof(t)
	if err != nil {
		return 0, err
	}

	return base + (uint32(n) * uint32(size) * 8), nil
}

// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
	// The first index is used to offset a pointer of the base type like
	// when accessing an array.
	localOffset, err := adjustOffset(0, local, localAcc[0])
	if err != nil {
		return coreField{}, coreField{}, err
	}

	targetOffset, err := adjustOffset(0, target, localAcc[0])
	if err != nil {
		return coreField{}, coreField{}, err
	}

	if err := coreAreMembersCompatible(local, target); err != nil {
		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
	}

	var localMaybeFlex, targetMaybeFlex bool
	for _, acc := range localAcc[1:] {
		switch localType := local.(type) {
		case composite:
			// For composite types acc is used to find the field in the local type,
			// and then we try to find a field in target with the same name.
			localMembers := localType.members()
			if acc >= len(localMembers) {
				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
			}

			localMember := localMembers[acc]
			if localMember.Name == "" {
				_, ok := localMember.Type.(composite)
				if !ok {
					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
				}

				// This is an anonymous struct or union, ignore it.
				local = localMember.Type
				localOffset += localMember.Offset
				localMaybeFlex = false
				continue
			}

			targetType, ok := target.(composite)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
			}

			targetMember, last, err := coreFindMember(targetType, localMember.Name)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			if targetMember.BitfieldSize > 0 {
				return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
			}

			local = localMember.Type
			localMaybeFlex = acc == len(localMembers)-1
			localOffset += localMember.Offset
			target = targetMember.Type
			targetMaybeFlex = last
			targetOffset += targetMember.Offset

		case *Array:
			// For arrays, acc is the index in the target.
			targetType, ok := target.(*Array)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
			}

			if localType.Nelems == 0 && !localMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
			}
			if targetType.Nelems == 0 && !targetMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
			}

			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
			}
			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
			}

			local = localType.Type
			localMaybeFlex = false
			localOffset, err = adjustOffset(localOffset, local, acc)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			target = targetType.Type
			targetMaybeFlex = false
			targetOffset, err = adjustOffset(targetOffset, target, acc)
			if err != nil {
				return coreField{}, coreField{}, err
			}

		default:
			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
		}

		if err := coreAreMembersCompatible(local, target); err != nil {
			return coreField{}, coreField{}, err
		}
	}

	return coreField{local, localOffset}, coreField{target, targetOffset}, nil
}

// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
func coreFindMember(typ composite, name Name) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	type offsetTarget struct {
		composite
		offset uint32
	}

	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		if visited[target] {
			continue
		}
		if len(visited) >= maxTypeDepth {
			// This check is different than libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}
		visited[target] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := member.Type.(composite)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}

// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
	localValue, err := localAcc.enumValue(local)
	if err != nil {
		return nil, nil, err
	}

	targetEnum, ok := target.(*Enum)
	if !ok {
		return nil, nil, errImpossibleRelocation
	}

	localName := localValue.Name.essentialName()
	for i, targetValue := range targetEnum.Values {
		if targetValue.Name.essentialName() != localName {
			continue
		}

		return localValue, &targetEnum.Values[i], nil
	}

	return nil, nil, errImpossibleRelocation
}

/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follow slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs, the size is ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 *   - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
 *   - FUNC_PROTOs are compatible if they have compatible signature: same
 *     number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if types are not compatible.
 */
func coreAreTypesCompatible(localType Type, targetType Type) error {
	var (
		localTs, targetTs typeDeque
		l, t              = &localType, &targetType
		depth             = 0
	)

	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
		if depth >= maxTypeDepth {
			return errors.New("types are nested too deep")
		}

		localType = *l
		targetType = *t

		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd:
			// Nothing to do here

		case *Int:
			tv := targetType.(*Int)
			if lv.isBitfield() || tv.isBitfield() {
				return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
			}

		case *Pointer, *Array:
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		case *FuncProto:
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
			}

			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		default:
			return fmt.Errorf("unsupported type %T", localType)
		}
	}

	if l != nil {
		return fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return fmt.Errorf("dangling target type %T", *t)
	}

	return nil
}

/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
 *
 * The comment below is from bpf_core_fields_are_compat in libbpf.c:
 *
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of enums should be anonymous;
 *   - for ENUMs, check sizes, names are ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *     [ NB: coreAreMembersCompatible doesn't recurse, this check is done
 *       by coreFindField. ]
 *   - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if the members are not compatible.
 */
func coreAreMembersCompatible(localType Type, targetType Type) error {
	doNamesMatch := func(a, b string) error {
		if a == "" || b == "" {
			// allow anonymous and named type to match
			return nil
		}

		if essentialName(a) == essentialName(b) {
			return nil
		}

		return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
	}

	_, lok := localType.(composite)
	_, tok := targetType.(composite)
	if lok && tok {
		return nil
	}

	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
	}

	switch lv := localType.(type) {
	case *Array, *Pointer:
		return nil

	case *Enum:
		tv := targetType.(*Enum)
		return doNamesMatch(lv.name(), tv.name())

	case *Fwd:
		tv := targetType.(*Fwd)
		return doNamesMatch(lv.name(), tv.name())

	case *Int:
		tv := targetType.(*Int)
		if lv.isBitfield() || tv.isBitfield() {
			return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
		}
		return nil

	default:
		return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
	}
}

func skipQualifierAndTypedef(typ Type) (Type, error) {
	result := typ
	for depth := 0; depth <= maxTypeDepth; depth++ {
		switch v := (result).(type) {
		case qualifier:
			result = v.qualify()
		case *Typedef:
			result = v.Type
		default:
			return result, nil
		}
	}
	return nil, errors.New("exceeded type depth")
}
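// Example (illustrative sketch, not part of the upstream file; assumes the
// Const, Typedef and Struct types defined in types.go of this package):
// skipQualifierAndTypedef is what lets CO-RE treat a qualified typedef of a
// struct and the bare struct as the same type before matching.
//
//	t, err := skipQualifierAndTypedef(&Const{Type: &Typedef{Type: &Struct{}}})
//	// err == nil, t is the underlying *Struct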
8
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/doc.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
// Package btf handles data encoded according to the BPF Type Format.
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
//
// The API is very much unstable. You should only use this via the main
// ebpf library.
package btf
303
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
generated
vendored
Normal file
@@ -0,0 +1,303 @@
package btf

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/internal"
)

type btfExtHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}

type btfExtCoreHeader struct {
	CoreReloOff uint32
	CoreReloLen uint32
}

func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) {
	var header btfExtHeader
	var coreHeader btfExtCoreHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, nil, errors.New("header is too short")
	}

	coreHdrSize := int64(binary.Size(&coreHeader))
	if remainder >= coreHdrSize {
		if err := binary.Read(r, bo, &coreHeader); err != nil {
			return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
		}
		remainder -= coreHdrSize
	}

	// Of course, the .BTF.ext header has different semantics than the
	// .BTF ext header. We need to ignore non-null values.
	_, err = io.CopyN(ioutil.Discard, r, remainder)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("header padding: %v", err)
	}

	if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
		return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
	}

	buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
	funcInfo, err = parseExtInfo(buf, bo, strings)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("function info: %w", err)
	}

	if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
		return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
	}

	buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
	lineInfo, err = parseExtInfo(buf, bo, strings)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("line info: %w", err)
	}

	if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
		if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
			return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
		}

		relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
		}
	}

	return funcInfo, lineInfo, relos, nil
}

type btfExtInfoSec struct {
	SecNameOff uint32
	NumInfo    uint32
}

type extInfoRecord struct {
	InsnOff uint64
	Opaque  []byte
}

type extInfo struct {
	recordSize uint32
	records    []extInfoRecord
}

func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
	if other.recordSize != ei.recordSize {
		return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
	}

	records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
	records = append(records, ei.records...)
	for _, info := range other.records {
		records = append(records, extInfoRecord{
			InsnOff: info.InsnOff + offset,
			Opaque:  info.Opaque,
		})
	}
	return extInfo{ei.recordSize, records}, nil
}

func (ei extInfo) MarshalBinary() ([]byte, error) {
	if len(ei.records) == 0 {
		return nil, nil
	}

	buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
	for _, info := range ei.records {
		// The kernel expects offsets in number of raw bpf instructions,
		// while the ELF tracks it in bytes.
		insnOff := uint32(info.InsnOff / asm.InstructionSize)
		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
			return nil, fmt.Errorf("can't write instruction offset: %v", err)
		}

		buf.Write(info.Opaque)
	}

	return buf.Bytes(), nil
}

func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
	const maxRecordSize = 256

	var recordSize uint32
	if err := binary.Read(r, bo, &recordSize); err != nil {
		return nil, fmt.Errorf("can't read record size: %v", err)
	}

	if recordSize < 4 {
		// Need at least insnOff
		return nil, errors.New("record size too short")
	}
	if recordSize > maxRecordSize {
		return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
	}

	result := make(map[string]extInfo)
	for {
		secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
|
||||
if errors.Is(err, io.EOF) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var records []extInfoRecord
|
||||
for i := uint32(0); i < infoHeader.NumInfo; i++ {
|
||||
var byteOff uint32
|
||||
if err := binary.Read(r, bo, &byteOff); err != nil {
|
||||
return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
|
||||
}
|
||||
|
||||
buf := make([]byte, int(recordSize-4))
|
||||
if _, err := io.ReadFull(r, buf); err != nil {
|
||||
return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
|
||||
}
|
||||
|
||||
if byteOff%asm.InstructionSize != 0 {
|
||||
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
|
||||
}
|
||||
|
||||
records = append(records, extInfoRecord{uint64(byteOff), buf})
|
||||
}
|
||||
|
||||
result[secName] = extInfo{
|
||||
recordSize,
|
||||
records,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
|
||||
type bpfCoreRelo struct {
|
||||
InsnOff uint32
|
||||
TypeID TypeID
|
||||
AccessStrOff uint32
|
||||
Kind COREKind
|
||||
}
|
||||
|
||||
type coreRelo struct {
|
||||
insnOff uint32
|
||||
typeID TypeID
|
||||
accessor coreAccessor
|
||||
kind COREKind
|
||||
}
|
||||
|
||||
type coreRelos []coreRelo
|
||||
|
||||
// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
|
||||
// by offset.
|
||||
func (r coreRelos) append(other coreRelos, offset uint64) coreRelos {
|
||||
result := make([]coreRelo, 0, len(r)+len(other))
|
||||
result = append(result, r...)
|
||||
for _, relo := range other {
|
||||
relo.insnOff += uint32(offset)
|
||||
result = append(result, relo)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
var extInfoReloSize = binary.Size(bpfCoreRelo{})
|
||||
|
||||
func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) {
|
||||
var recordSize uint32
|
||||
if err := binary.Read(r, bo, &recordSize); err != nil {
|
||||
return nil, fmt.Errorf("read record size: %v", err)
|
||||
}
|
||||
|
||||
if recordSize != uint32(extInfoReloSize) {
|
||||
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
|
||||
}
|
||||
|
||||
result := make(map[string]coreRelos)
|
||||
for {
|
||||
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
|
||||
if errors.Is(err, io.EOF) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var relos coreRelos
|
||||
for i := uint32(0); i < infoHeader.NumInfo; i++ {
|
||||
var relo bpfCoreRelo
|
||||
if err := binary.Read(r, bo, &relo); err != nil {
|
||||
return nil, fmt.Errorf("section %v: read record: %v", secName, err)
|
||||
}
|
||||
|
||||
if relo.InsnOff%asm.InstructionSize != 0 {
|
||||
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
|
||||
}
|
||||
|
||||
accessorStr, err := strings.Lookup(relo.AccessStrOff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
accessor, err := parseCoreAccessor(accessorStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
|
||||
}
|
||||
|
||||
relos = append(relos, coreRelo{
|
||||
relo.InsnOff,
|
||||
relo.TypeID,
|
||||
accessor,
|
||||
relo.Kind,
|
||||
})
|
||||
}
|
||||
|
||||
result[secName] = relos
|
||||
}
|
||||
}
|
||||
|
||||
func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
|
||||
var infoHeader btfExtInfoSec
|
||||
if err := binary.Read(r, bo, &infoHeader); err != nil {
|
||||
return "", nil, fmt.Errorf("read ext info header: %w", err)
|
||||
}
|
||||
|
||||
secName, err := strings.Lookup(infoHeader.SecNameOff)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("get section name: %w", err)
|
||||
}
|
||||
|
||||
if infoHeader.NumInfo == 0 {
|
||||
return "", nil, fmt.Errorf("section %s has zero records", secName)
|
||||
}
|
||||
|
||||
return secName, &infoHeader, nil
|
||||
}
|
||||
49
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
generated
vendored
Normal file
49
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
// +build gofuzz
|
||||
|
||||
// Use with https://github.com/dvyukov/go-fuzz
|
||||
|
||||
package btf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/cilium/ebpf/internal"
|
||||
)
|
||||
|
||||
func FuzzSpec(data []byte) int {
|
||||
if len(data) < binary.Size(btfHeader{}) {
|
||||
return -1
|
||||
}
|
||||
|
||||
spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
|
||||
if err != nil {
|
||||
if spec != nil {
|
||||
panic("spec is not nil")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if spec == nil {
|
||||
panic("spec is nil")
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func FuzzExtInfo(data []byte) int {
|
||||
if len(data) < binary.Size(btfExtHeader{}) {
|
||||
return -1
|
||||
}
|
||||
|
||||
table := stringTable("\x00foo\x00barfoo\x00")
|
||||
info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
|
||||
if err != nil {
|
||||
if info != nil {
|
||||
panic("info is not nil")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if info == nil {
|
||||
panic("info is nil")
|
||||
}
|
||||
return 1
|
||||
}
|
||||
60
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/strings.go
generated
vendored
Normal file
60
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/strings.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
package btf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
type stringTable []byte
|
||||
|
||||
func readStringTable(r io.Reader) (stringTable, error) {
|
||||
contents, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read string table: %v", err)
|
||||
}
|
||||
|
||||
if len(contents) < 1 {
|
||||
return nil, errors.New("string table is empty")
|
||||
}
|
||||
|
||||
if contents[0] != '\x00' {
|
||||
return nil, errors.New("first item in string table is non-empty")
|
||||
}
|
||||
|
||||
if contents[len(contents)-1] != '\x00' {
|
||||
return nil, errors.New("string table isn't null terminated")
|
||||
}
|
||||
|
||||
return stringTable(contents), nil
|
||||
}
|
||||
|
||||
func (st stringTable) Lookup(offset uint32) (string, error) {
|
||||
if int64(offset) > int64(^uint(0)>>1) {
|
||||
return "", fmt.Errorf("offset %d overflows int", offset)
|
||||
}
|
||||
|
||||
pos := int(offset)
|
||||
if pos >= len(st) {
|
||||
return "", fmt.Errorf("offset %d is out of bounds", offset)
|
||||
}
|
||||
|
||||
if pos > 0 && st[pos-1] != '\x00' {
|
||||
return "", fmt.Errorf("offset %d isn't start of a string", offset)
|
||||
}
|
||||
|
||||
str := st[pos:]
|
||||
end := bytes.IndexByte(str, '\x00')
|
||||
if end == -1 {
|
||||
return "", fmt.Errorf("offset %d isn't null terminated", offset)
|
||||
}
|
||||
|
||||
return string(str[:end]), nil
|
||||
}
|
||||
|
||||
func (st stringTable) LookupName(offset uint32) (Name, error) {
|
||||
str, err := st.Lookup(offset)
|
||||
return Name(str), err
|
||||
}
|
||||
893
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/types.go
generated
vendored
Normal file
893
src/runtime/vendor/github.com/cilium/ebpf/internal/btf/types.go
generated
vendored
Normal file
@@ -0,0 +1,893 @@
|
||||
package btf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const maxTypeDepth = 32
|
||||
|
||||
// TypeID identifies a type in a BTF section.
|
||||
type TypeID uint32
|
||||
|
||||
// ID implements part of the Type interface.
|
||||
func (tid TypeID) ID() TypeID {
|
||||
return tid
|
||||
}
|
||||
|
||||
// Type represents a type described by BTF.
|
||||
type Type interface {
|
||||
ID() TypeID
|
||||
|
||||
String() string
|
||||
|
||||
// Make a copy of the type, without copying Type members.
|
||||
copy() Type
|
||||
|
||||
// Enumerate all nested Types. Repeated calls must visit nested
|
||||
// types in the same order.
|
||||
walk(*typeDeque)
|
||||
}
|
||||
|
||||
// namedType is a type with a name.
|
||||
//
|
||||
// Most named types simply embed Name.
|
||||
type namedType interface {
|
||||
Type
|
||||
name() string
|
||||
essentialName() string
|
||||
}
|
||||
|
||||
// Name identifies a type.
|
||||
//
|
||||
// Anonymous types have an empty name.
|
||||
type Name string
|
||||
|
||||
func (n Name) name() string {
|
||||
return string(n)
|
||||
}
|
||||
|
||||
func (n Name) essentialName() string {
|
||||
return essentialName(string(n))
|
||||
}
|
||||
|
||||
// Void is the unit type of BTF.
|
||||
type Void struct{}
|
||||
|
||||
func (v *Void) ID() TypeID { return 0 }
|
||||
func (v *Void) String() string { return "void#0" }
|
||||
func (v *Void) size() uint32 { return 0 }
|
||||
func (v *Void) copy() Type { return (*Void)(nil) }
|
||||
func (v *Void) walk(*typeDeque) {}
|
||||
|
||||
type IntEncoding byte
|
||||
|
||||
const (
|
||||
Signed IntEncoding = 1 << iota
|
||||
Char
|
||||
Bool
|
||||
)
|
||||
|
||||
// Int is an integer of a given length.
|
||||
type Int struct {
|
||||
TypeID
|
||||
Name
|
||||
|
||||
// The size of the integer in bytes.
|
||||
Size uint32
|
||||
Encoding IntEncoding
|
||||
// Offset is the starting bit offset. Currently always 0.
|
||||
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
|
||||
Offset uint32
|
||||
Bits byte
|
||||
}
|
||||
|
||||
var _ namedType = (*Int)(nil)
|
||||
|
||||
func (i *Int) String() string {
|
||||
var s strings.Builder
|
||||
|
||||
switch {
|
||||
case i.Encoding&Char != 0:
|
||||
s.WriteString("char")
|
||||
case i.Encoding&Bool != 0:
|
||||
s.WriteString("bool")
|
||||
default:
|
||||
if i.Encoding&Signed == 0 {
|
||||
s.WriteRune('u')
|
||||
}
|
||||
s.WriteString("int")
|
||||
fmt.Fprintf(&s, "%d", i.Size*8)
|
||||
}
|
||||
|
||||
fmt.Fprintf(&s, "#%d", i.TypeID)
|
||||
|
||||
if i.Bits > 0 {
|
||||
fmt.Fprintf(&s, "[bits=%d]", i.Bits)
|
||||
}
|
||||
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (i *Int) size() uint32 { return i.Size }
|
||||
func (i *Int) walk(*typeDeque) {}
|
||||
func (i *Int) copy() Type {
|
||||
cpy := *i
|
||||
return &cpy
|
||||
}
|
||||
|
||||
func (i *Int) isBitfield() bool {
|
||||
return i.Offset > 0
|
||||
}
|
||||
|
||||
// Pointer is a pointer to another type.
|
||||
type Pointer struct {
|
||||
TypeID
|
||||
Target Type
|
||||
}
|
||||
|
||||
func (p *Pointer) String() string {
|
||||
return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
|
||||
}
|
||||
|
||||
func (p *Pointer) size() uint32 { return 8 }
|
||||
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
|
||||
func (p *Pointer) copy() Type {
|
||||
cpy := *p
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Array is an array with a fixed number of elements.
|
||||
type Array struct {
|
||||
TypeID
|
||||
Type Type
|
||||
Nelems uint32
|
||||
}
|
||||
|
||||
func (arr *Array) String() string {
|
||||
return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
|
||||
}
|
||||
|
||||
func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
|
||||
func (arr *Array) copy() Type {
|
||||
cpy := *arr
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Struct is a compound type of consecutive members.
|
||||
type Struct struct {
|
||||
TypeID
|
||||
Name
|
||||
// The size of the struct including padding, in bytes
|
||||
Size uint32
|
||||
Members []Member
|
||||
}
|
||||
|
||||
func (s *Struct) String() string {
|
||||
return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
|
||||
}
|
||||
|
||||
func (s *Struct) size() uint32 { return s.Size }
|
||||
|
||||
func (s *Struct) walk(tdq *typeDeque) {
|
||||
for i := range s.Members {
|
||||
tdq.push(&s.Members[i].Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Struct) copy() Type {
|
||||
cpy := *s
|
||||
cpy.Members = copyMembers(s.Members)
|
||||
return &cpy
|
||||
}
|
||||
|
||||
func (s *Struct) members() []Member {
|
||||
return s.Members
|
||||
}
|
||||
|
||||
// Union is a compound type where members occupy the same memory.
|
||||
type Union struct {
|
||||
TypeID
|
||||
Name
|
||||
// The size of the union including padding, in bytes.
|
||||
Size uint32
|
||||
Members []Member
|
||||
}
|
||||
|
||||
func (u *Union) String() string {
|
||||
return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
|
||||
}
|
||||
|
||||
func (u *Union) size() uint32 { return u.Size }
|
||||
|
||||
func (u *Union) walk(tdq *typeDeque) {
|
||||
for i := range u.Members {
|
||||
tdq.push(&u.Members[i].Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Union) copy() Type {
|
||||
cpy := *u
|
||||
cpy.Members = copyMembers(u.Members)
|
||||
return &cpy
|
||||
}
|
||||
|
||||
func (u *Union) members() []Member {
|
||||
return u.Members
|
||||
}
|
||||
|
||||
func copyMembers(orig []Member) []Member {
|
||||
cpy := make([]Member, len(orig))
|
||||
copy(cpy, orig)
|
||||
return cpy
|
||||
}
|
||||
|
||||
type composite interface {
|
||||
members() []Member
|
||||
}
|
||||
|
||||
var (
|
||||
_ composite = (*Struct)(nil)
|
||||
_ composite = (*Union)(nil)
|
||||
)
|
||||
|
||||
// Member is part of a Struct or Union.
|
||||
//
|
||||
// It is not a valid Type.
|
||||
type Member struct {
|
||||
Name
|
||||
Type Type
|
||||
// Offset is the bit offset of this member
|
||||
Offset uint32
|
||||
BitfieldSize uint32
|
||||
}
|
||||
|
||||
// Enum lists possible values.
|
||||
type Enum struct {
|
||||
TypeID
|
||||
Name
|
||||
Values []EnumValue
|
||||
}
|
||||
|
||||
func (e *Enum) String() string {
|
||||
return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
|
||||
}
|
||||
|
||||
// EnumValue is part of an Enum
|
||||
//
|
||||
// Is is not a valid Type
|
||||
type EnumValue struct {
|
||||
Name
|
||||
Value int32
|
||||
}
|
||||
|
||||
func (e *Enum) size() uint32 { return 4 }
|
||||
func (e *Enum) walk(*typeDeque) {}
|
||||
func (e *Enum) copy() Type {
|
||||
cpy := *e
|
||||
cpy.Values = make([]EnumValue, len(e.Values))
|
||||
copy(cpy.Values, e.Values)
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// FwdKind is the type of forward declaration.
|
||||
type FwdKind int
|
||||
|
||||
// Valid types of forward declaration.
|
||||
const (
|
||||
FwdStruct FwdKind = iota
|
||||
FwdUnion
|
||||
)
|
||||
|
||||
func (fk FwdKind) String() string {
|
||||
switch fk {
|
||||
case FwdStruct:
|
||||
return "struct"
|
||||
case FwdUnion:
|
||||
return "union"
|
||||
default:
|
||||
return fmt.Sprintf("%T(%d)", fk, int(fk))
|
||||
}
|
||||
}
|
||||
|
||||
// Fwd is a forward declaration of a Type.
|
||||
type Fwd struct {
|
||||
TypeID
|
||||
Name
|
||||
Kind FwdKind
|
||||
}
|
||||
|
||||
func (f *Fwd) String() string {
|
||||
return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
|
||||
}
|
||||
|
||||
func (f *Fwd) walk(*typeDeque) {}
|
||||
func (f *Fwd) copy() Type {
|
||||
cpy := *f
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Typedef is an alias of a Type.
|
||||
type Typedef struct {
|
||||
TypeID
|
||||
Name
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (td *Typedef) String() string {
|
||||
return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
|
||||
}
|
||||
|
||||
func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
|
||||
func (td *Typedef) copy() Type {
|
||||
cpy := *td
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Volatile is a qualifier.
|
||||
type Volatile struct {
|
||||
TypeID
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (v *Volatile) String() string {
|
||||
return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
|
||||
}
|
||||
|
||||
func (v *Volatile) qualify() Type { return v.Type }
|
||||
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
|
||||
func (v *Volatile) copy() Type {
|
||||
cpy := *v
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Const is a qualifier.
|
||||
type Const struct {
|
||||
TypeID
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (c *Const) String() string {
|
||||
return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
|
||||
}
|
||||
|
||||
func (c *Const) qualify() Type { return c.Type }
|
||||
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
|
||||
func (c *Const) copy() Type {
|
||||
cpy := *c
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Restrict is a qualifier.
|
||||
type Restrict struct {
|
||||
TypeID
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (r *Restrict) String() string {
|
||||
return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
|
||||
}
|
||||
|
||||
func (r *Restrict) qualify() Type { return r.Type }
|
||||
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
|
||||
func (r *Restrict) copy() Type {
|
||||
cpy := *r
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Func is a function definition.
|
||||
type Func struct {
|
||||
TypeID
|
||||
Name
|
||||
Type Type
|
||||
Linkage FuncLinkage
|
||||
}
|
||||
|
||||
func (f *Func) String() string {
|
||||
return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID())
|
||||
}
|
||||
|
||||
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
|
||||
func (f *Func) copy() Type {
|
||||
cpy := *f
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// FuncProto is a function declaration.
|
||||
type FuncProto struct {
|
||||
TypeID
|
||||
Return Type
|
||||
Params []FuncParam
|
||||
}
|
||||
|
||||
func (fp *FuncProto) String() string {
|
||||
var s strings.Builder
|
||||
fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
|
||||
for _, param := range fp.Params {
|
||||
fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
|
||||
}
|
||||
fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (fp *FuncProto) walk(tdq *typeDeque) {
|
||||
tdq.push(&fp.Return)
|
||||
for i := range fp.Params {
|
||||
tdq.push(&fp.Params[i].Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (fp *FuncProto) copy() Type {
|
||||
cpy := *fp
|
||||
cpy.Params = make([]FuncParam, len(fp.Params))
|
||||
copy(cpy.Params, fp.Params)
|
||||
return &cpy
|
||||
}
|
||||
|
||||
type FuncParam struct {
|
||||
Name
|
||||
Type Type
|
||||
}
|
||||
|
||||
// Var is a global variable.
|
||||
type Var struct {
|
||||
TypeID
|
||||
Name
|
||||
Type Type
|
||||
Linkage VarLinkage
|
||||
}
|
||||
|
||||
func (v *Var) String() string {
|
||||
return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name)
|
||||
}
|
||||
|
||||
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
|
||||
func (v *Var) copy() Type {
|
||||
cpy := *v
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// Datasec is a global program section containing data.
|
||||
type Datasec struct {
|
||||
TypeID
|
||||
Name
|
||||
Size uint32
|
||||
Vars []VarSecinfo
|
||||
}
|
||||
|
||||
func (ds *Datasec) String() string {
|
||||
return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
|
||||
}
|
||||
|
||||
func (ds *Datasec) size() uint32 { return ds.Size }
|
||||
|
||||
func (ds *Datasec) walk(tdq *typeDeque) {
|
||||
for i := range ds.Vars {
|
||||
tdq.push(&ds.Vars[i].Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (ds *Datasec) copy() Type {
|
||||
cpy := *ds
|
||||
cpy.Vars = make([]VarSecinfo, len(ds.Vars))
|
||||
copy(cpy.Vars, ds.Vars)
|
||||
return &cpy
|
||||
}
|
||||
|
||||
// VarSecinfo describes variable in a Datasec
|
||||
//
|
||||
// It is not a valid Type.
|
||||
type VarSecinfo struct {
|
||||
Type Type
|
||||
Offset uint32
|
||||
Size uint32
|
||||
}
|
||||
|
||||
type sizer interface {
|
||||
size() uint32
|
||||
}
|
||||
|
||||
var (
|
||||
_ sizer = (*Int)(nil)
|
||||
_ sizer = (*Pointer)(nil)
|
||||
_ sizer = (*Struct)(nil)
|
||||
_ sizer = (*Union)(nil)
|
||||
_ sizer = (*Enum)(nil)
|
||||
_ sizer = (*Datasec)(nil)
|
||||
)
|
||||
|
||||
type qualifier interface {
|
||||
qualify() Type
|
||||
}
|
||||
|
||||
var (
|
||||
_ qualifier = (*Const)(nil)
|
||||
_ qualifier = (*Restrict)(nil)
|
||||
_ qualifier = (*Volatile)(nil)
|
||||
)
|
||||
|
||||
// Sizeof returns the size of a type in bytes.
|
||||
//
|
||||
// Returns an error if the size can't be computed.
|
||||
func Sizeof(typ Type) (int, error) {
|
||||
var (
|
||||
n = int64(1)
|
||||
elem int64
|
||||
)
|
||||
|
||||
for i := 0; i < maxTypeDepth; i++ {
|
||||
switch v := typ.(type) {
|
||||
case *Array:
|
||||
if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
|
||||
return 0, fmt.Errorf("type %s: overflow", typ)
|
||||
}
|
||||
|
||||
// Arrays may be of zero length, which allows
|
||||
// n to be zero as well.
|
||||
n *= int64(v.Nelems)
|
||||
typ = v.Type
|
||||
continue
|
||||
|
||||
case sizer:
|
||||
elem = int64(v.size())
|
||||
|
||||
case *Typedef:
|
||||
typ = v.Type
|
||||
continue
|
||||
|
||||
case qualifier:
|
||||
typ = v.qualify()
|
||||
continue
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("unsized type %T", typ)
|
||||
}
|
||||
|
||||
if n > 0 && elem > math.MaxInt64/n {
|
||||
return 0, fmt.Errorf("type %s: overflow", typ)
|
||||
}
|
||||
|
||||
size := n * elem
|
||||
if int64(int(size)) != size {
|
||||
return 0, fmt.Errorf("type %s: overflow", typ)
|
||||
}
|
||||
|
||||
return int(size), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("type %s: exceeded type depth", typ)
|
||||
}
|
||||
|
||||
// copy a Type recursively.
|
||||
//
|
||||
// typ may form a cycle.
|
||||
//
|
||||
// Returns any errors from transform verbatim.
|
||||
func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) {
|
||||
var (
|
||||
copies = make(map[Type]Type)
|
||||
work typeDeque
|
||||
)
|
||||
|
||||
for t := &typ; t != nil; t = work.pop() {
|
||||
// *t is the identity of the type.
|
||||
if cpy := copies[*t]; cpy != nil {
|
||||
*t = cpy
|
||||
continue
|
||||
}
|
||||
|
||||
var cpy Type
|
||||
if transform != nil {
|
||||
tf, err := transform(*t)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy %s: %w", typ, err)
|
||||
}
|
||||
cpy = tf.copy()
|
||||
} else {
|
||||
cpy = (*t).copy()
|
||||
}
|
||||
|
||||
copies[*t] = cpy
|
||||
*t = cpy
|
||||
|
||||
// Mark any nested types for copying.
|
||||
cpy.walk(&work)
|
||||
}
|
||||
|
||||
return typ, nil
|
||||
}
|
||||
|
||||
// typeDeque keeps track of pointers to types which still
|
||||
// need to be visited.
|
||||
type typeDeque struct {
|
||||
types []*Type
|
||||
read, write uint64
|
||||
mask uint64
|
||||
}
|
||||
|
||||
// push adds a type to the stack.
|
||||
func (dq *typeDeque) push(t *Type) {
|
||||
if dq.write-dq.read < uint64(len(dq.types)) {
|
||||
dq.types[dq.write&dq.mask] = t
|
||||
dq.write++
|
||||
return
|
||||
}
|
||||
|
||||
new := len(dq.types) * 2
|
||||
if new == 0 {
|
||||
new = 8
|
||||
}
|
||||
|
||||
types := make([]*Type, new)
|
||||
pivot := dq.read & dq.mask
|
||||
n := copy(types, dq.types[pivot:])
|
||||
n += copy(types[n:], dq.types[:pivot])
|
||||
types[n] = t
|
||||
|
||||
dq.types = types
|
||||
dq.mask = uint64(new) - 1
|
||||
dq.read, dq.write = 0, uint64(n+1)
|
||||
}
|
||||
|
||||
// shift returns the first element or null.
|
||||
func (dq *typeDeque) shift() *Type {
|
||||
if dq.read == dq.write {
|
||||
return nil
|
||||
}
|
||||
|
||||
index := dq.read & dq.mask
|
||||
t := dq.types[index]
|
||||
dq.types[index] = nil
|
||||
dq.read++
|
||||
return t
|
||||
}
|
||||
|
||||
// pop returns the last element or null.
|
||||
func (dq *typeDeque) pop() *Type {
|
||||
if dq.read == dq.write {
|
||||
return nil
|
||||
}
|
||||
|
||||
dq.write--
|
||||
index := dq.write & dq.mask
|
||||
t := dq.types[index]
|
||||
dq.types[index] = nil
|
||||
return t
|
||||
}
|
||||
|
||||
// all returns all elements.
|
||||
//
|
||||
// The deque is empty after calling this method.
|
||||
func (dq *typeDeque) all() []*Type {
|
||||
length := dq.write - dq.read
|
||||
types := make([]*Type, 0, length)
|
||||
for t := dq.shift(); t != nil; t = dq.shift() {
|
||||
types = append(types, t)
|
||||
}
|
||||
return types
|
||||
}
|
||||
|
||||
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
|
||||
// it into a graph of Types connected via pointers.
|
||||
//
|
||||
// Returns a map of named types (so, where NameOff is non-zero) and a slice of types
|
||||
// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
|
||||
// the same name. A Type may form a cyclic graph by pointing at itself.
|
||||
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) {
|
||||
type fixupDef struct {
|
||||
id TypeID
|
||||
expectedKind btfKind
|
||||
typ *Type
|
||||
}
|
||||
|
||||
var fixups []fixupDef
|
||||
fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
|
||||
fixups = append(fixups, fixupDef{id, expectedKind, typ})
|
||||
}
|
||||
|
||||
convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
|
||||
// NB: The fixup below relies on pre-allocating this array to
|
||||
// work, since otherwise append might re-allocate members.
|
||||
members := make([]Member, 0, len(raw))
|
||||
for i, btfMember := range raw {
|
||||
name, err := rawStrings.LookupName(btfMember.NameOff)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
|
||||
}
|
||||
m := Member{
|
||||
Name: name,
|
||||
Offset: btfMember.Offset,
|
||||
}
|
||||
if kindFlag {
|
||||
m.BitfieldSize = btfMember.Offset >> 24
|
||||
m.Offset &= 0xffffff
|
||||
}
|
||||
members = append(members, m)
|
||||
}
|
||||
for i := range members {
|
||||
fixup(raw[i].Type, kindUnknown, &members[i].Type)
|
||||
}
|
||||
return members, nil
|
||||
}
|
||||
|
||||
types = make([]Type, 0, len(rawTypes))
|
||||
types = append(types, (*Void)(nil))
|
||||
namedTypes = make(map[string][]namedType)
|
||||
|
||||
for i, raw := range rawTypes {
|
||||
var (
|
||||
// Void is defined to always be type ID 0, and is thus
|
||||
// omitted from BTF.
|
||||
id = TypeID(i + 1)
|
||||
typ Type
|
||||
)
|
||||
|
||||
name, err := rawStrings.LookupName(raw.NameOff)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
|
||||
}
|
||||
|
||||
switch raw.Kind() {
|
||||
case kindInt:
|
||||
encoding, offset, bits := intEncoding(*raw.data.(*uint32))
|
||||
typ = &Int{id, name, raw.Size(), encoding, offset, bits}
|
||||
|
||||
case kindPointer:
|
||||
ptr := &Pointer{id, nil}
|
||||
fixup(raw.Type(), kindUnknown, &ptr.Target)
|
||||
typ = ptr
|
||||
|
||||
case kindArray:
|
||||
btfArr := raw.data.(*btfArray)
|
||||
|
||||
// IndexType is unused according to btf.rst.
|
||||
// Don't make it available right now.
|
||||
arr := &Array{id, nil, btfArr.Nelems}
|
||||
fixup(btfArr.Type, kindUnknown, &arr.Type)
|
||||
typ = arr
|
||||
|
||||
case kindStruct:
|
||||
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
|
||||
}
|
||||
typ = &Struct{id, name, raw.Size(), members}
|
||||
|
||||
case kindUnion:
|
||||
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
|
||||
}
|
||||
typ = &Union{id, name, raw.Size(), members}
|
||||
|
||||
case kindEnum:
|
||||
rawvals := raw.data.([]btfEnum)
|
||||
vals := make([]EnumValue, 0, len(rawvals))
|
||||
for i, btfVal := range rawvals {
|
||||
name, err := rawStrings.LookupName(btfVal.NameOff)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
|
||||
}
|
||||
vals = append(vals, EnumValue{
|
||||
Name: name,
|
||||
Value: btfVal.Val,
|
||||
})
|
||||
}
|
||||
typ = &Enum{id, name, vals}
|
||||
|
||||
case kindForward:
|
||||
if raw.KindFlag() {
|
||||
typ = &Fwd{id, name, FwdUnion}
|
||||
} else {
|
||||
typ = &Fwd{id, name, FwdStruct}
|
||||
}
|
||||
|
||||
case kindTypedef:
|
||||
typedef := &Typedef{id, name, nil}
|
||||
fixup(raw.Type(), kindUnknown, &typedef.Type)
|
||||
typ = typedef
|
||||
|
||||
case kindVolatile:
|
||||
volatile := &Volatile{id, nil}
|
||||
fixup(raw.Type(), kindUnknown, &volatile.Type)
|
||||
typ = volatile
|
||||
|
||||
case kindConst:
|
||||
cnst := &Const{id, nil}
|
||||
fixup(raw.Type(), kindUnknown, &cnst.Type)
|
||||
typ = cnst
|
||||
|
||||
case kindRestrict:
|
||||
restrict := &Restrict{id, nil}
|
||||
fixup(raw.Type(), kindUnknown, &restrict.Type)
|
||||
typ = restrict
|
||||
|
||||
case kindFunc:
|
||||
fn := &Func{id, name, nil, raw.Linkage()}
|
||||
fixup(raw.Type(), kindFuncProto, &fn.Type)
|
||||
typ = fn
|
||||
|
||||
case kindFuncProto:
|
||||
rawparams := raw.data.([]btfParam)
|
||||
params := make([]FuncParam, 0, len(rawparams))
|
||||
for i, param := range rawparams {
|
||||
name, err := rawStrings.LookupName(param.NameOff)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
|
||||
}
|
||||
params = append(params, FuncParam{
|
||||
Name: name,
|
||||
})
|
||||
}
|
||||
for i := range params {
|
||||
fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type)
|
||||
}
|
||||
|
||||
fp := &FuncProto{id, nil, params}
|
||||
fixup(raw.Type(), kindUnknown, &fp.Return)
|
||||
typ = fp
|
||||
|
||||
case kindVar:
|
||||
variable := raw.data.(*btfVariable)
|
||||
v := &Var{id, name, nil, VarLinkage(variable.Linkage)}
|
||||
fixup(raw.Type(), kindUnknown, &v.Type)
|
||||
typ = v
|
||||
|
||||
case kindDatasec:
|
||||
btfVars := raw.data.([]btfVarSecinfo)
|
||||
vars := make([]VarSecinfo, 0, len(btfVars))
|
||||
for _, btfVar := range btfVars {
|
||||
vars = append(vars, VarSecinfo{
|
||||
Offset: btfVar.Offset,
|
||||
Size: btfVar.Size,
|
||||
})
|
||||
}
|
||||
for i := range vars {
|
||||
fixup(btfVars[i].Type, kindVar, &vars[i].Type)
|
||||
}
|
||||
typ = &Datasec{id, name, raw.SizeType, vars}
|
||||
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
|
||||
}
|
||||
|
||||
types = append(types, typ)
|
||||
|
||||
if named, ok := typ.(namedType); ok {
|
||||
if name := essentialName(named.name()); name != "" {
|
||||
namedTypes[name] = append(namedTypes[name], named)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, fixup := range fixups {
|
||||
i := int(fixup.id)
|
||||
if i >= len(types) {
|
||||
return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
|
||||
}
|
||||
|
||||
// Default void (id 0) to unknown
|
||||
rawKind := kindUnknown
|
||||
if i > 0 {
|
||||
rawKind = rawTypes[i-1].Kind()
|
||||
}
|
||||
|
||||
if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
|
||||
return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
|
||||
}
|
||||
|
||||
*fixup.typ = types[i]
|
||||
}
|
||||
|
||||
return types, namedTypes, nil
|
||||
}
|
||||
|
||||
// essentialName returns name without a ___ suffix.
|
||||
func essentialName(name string) string {
|
||||
lastIdx := strings.LastIndex(name, "___")
|
||||
if lastIdx > 0 {
|
||||
return name[:lastIdx]
|
||||
}
|
||||
return name
|
||||
}
|
||||
62
src/runtime/vendor/github.com/cilium/ebpf/internal/cpu.go
generated
vendored
Normal file
62
src/runtime/vendor/github.com/cilium/ebpf/internal/cpu.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var sysCPU struct {
|
||||
once sync.Once
|
||||
err error
|
||||
num int
|
||||
}
|
||||
|
||||
// PossibleCPUs returns the max number of CPUs a system may possibly have
|
||||
// Logical CPU numbers must be of the form 0-n
|
||||
func PossibleCPUs() (int, error) {
|
||||
sysCPU.once.Do(func() {
|
||||
sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
|
||||
})
|
||||
|
||||
return sysCPU.num, sysCPU.err
|
||||
}
|
||||
|
||||
func parseCPUsFromFile(path string) (int, error) {
|
||||
spec, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n, err := parseCPUs(string(spec))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("can't parse %s: %v", path, err)
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// parseCPUs parses the number of cpus from a string produced
|
||||
// by bitmap_list_string() in the Linux kernel.
|
||||
// Multiple ranges are rejected, since they can't be unified
|
||||
// into a single number.
|
||||
// This is the format of /sys/devices/system/cpu/possible, it
|
||||
// is not suitable for /sys/devices/system/cpu/online, etc.
|
||||
func parseCPUs(spec string) (int, error) {
|
||||
if strings.Trim(spec, "\n") == "0" {
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
var low, high int
|
||||
n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
|
||||
if n != 2 || err != nil {
|
||||
return 0, fmt.Errorf("invalid format: %s", spec)
|
||||
}
|
||||
if low != 0 {
|
||||
return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
|
||||
}
|
||||
|
||||
// cpus is 0 indexed
|
||||
return high + 1, nil
|
||||
}
|
||||
68
src/runtime/vendor/github.com/cilium/ebpf/internal/elf.go
generated
vendored
Normal file
68
src/runtime/vendor/github.com/cilium/ebpf/internal/elf.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type SafeELFFile struct {
|
||||
*elf.File
|
||||
}
|
||||
|
||||
// NewSafeELFFile reads an ELF safely.
|
||||
//
|
||||
// Any panic during parsing is turned into an error. This is necessary since
|
||||
// there are a bunch of unfixed bugs in debug/elf.
|
||||
//
|
||||
// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
|
||||
func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
|
||||
defer func() {
|
||||
r := recover()
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
safe = nil
|
||||
err = fmt.Errorf("reading ELF file panicked: %s", r)
|
||||
}()
|
||||
|
||||
file, err := elf.NewFile(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SafeELFFile{file}, nil
|
||||
}
|
||||
|
||||
// Symbols is the safe version of elf.File.Symbols.
|
||||
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
|
||||
defer func() {
|
||||
r := recover()
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
syms = nil
|
||||
err = fmt.Errorf("reading ELF symbols panicked: %s", r)
|
||||
}()
|
||||
|
||||
syms, err = se.File.Symbols()
|
||||
return
|
||||
}
|
||||
|
||||
// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
|
||||
func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
|
||||
defer func() {
|
||||
r := recover()
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
syms = nil
|
||||
err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
|
||||
}()
|
||||
|
||||
syms, err = se.File.DynamicSymbols()
|
||||
return
|
||||
}
|
||||
29
src/runtime/vendor/github.com/cilium/ebpf/internal/endian.go
generated
vendored
Normal file
29
src/runtime/vendor/github.com/cilium/ebpf/internal/endian.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
|
||||
// depending on the host's endianness.
|
||||
var NativeEndian binary.ByteOrder
|
||||
|
||||
// Clang is set to either "el" or "eb" depending on the host's endianness.
|
||||
var ClangEndian string
|
||||
|
||||
func init() {
|
||||
if isBigEndian() {
|
||||
NativeEndian = binary.BigEndian
|
||||
ClangEndian = "eb"
|
||||
} else {
|
||||
NativeEndian = binary.LittleEndian
|
||||
ClangEndian = "el"
|
||||
}
|
||||
}
|
||||
|
||||
func isBigEndian() (ret bool) {
|
||||
i := int(0x1)
|
||||
bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
|
||||
return bs[0] == 0
|
||||
}
|
||||
51
src/runtime/vendor/github.com/cilium/ebpf/internal/errors.go
generated
vendored
Normal file
51
src/runtime/vendor/github.com/cilium/ebpf/internal/errors.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
// ErrorWithLog returns an error that includes logs from the
|
||||
// kernel verifier.
|
||||
//
|
||||
// logErr should be the error returned by the syscall that generated
|
||||
// the log. It is used to check for truncation of the output.
|
||||
func ErrorWithLog(err error, log []byte, logErr error) error {
|
||||
logStr := strings.Trim(CString(log), "\t\r\n ")
|
||||
if errors.Is(logErr, unix.ENOSPC) {
|
||||
logStr += " (truncated...)"
|
||||
}
|
||||
|
||||
return &VerifierError{err, logStr}
|
||||
}
|
||||
|
||||
// VerifierError includes information from the eBPF verifier.
|
||||
type VerifierError struct {
|
||||
cause error
|
||||
log string
|
||||
}
|
||||
|
||||
func (le *VerifierError) Unwrap() error {
|
||||
return le.cause
|
||||
}
|
||||
|
||||
func (le *VerifierError) Error() string {
|
||||
if le.log == "" {
|
||||
return le.cause.Error()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s: %s", le.cause, le.log)
|
||||
}
|
||||
|
||||
// CString turns a NUL / zero terminated byte buffer into a string.
|
||||
func CString(in []byte) string {
|
||||
inLen := bytes.IndexByte(in, 0)
|
||||
if inLen == -1 {
|
||||
return ""
|
||||
}
|
||||
return string(in[:inLen])
|
||||
}
|
||||
69
src/runtime/vendor/github.com/cilium/ebpf/internal/fd.go
generated
vendored
Normal file
69
src/runtime/vendor/github.com/cilium/ebpf/internal/fd.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
var ErrClosedFd = errors.New("use of closed file descriptor")
|
||||
|
||||
type FD struct {
|
||||
raw int64
|
||||
}
|
||||
|
||||
func NewFD(value uint32) *FD {
|
||||
fd := &FD{int64(value)}
|
||||
runtime.SetFinalizer(fd, (*FD).Close)
|
||||
return fd
|
||||
}
|
||||
|
||||
func (fd *FD) String() string {
|
||||
return strconv.FormatInt(fd.raw, 10)
|
||||
}
|
||||
|
||||
func (fd *FD) Value() (uint32, error) {
|
||||
if fd.raw < 0 {
|
||||
return 0, ErrClosedFd
|
||||
}
|
||||
|
||||
return uint32(fd.raw), nil
|
||||
}
|
||||
|
||||
func (fd *FD) Close() error {
|
||||
if fd.raw < 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
value := int(fd.raw)
|
||||
fd.raw = -1
|
||||
|
||||
fd.Forget()
|
||||
return unix.Close(value)
|
||||
}
|
||||
|
||||
func (fd *FD) Forget() {
|
||||
runtime.SetFinalizer(fd, nil)
|
||||
}
|
||||
|
||||
func (fd *FD) Dup() (*FD, error) {
|
||||
if fd.raw < 0 {
|
||||
return nil, ErrClosedFd
|
||||
}
|
||||
|
||||
dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't dup fd: %v", err)
|
||||
}
|
||||
|
||||
return NewFD(uint32(dup)), nil
|
||||
}
|
||||
|
||||
func (fd *FD) File(name string) *os.File {
|
||||
fd.Forget()
|
||||
return os.NewFile(uintptr(fd.raw), name)
|
||||
}
|
||||
100
src/runtime/vendor/github.com/cilium/ebpf/internal/feature.go
generated
vendored
Normal file
100
src/runtime/vendor/github.com/cilium/ebpf/internal/feature.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrNotSupported indicates that a feature is not supported by the current kernel.
|
||||
var ErrNotSupported = errors.New("not supported")
|
||||
|
||||
// UnsupportedFeatureError is returned by FeatureTest() functions.
|
||||
type UnsupportedFeatureError struct {
|
||||
// The minimum Linux mainline version required for this feature.
|
||||
// Used for the error string, and for sanity checking during testing.
|
||||
MinimumVersion Version
|
||||
|
||||
// The name of the feature that isn't supported.
|
||||
Name string
|
||||
}
|
||||
|
||||
func (ufe *UnsupportedFeatureError) Error() string {
|
||||
if ufe.MinimumVersion.Unspecified() {
|
||||
return fmt.Sprintf("%s not supported", ufe.Name)
|
||||
}
|
||||
return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
|
||||
}
|
||||
|
||||
// Is indicates that UnsupportedFeatureError is ErrNotSupported.
|
||||
func (ufe *UnsupportedFeatureError) Is(target error) bool {
|
||||
return target == ErrNotSupported
|
||||
}
|
||||
|
||||
type featureTest struct {
|
||||
sync.RWMutex
|
||||
successful bool
|
||||
result error
|
||||
}
|
||||
|
||||
// FeatureTestFn is used to determine whether the kernel supports
|
||||
// a certain feature.
|
||||
//
|
||||
// The return values have the following semantics:
|
||||
//
|
||||
// err == ErrNotSupported: the feature is not available
|
||||
// err == nil: the feature is available
|
||||
// err != nil: the test couldn't be executed
|
||||
type FeatureTestFn func() error
|
||||
|
||||
// FeatureTest wraps a function so that it is run at most once.
|
||||
//
|
||||
// name should identify the tested feature, while version must be in the
|
||||
// form Major.Minor[.Patch].
|
||||
//
|
||||
// Returns an error wrapping ErrNotSupported if the feature is not supported.
|
||||
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
|
||||
v, err := NewVersion(version)
|
||||
if err != nil {
|
||||
return func() error { return err }
|
||||
}
|
||||
|
||||
ft := new(featureTest)
|
||||
return func() error {
|
||||
ft.RLock()
|
||||
if ft.successful {
|
||||
defer ft.RUnlock()
|
||||
return ft.result
|
||||
}
|
||||
ft.RUnlock()
|
||||
ft.Lock()
|
||||
defer ft.Unlock()
|
||||
// check one more time on the off
|
||||
// chance that two go routines
|
||||
// were able to call into the write
|
||||
// lock
|
||||
if ft.successful {
|
||||
return ft.result
|
||||
}
|
||||
err := fn()
|
||||
switch {
|
||||
case errors.Is(err, ErrNotSupported):
|
||||
ft.result = &UnsupportedFeatureError{
|
||||
MinimumVersion: v,
|
||||
Name: name,
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case err == nil:
|
||||
ft.successful = true
|
||||
|
||||
default:
|
||||
// We couldn't execute the feature test to a point
|
||||
// where it could make a determination.
|
||||
// Don't cache the result, just return it.
|
||||
return fmt.Errorf("detect support for %s: %w", name, err)
|
||||
}
|
||||
|
||||
return ft.result
|
||||
}
|
||||
}
|
||||
16
src/runtime/vendor/github.com/cilium/ebpf/internal/io.go
generated
vendored
Normal file
16
src/runtime/vendor/github.com/cilium/ebpf/internal/io.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
package internal
|
||||
|
||||
import "errors"
|
||||
|
||||
// DiscardZeroes makes sure that all written bytes are zero
|
||||
// before discarding them.
|
||||
type DiscardZeroes struct{}
|
||||
|
||||
func (DiscardZeroes) Write(p []byte) (int, error) {
|
||||
for _, b := range p {
|
||||
if b != 0 {
|
||||
return 0, errors.New("encountered non-zero byte")
|
||||
}
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
44
src/runtime/vendor/github.com/cilium/ebpf/internal/pinning.go
generated
vendored
Normal file
44
src/runtime/vendor/github.com/cilium/ebpf/internal/pinning.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
func Pin(currentPath, newPath string, fd *FD) error {
|
||||
if newPath == "" {
|
||||
return errors.New("given pinning path cannot be empty")
|
||||
}
|
||||
if currentPath == newPath {
|
||||
return nil
|
||||
}
|
||||
if currentPath == "" {
|
||||
return BPFObjPin(newPath, fd)
|
||||
}
|
||||
var err error
|
||||
// Renameat2 is used instead of os.Rename to disallow the new path replacing
|
||||
// an existing path.
|
||||
if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil {
|
||||
// Object is now moved to the new pinning path.
|
||||
return nil
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
|
||||
}
|
||||
// Internal state not in sync with the file system so let's fix it.
|
||||
return BPFObjPin(newPath, fd)
|
||||
}
|
||||
|
||||
func Unpin(pinnedPath string) error {
|
||||
if pinnedPath == "" {
|
||||
return nil
|
||||
}
|
||||
err := os.Remove(pinnedPath)
|
||||
if err == nil || os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
31
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr.go
generated
vendored
Normal file
31
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
// NewPointer creates a 64-bit pointer from an unsafe Pointer.
|
||||
func NewPointer(ptr unsafe.Pointer) Pointer {
|
||||
return Pointer{ptr: ptr}
|
||||
}
|
||||
|
||||
// NewSlicePointer creates a 64-bit pointer from a byte slice.
|
||||
func NewSlicePointer(buf []byte) Pointer {
|
||||
if len(buf) == 0 {
|
||||
return Pointer{}
|
||||
}
|
||||
|
||||
return Pointer{ptr: unsafe.Pointer(&buf[0])}
|
||||
}
|
||||
|
||||
// NewStringPointer creates a 64-bit pointer from a string.
|
||||
func NewStringPointer(str string) Pointer {
|
||||
p, err := unix.BytePtrFromString(str)
|
||||
if err != nil {
|
||||
return Pointer{}
|
||||
}
|
||||
|
||||
return Pointer{ptr: unsafe.Pointer(p)}
|
||||
}
|
||||
14
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
generated
vendored
Normal file
14
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// +build armbe mips mips64p32
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer wraps an unsafe.Pointer to be 64bit to
|
||||
// conform to the syscall specification.
|
||||
type Pointer struct {
|
||||
pad uint32
|
||||
ptr unsafe.Pointer
|
||||
}
|
||||
14
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
generated
vendored
Normal file
14
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// +build 386 amd64p32 arm mipsle mips64p32le
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer wraps an unsafe.Pointer to be 64bit to
|
||||
// conform to the syscall specification.
|
||||
type Pointer struct {
|
||||
ptr unsafe.Pointer
|
||||
pad uint32
|
||||
}
|
||||
14
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr_64.go
generated
vendored
Normal file
14
src/runtime/vendor/github.com/cilium/ebpf/internal/ptr_64.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le
|
||||
// +build !armbe,!mips,!mips64p32
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer wraps an unsafe.Pointer to be 64bit to
|
||||
// conform to the syscall specification.
|
||||
type Pointer struct {
|
||||
ptr unsafe.Pointer
|
||||
}
|
||||
245
src/runtime/vendor/github.com/cilium/ebpf/internal/syscall.go
generated
vendored
Normal file
245
src/runtime/vendor/github.com/cilium/ebpf/internal/syscall.go
generated
vendored
Normal file
@@ -0,0 +1,245 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
//go:generate stringer -output syscall_string.go -type=BPFCmd
|
||||
|
||||
// BPFCmd identifies a subcommand of the bpf syscall.
|
||||
type BPFCmd int
|
||||
|
||||
// Well known BPF commands.
|
||||
const (
|
||||
BPF_MAP_CREATE BPFCmd = iota
|
||||
BPF_MAP_LOOKUP_ELEM
|
||||
BPF_MAP_UPDATE_ELEM
|
||||
BPF_MAP_DELETE_ELEM
|
||||
BPF_MAP_GET_NEXT_KEY
|
||||
BPF_PROG_LOAD
|
||||
BPF_OBJ_PIN
|
||||
BPF_OBJ_GET
|
||||
BPF_PROG_ATTACH
|
||||
BPF_PROG_DETACH
|
||||
BPF_PROG_TEST_RUN
|
||||
BPF_PROG_GET_NEXT_ID
|
||||
BPF_MAP_GET_NEXT_ID
|
||||
BPF_PROG_GET_FD_BY_ID
|
||||
BPF_MAP_GET_FD_BY_ID
|
||||
BPF_OBJ_GET_INFO_BY_FD
|
||||
BPF_PROG_QUERY
|
||||
BPF_RAW_TRACEPOINT_OPEN
|
||||
BPF_BTF_LOAD
|
||||
BPF_BTF_GET_FD_BY_ID
|
||||
BPF_TASK_FD_QUERY
|
||||
BPF_MAP_LOOKUP_AND_DELETE_ELEM
|
||||
BPF_MAP_FREEZE
|
||||
BPF_BTF_GET_NEXT_ID
|
||||
BPF_MAP_LOOKUP_BATCH
|
||||
BPF_MAP_LOOKUP_AND_DELETE_BATCH
|
||||
BPF_MAP_UPDATE_BATCH
|
||||
BPF_MAP_DELETE_BATCH
|
||||
BPF_LINK_CREATE
|
||||
BPF_LINK_UPDATE
|
||||
BPF_LINK_GET_FD_BY_ID
|
||||
BPF_LINK_GET_NEXT_ID
|
||||
BPF_ENABLE_STATS
|
||||
BPF_ITER_CREATE
|
||||
)
|
||||
|
||||
// BPF wraps SYS_BPF.
|
||||
//
|
||||
// Any pointers contained in attr must use the Pointer type from this package.
|
||||
func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
|
||||
r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
|
||||
runtime.KeepAlive(attr)
|
||||
|
||||
var err error
|
||||
if errNo != 0 {
|
||||
err = wrappedErrno{errNo}
|
||||
}
|
||||
|
||||
return r1, err
|
||||
}
|
||||
|
||||
type BPFProgAttachAttr struct {
|
||||
TargetFd uint32
|
||||
AttachBpfFd uint32
|
||||
AttachType uint32
|
||||
AttachFlags uint32
|
||||
ReplaceBpfFd uint32
|
||||
}
|
||||
|
||||
func BPFProgAttach(attr *BPFProgAttachAttr) error {
|
||||
_, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||
return err
|
||||
}
|
||||
|
||||
type BPFProgDetachAttr struct {
|
||||
TargetFd uint32
|
||||
AttachBpfFd uint32
|
||||
AttachType uint32
|
||||
}
|
||||
|
||||
func BPFProgDetach(attr *BPFProgDetachAttr) error {
|
||||
_, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||
return err
|
||||
}
|
||||
|
||||
type BPFEnableStatsAttr struct {
|
||||
StatsType uint32
|
||||
}
|
||||
|
||||
func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) {
|
||||
ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("enable stats: %w", err)
|
||||
}
|
||||
return NewFD(uint32(ptr)), nil
|
||||
|
||||
}
|
||||
|
||||
type bpfObjAttr struct {
|
||||
fileName Pointer
|
||||
fd uint32
|
||||
fileFlags uint32
|
||||
}
|
||||
|
||||
const bpfFSType = 0xcafe4a11
|
||||
|
||||
// BPFObjPin wraps BPF_OBJ_PIN.
|
||||
func BPFObjPin(fileName string, fd *FD) error {
|
||||
dirName := filepath.Dir(fileName)
|
||||
var statfs unix.Statfs_t
|
||||
if err := unix.Statfs(dirName, &statfs); err != nil {
|
||||
return err
|
||||
}
|
||||
if uint64(statfs.Type) != bpfFSType {
|
||||
return fmt.Errorf("%s is not on a bpf filesystem", fileName)
|
||||
}
|
||||
|
||||
value, err := fd.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attr := bpfObjAttr{
|
||||
fileName: NewStringPointer(fileName),
|
||||
fd: value,
|
||||
}
|
||||
_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
|
||||
if err != nil {
|
||||
return fmt.Errorf("pin object %s: %w", fileName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BPFObjGet wraps BPF_OBJ_GET.
|
||||
func BPFObjGet(fileName string, flags uint32) (*FD, error) {
|
||||
attr := bpfObjAttr{
|
||||
fileName: NewStringPointer(fileName),
|
||||
fileFlags: flags,
|
||||
}
|
||||
ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get object %s: %w", fileName, err)
|
||||
}
|
||||
return NewFD(uint32(ptr)), nil
|
||||
}
|
||||
|
||||
type bpfObjGetInfoByFDAttr struct {
|
||||
fd uint32
|
||||
infoLen uint32
|
||||
info Pointer
|
||||
}
|
||||
|
||||
// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD.
|
||||
//
|
||||
// Available from 4.13.
|
||||
func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
|
||||
value, err := fd.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attr := bpfObjGetInfoByFDAttr{
|
||||
fd: value,
|
||||
infoLen: uint32(size),
|
||||
info: NewPointer(info),
|
||||
}
|
||||
_, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
|
||||
if err != nil {
|
||||
return fmt.Errorf("fd %v: %w", fd, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BPFObjName is a null-terminated string made up of
|
||||
// 'A-Za-z0-9_' characters.
|
||||
type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte
|
||||
|
||||
// NewBPFObjName truncates the result if it is too long.
|
||||
func NewBPFObjName(name string) BPFObjName {
|
||||
var result BPFObjName
|
||||
copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
|
||||
return result
|
||||
}
|
||||
|
||||
type BPFMapCreateAttr struct {
|
||||
MapType uint32
|
||||
KeySize uint32
|
||||
ValueSize uint32
|
||||
MaxEntries uint32
|
||||
Flags uint32
|
||||
InnerMapFd uint32 // since 4.12 56f668dfe00d
|
||||
NumaNode uint32 // since 4.14 96eabe7a40aa
|
||||
MapName BPFObjName // since 4.15 ad5b177bd73f
|
||||
MapIfIndex uint32
|
||||
BTFFd uint32
|
||||
BTFKeyTypeID uint32
|
||||
BTFValueTypeID uint32
|
||||
}
|
||||
|
||||
func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) {
|
||||
fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewFD(uint32(fd)), nil
|
||||
}
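
Not part of the vendored file: the public cilium/ebpf API is the usual way to exercise this wrapper; a hedged sketch creating a small hash map (MapSpec field names are from the public package, not from this internal file).

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	// Public-API equivalent of the internal BPFMapCreate wrapper: a small
	// hash map with 4-byte keys and 8-byte values.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 16,
	})
	if err != nil {
		fmt.Println("create map:", err)
		return
	}
	defer m.Close()
	fmt.Println("created map, fd:", m.FD())
}
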
|
||||
|
||||
// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
|
||||
// syscall.E* or unix.E* constants.
|
||||
//
|
||||
// You should never export an error of this type.
|
||||
type wrappedErrno struct {
|
||||
syscall.Errno
|
||||
}
|
||||
|
||||
func (we wrappedErrno) Unwrap() error {
|
||||
return we.Errno
|
||||
}
|
||||
|
||||
type syscallError struct {
|
||||
error
|
||||
errno syscall.Errno
|
||||
}
|
||||
|
||||
func SyscallError(err error, errno syscall.Errno) error {
|
||||
return &syscallError{err, errno}
|
||||
}
|
||||
|
||||
func (se *syscallError) Is(target error) bool {
|
||||
return target == se.error
|
||||
}
|
||||
|
||||
func (se *syscallError) Unwrap() error {
|
||||
return se.errno
|
||||
}
|
||||
56 src/runtime/vendor/github.com/cilium/ebpf/internal/syscall_string.go (generated, vendored, new file)
@@ -0,0 +1,56 @@
|
||||
// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.
|
||||
|
||||
package internal
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[BPF_MAP_CREATE-0]
|
||||
_ = x[BPF_MAP_LOOKUP_ELEM-1]
|
||||
_ = x[BPF_MAP_UPDATE_ELEM-2]
|
||||
_ = x[BPF_MAP_DELETE_ELEM-3]
|
||||
_ = x[BPF_MAP_GET_NEXT_KEY-4]
|
||||
_ = x[BPF_PROG_LOAD-5]
|
||||
_ = x[BPF_OBJ_PIN-6]
|
||||
_ = x[BPF_OBJ_GET-7]
|
||||
_ = x[BPF_PROG_ATTACH-8]
|
||||
_ = x[BPF_PROG_DETACH-9]
|
||||
_ = x[BPF_PROG_TEST_RUN-10]
|
||||
_ = x[BPF_PROG_GET_NEXT_ID-11]
|
||||
_ = x[BPF_MAP_GET_NEXT_ID-12]
|
||||
_ = x[BPF_PROG_GET_FD_BY_ID-13]
|
||||
_ = x[BPF_MAP_GET_FD_BY_ID-14]
|
||||
_ = x[BPF_OBJ_GET_INFO_BY_FD-15]
|
||||
_ = x[BPF_PROG_QUERY-16]
|
||||
_ = x[BPF_RAW_TRACEPOINT_OPEN-17]
|
||||
_ = x[BPF_BTF_LOAD-18]
|
||||
_ = x[BPF_BTF_GET_FD_BY_ID-19]
|
||||
_ = x[BPF_TASK_FD_QUERY-20]
|
||||
_ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
|
||||
_ = x[BPF_MAP_FREEZE-22]
|
||||
_ = x[BPF_BTF_GET_NEXT_ID-23]
|
||||
_ = x[BPF_MAP_LOOKUP_BATCH-24]
|
||||
_ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
|
||||
_ = x[BPF_MAP_UPDATE_BATCH-26]
|
||||
_ = x[BPF_MAP_DELETE_BATCH-27]
|
||||
_ = x[BPF_LINK_CREATE-28]
|
||||
_ = x[BPF_LINK_UPDATE-29]
|
||||
_ = x[BPF_LINK_GET_FD_BY_ID-30]
|
||||
_ = x[BPF_LINK_GET_NEXT_ID-31]
|
||||
_ = x[BPF_ENABLE_STATS-32]
|
||||
_ = x[BPF_ITER_CREATE-33]
|
||||
}
|
||||
|
||||
const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"
|
||||
|
||||
var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}
|
||||
|
||||
func (i BPFCmd) String() string {
|
||||
if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
|
||||
return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
|
||||
}
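
Not part of the vendored file: a tiny sketch of how the generated name/index tables are sliced, using a trimmed-down copy of the first three entries.

package main

import "fmt"

func main() {
	// Mirrors the stringer lookup above with a shortened table.
	const name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEM"
	index := [...]uint16{0, 14, 33, 52}

	for i := 0; i < len(index)-1; i++ {
		fmt.Println(i, name[index[i]:index[i+1]])
	}
	// Output:
	// 0 BPF_MAP_CREATE
	// 1 BPF_MAP_LOOKUP_ELEM
	// 2 BPF_MAP_UPDATE_ELEM
}
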
|
||||
204 src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go (generated, vendored, new file)
@@ -0,0 +1,204 @@
|
||||
// +build linux
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"syscall"
|
||||
|
||||
linux "golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
ENOENT = linux.ENOENT
|
||||
EEXIST = linux.EEXIST
|
||||
EAGAIN = linux.EAGAIN
|
||||
ENOSPC = linux.ENOSPC
|
||||
EINVAL = linux.EINVAL
|
||||
EPOLLIN = linux.EPOLLIN
|
||||
EINTR = linux.EINTR
|
||||
EPERM = linux.EPERM
|
||||
ESRCH = linux.ESRCH
|
||||
ENODEV = linux.ENODEV
|
||||
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
|
||||
ENOTSUPP = syscall.Errno(0x20c)
|
||||
|
||||
EBADF = linux.EBADF
|
||||
BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
|
||||
BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
|
||||
BPF_F_RDONLY = linux.BPF_F_RDONLY
|
||||
BPF_F_WRONLY = linux.BPF_F_WRONLY
|
||||
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
|
||||
BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
|
||||
BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
|
||||
BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
|
||||
BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
|
||||
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
|
||||
BPF_TAG_SIZE = linux.BPF_TAG_SIZE
|
||||
SYS_BPF = linux.SYS_BPF
|
||||
F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
|
||||
EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
|
||||
EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
|
||||
O_CLOEXEC = linux.O_CLOEXEC
|
||||
O_NONBLOCK = linux.O_NONBLOCK
|
||||
PROT_READ = linux.PROT_READ
|
||||
PROT_WRITE = linux.PROT_WRITE
|
||||
MAP_SHARED = linux.MAP_SHARED
|
||||
PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
|
||||
PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
|
||||
PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
|
||||
PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
|
||||
PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
|
||||
PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
|
||||
PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
|
||||
PerfBitWatermark = linux.PerfBitWatermark
|
||||
PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
|
||||
PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
|
||||
RLIM_INFINITY = linux.RLIM_INFINITY
|
||||
RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
|
||||
BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
|
||||
PERF_RECORD_LOST = linux.PERF_RECORD_LOST
|
||||
PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
|
||||
AT_FDCWD = linux.AT_FDCWD
|
||||
RENAME_NOREPLACE = linux.RENAME_NOREPLACE
|
||||
)
|
||||
|
||||
// Statfs_t is a wrapper
|
||||
type Statfs_t = linux.Statfs_t
|
||||
|
||||
// Rlimit is a wrapper
|
||||
type Rlimit = linux.Rlimit
|
||||
|
||||
// Setrlimit is a wrapper
|
||||
func Setrlimit(resource int, rlim *Rlimit) (err error) {
|
||||
return linux.Setrlimit(resource, rlim)
|
||||
}
|
||||
|
||||
// Syscall is a wrapper
|
||||
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
|
||||
return linux.Syscall(trap, a1, a2, a3)
|
||||
}
|
||||
|
||||
// FcntlInt is a wrapper
|
||||
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
|
||||
return linux.FcntlInt(fd, cmd, arg)
|
||||
}
|
||||
|
||||
// IoctlSetInt is a wrapper
|
||||
func IoctlSetInt(fd int, req uint, value int) error {
|
||||
return linux.IoctlSetInt(fd, req, value)
|
||||
}
|
||||
|
||||
// Statfs is a wrapper
|
||||
func Statfs(path string, buf *Statfs_t) (err error) {
|
||||
return linux.Statfs(path, buf)
|
||||
}
|
||||
|
||||
// Close is a wrapper
|
||||
func Close(fd int) (err error) {
|
||||
return linux.Close(fd)
|
||||
}
|
||||
|
||||
// EpollEvent is a wrapper
|
||||
type EpollEvent = linux.EpollEvent
|
||||
|
||||
// EpollWait is a wrapper
|
||||
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
|
||||
return linux.EpollWait(epfd, events, msec)
|
||||
}
|
||||
|
||||
// EpollCtl is a wrapper
|
||||
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
|
||||
return linux.EpollCtl(epfd, op, fd, event)
|
||||
}
|
||||
|
||||
// Eventfd is a wrapper
|
||||
func Eventfd(initval uint, flags int) (fd int, err error) {
|
||||
return linux.Eventfd(initval, flags)
|
||||
}
|
||||
|
||||
// Write is a wrapper
|
||||
func Write(fd int, p []byte) (n int, err error) {
|
||||
return linux.Write(fd, p)
|
||||
}
|
||||
|
||||
// EpollCreate1 is a wrapper
|
||||
func EpollCreate1(flag int) (fd int, err error) {
|
||||
return linux.EpollCreate1(flag)
|
||||
}
|
||||
|
||||
// PerfEventMmapPage is a wrapper
|
||||
type PerfEventMmapPage linux.PerfEventMmapPage
|
||||
|
||||
// SetNonblock is a wrapper
|
||||
func SetNonblock(fd int, nonblocking bool) (err error) {
|
||||
return linux.SetNonblock(fd, nonblocking)
|
||||
}
|
||||
|
||||
// Mmap is a wrapper
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return linux.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
// Munmap is a wrapper
|
||||
func Munmap(b []byte) (err error) {
|
||||
return linux.Munmap(b)
|
||||
}
|
||||
|
||||
// PerfEventAttr is a wrapper
|
||||
type PerfEventAttr = linux.PerfEventAttr
|
||||
|
||||
// PerfEventOpen is a wrapper
|
||||
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
|
||||
return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
|
||||
}
|
||||
|
||||
// Utsname is a wrapper
|
||||
type Utsname = linux.Utsname
|
||||
|
||||
// Uname is a wrapper
|
||||
func Uname(buf *Utsname) (err error) {
|
||||
return linux.Uname(buf)
|
||||
}
|
||||
|
||||
// Getpid is a wrapper
|
||||
func Getpid() int {
|
||||
return linux.Getpid()
|
||||
}
|
||||
|
||||
// Gettid is a wrapper
|
||||
func Gettid() int {
|
||||
return linux.Gettid()
|
||||
}
|
||||
|
||||
// Tgkill is a wrapper
|
||||
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
|
||||
return linux.Tgkill(tgid, tid, sig)
|
||||
}
|
||||
|
||||
// BytePtrFromString is a wrapper
|
||||
func BytePtrFromString(s string) (*byte, error) {
|
||||
return linux.BytePtrFromString(s)
|
||||
}
|
||||
|
||||
// ByteSliceToString is a wrapper
|
||||
func ByteSliceToString(s []byte) string {
|
||||
return linux.ByteSliceToString(s)
|
||||
}
|
||||
|
||||
// Renameat2 is a wrapper
|
||||
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
|
||||
return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags)
|
||||
}
|
||||
|
||||
func KernelRelease() (string, error) {
|
||||
var uname Utsname
|
||||
err := Uname(&uname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
end := bytes.IndexByte(uname.Release[:], 0)
|
||||
release := string(uname.Release[:end])
|
||||
return release, nil
|
||||
}
|
||||
263 src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_other.go (generated, vendored, new file)
@@ -0,0 +1,263 @@
|
||||
// +build !linux
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
const (
|
||||
ENOENT = syscall.ENOENT
|
||||
EEXIST = syscall.EEXIST
|
||||
EAGAIN = syscall.EAGAIN
|
||||
ENOSPC = syscall.ENOSPC
|
||||
EINVAL = syscall.EINVAL
|
||||
EINTR = syscall.EINTR
|
||||
EPERM = syscall.EPERM
|
||||
ESRCH = syscall.ESRCH
|
||||
ENODEV = syscall.ENODEV
|
||||
EBADF = syscall.Errno(0)
|
||||
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
|
||||
ENOTSUPP = syscall.Errno(0x20c)
|
||||
|
||||
BPF_F_NO_PREALLOC = 0
|
||||
BPF_F_NUMA_NODE = 0
|
||||
BPF_F_RDONLY = 0
|
||||
BPF_F_WRONLY = 0
|
||||
BPF_F_RDONLY_PROG = 0
|
||||
BPF_F_WRONLY_PROG = 0
|
||||
BPF_F_SLEEPABLE = 0
|
||||
BPF_F_MMAPABLE = 0
|
||||
BPF_F_INNER_MAP = 0
|
||||
BPF_OBJ_NAME_LEN = 0x10
|
||||
BPF_TAG_SIZE = 0x8
|
||||
SYS_BPF = 321
|
||||
F_DUPFD_CLOEXEC = 0x406
|
||||
EPOLLIN = 0x1
|
||||
EPOLL_CTL_ADD = 0x1
|
||||
EPOLL_CLOEXEC = 0x80000
|
||||
O_CLOEXEC = 0x80000
|
||||
O_NONBLOCK = 0x800
|
||||
PROT_READ = 0x1
|
||||
PROT_WRITE = 0x2
|
||||
MAP_SHARED = 0x1
|
||||
PERF_ATTR_SIZE_VER1 = 0
|
||||
PERF_TYPE_SOFTWARE = 0x1
|
||||
PERF_TYPE_TRACEPOINT = 0
|
||||
PERF_COUNT_SW_BPF_OUTPUT = 0xa
|
||||
PERF_EVENT_IOC_DISABLE = 0
|
||||
PERF_EVENT_IOC_ENABLE = 0
|
||||
PERF_EVENT_IOC_SET_BPF = 0
|
||||
PerfBitWatermark = 0x4000
|
||||
PERF_SAMPLE_RAW = 0x400
|
||||
PERF_FLAG_FD_CLOEXEC = 0x8
|
||||
RLIM_INFINITY = 0x7fffffffffffffff
|
||||
RLIMIT_MEMLOCK = 8
|
||||
BPF_STATS_RUN_TIME = 0
|
||||
PERF_RECORD_LOST = 2
|
||||
PERF_RECORD_SAMPLE = 9
|
||||
AT_FDCWD = -0x2
|
||||
RENAME_NOREPLACE = 0x1
|
||||
)
|
||||
|
||||
// Statfs_t is a wrapper
|
||||
type Statfs_t struct {
|
||||
Type int64
|
||||
Bsize int64
|
||||
Blocks uint64
|
||||
Bfree uint64
|
||||
Bavail uint64
|
||||
Files uint64
|
||||
Ffree uint64
|
||||
Fsid [2]int32
|
||||
Namelen int64
|
||||
Frsize int64
|
||||
Flags int64
|
||||
Spare [4]int64
|
||||
}
|
||||
|
||||
// Rlimit is a wrapper
|
||||
type Rlimit struct {
|
||||
Cur uint64
|
||||
Max uint64
|
||||
}
|
||||
|
||||
// Setrlimit is a wrapper
|
||||
func Setrlimit(resource int, rlim *Rlimit) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// Syscall is a wrapper
|
||||
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
|
||||
return 0, 0, syscall.Errno(1)
|
||||
}
|
||||
|
||||
// FcntlInt is a wrapper
|
||||
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
|
||||
return -1, errNonLinux
|
||||
}
|
||||
|
||||
// IoctlSetInt is a wrapper
|
||||
func IoctlSetInt(fd int, req uint, value int) error {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// Statfs is a wrapper
|
||||
func Statfs(path string, buf *Statfs_t) error {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// Close is a wrapper
|
||||
func Close(fd int) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// EpollEvent is a wrapper
|
||||
type EpollEvent struct {
|
||||
Events uint32
|
||||
Fd int32
|
||||
Pad int32
|
||||
}
|
||||
|
||||
// EpollWait is a wrapper
|
||||
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
|
||||
return 0, errNonLinux
|
||||
}
|
||||
|
||||
// EpollCtl is a wrapper
|
||||
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// Eventfd is a wrapper
|
||||
func Eventfd(initval uint, flags int) (fd int, err error) {
|
||||
return 0, errNonLinux
|
||||
}
|
||||
|
||||
// Write is a wrapper
|
||||
func Write(fd int, p []byte) (n int, err error) {
|
||||
return 0, errNonLinux
|
||||
}
|
||||
|
||||
// EpollCreate1 is a wrapper
|
||||
func EpollCreate1(flag int) (fd int, err error) {
|
||||
return 0, errNonLinux
|
||||
}
|
||||
|
||||
// PerfEventMmapPage is a wrapper
|
||||
type PerfEventMmapPage struct {
|
||||
Version uint32
|
||||
Compat_version uint32
|
||||
Lock uint32
|
||||
Index uint32
|
||||
Offset int64
|
||||
Time_enabled uint64
|
||||
Time_running uint64
|
||||
Capabilities uint64
|
||||
Pmc_width uint16
|
||||
Time_shift uint16
|
||||
Time_mult uint32
|
||||
Time_offset uint64
|
||||
Time_zero uint64
|
||||
Size uint32
|
||||
|
||||
Data_head uint64
|
||||
Data_tail uint64
|
||||
Data_offset uint64
|
||||
Data_size uint64
|
||||
Aux_head uint64
|
||||
Aux_tail uint64
|
||||
Aux_offset uint64
|
||||
Aux_size uint64
|
||||
}
|
||||
|
||||
// SetNonblock is a wrapper
|
||||
func SetNonblock(fd int, nonblocking bool) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// Mmap is a wrapper
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return []byte{}, errNonLinux
|
||||
}
|
||||
|
||||
// Munmap is a wrapper
|
||||
func Munmap(b []byte) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// PerfEventAttr is a wrapper
|
||||
type PerfEventAttr struct {
|
||||
Type uint32
|
||||
Size uint32
|
||||
Config uint64
|
||||
Sample uint64
|
||||
Sample_type uint64
|
||||
Read_format uint64
|
||||
Bits uint64
|
||||
Wakeup uint32
|
||||
Bp_type uint32
|
||||
Ext1 uint64
|
||||
Ext2 uint64
|
||||
Branch_sample_type uint64
|
||||
Sample_regs_user uint64
|
||||
Sample_stack_user uint32
|
||||
Clockid int32
|
||||
Sample_regs_intr uint64
|
||||
Aux_watermark uint32
|
||||
Sample_max_stack uint16
|
||||
}
|
||||
|
||||
// PerfEventOpen is a wrapper
|
||||
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
|
||||
return 0, errNonLinux
|
||||
}
|
||||
|
||||
// Utsname is a wrapper
|
||||
type Utsname struct {
|
||||
Release [65]byte
|
||||
Version [65]byte
|
||||
}
|
||||
|
||||
// Uname is a wrapper
|
||||
func Uname(buf *Utsname) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// Getpid is a wrapper
|
||||
func Getpid() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Gettid is a wrapper
|
||||
func Gettid() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Tgkill is a wrapper
|
||||
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
// BytePtrFromString is a wrapper
|
||||
func BytePtrFromString(s string) (*byte, error) {
|
||||
return nil, errNonLinux
|
||||
}
|
||||
|
||||
// ByteSliceToString is a wrapper
|
||||
func ByteSliceToString(s []byte) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Renameat2 is a wrapper
|
||||
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
|
||||
return errNonLinux
|
||||
}
|
||||
|
||||
func KernelRelease() (string, error) {
|
||||
return "", errNonLinux
|
||||
}
|
||||
163 src/runtime/vendor/github.com/cilium/ebpf/internal/version.go (generated, vendored, new file)
@@ -0,0 +1,163 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
// Version constant used in ELF binaries indicating that the loader needs to
|
||||
// substitute the eBPF program's version with the value of the kernel's
|
||||
// KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf
|
||||
// and RedSift.
|
||||
MagicKernelVersion = 0xFFFFFFFE
|
||||
)
|
||||
|
||||
var (
|
||||
// Match between one and three decimals separated by dots, with the last
|
||||
// segment (patch level) being optional on some kernels.
|
||||
// The x.y.z string must appear at the start of a string or right after
|
||||
// whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'.
|
||||
rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`)
|
||||
|
||||
kernelVersion = struct {
|
||||
once sync.Once
|
||||
version Version
|
||||
err error
|
||||
}{}
|
||||
)
|
||||
|
||||
// A Version in the form Major.Minor.Patch.
|
||||
type Version [3]uint16
|
||||
|
||||
// NewVersion creates a version from a string like "Major.Minor.Patch".
|
||||
//
|
||||
// Patch is optional.
|
||||
func NewVersion(ver string) (Version, error) {
|
||||
var major, minor, patch uint16
|
||||
n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
|
||||
if n < 2 {
|
||||
return Version{}, fmt.Errorf("invalid version: %s", ver)
|
||||
}
|
||||
return Version{major, minor, patch}, nil
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
if v[2] == 0 {
|
||||
return fmt.Sprintf("v%d.%d", v[0], v[1])
|
||||
}
|
||||
return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
|
||||
}
|
||||
|
||||
// Less returns true if the version is less than another version.
|
||||
func (v Version) Less(other Version) bool {
|
||||
for i, a := range v {
|
||||
if a == other[i] {
|
||||
continue
|
||||
}
|
||||
return a < other[i]
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Unspecified returns true if the version is all zero.
|
||||
func (v Version) Unspecified() bool {
|
||||
return v[0] == 0 && v[1] == 0 && v[2] == 0
|
||||
}
|
||||
|
||||
// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h.
|
||||
// It represents the kernel version and patch level as a single value.
|
||||
func (v Version) Kernel() uint32 {
|
||||
|
||||
// Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid
|
||||
// overflowing into PATCHLEVEL.
|
||||
// See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255").
|
||||
s := v[2]
|
||||
if s > 255 {
|
||||
s = 255
|
||||
}
|
||||
|
||||
// Truncate members to uint8 to prevent them from spilling over into
|
||||
// each other when overflowing 8 bits.
|
||||
return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s))
|
||||
}
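
Not part of the vendored file: a worked example of the packing above; 5.10.110 packs to 0x050a6e, and an oversized sublevel such as 4.4.302 is clamped to 255.

package main

import "fmt"

// kernelVersion mirrors Version.Kernel above, i.e. KERNEL_VERSION(a, b, c).
func kernelVersion(major, minor, sublevel uint16) uint32 {
	s := sublevel
	if s > 255 {
		s = 255 // clamp SUBLEVEL, as newer stable kernels do
	}
	return uint32(uint8(major))<<16 | uint32(uint8(minor))<<8 | uint32(uint8(s))
}

func main() {
	fmt.Printf("0x%06x\n", kernelVersion(5, 10, 110)) // 0x050a6e
	fmt.Printf("0x%06x\n", kernelVersion(4, 4, 302))  // 0x0404ff, sublevel clamped
}
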
|
||||
|
||||
// KernelVersion returns the version of the currently running kernel.
|
||||
func KernelVersion() (Version, error) {
|
||||
kernelVersion.once.Do(func() {
|
||||
kernelVersion.version, kernelVersion.err = detectKernelVersion()
|
||||
})
|
||||
|
||||
if kernelVersion.err != nil {
|
||||
return Version{}, kernelVersion.err
|
||||
}
|
||||
return kernelVersion.version, nil
|
||||
}
|
||||
|
||||
// detectKernelVersion returns the version of the running kernel. It scans the
|
||||
// following sources in order: /proc/version_signature, uname -v, uname -r.
|
||||
// In each of those locations, the last-appearing x.y(.z) value is selected
|
||||
// for parsing. The first location that yields a usable version number is
|
||||
// returned.
|
||||
func detectKernelVersion() (Version, error) {
|
||||
|
||||
// Try reading /proc/version_signature for Ubuntu compatibility.
|
||||
// Example format: Ubuntu 4.15.0-91.92-generic 4.15.18
|
||||
// This method exists in the kernel itself, see d18acd15c
|
||||
// ("perf tools: Fix kernel version error in ubuntu").
|
||||
if pvs, err := ioutil.ReadFile("/proc/version_signature"); err == nil {
|
||||
// If /proc/version_signature exists, failing to parse it is an error.
|
||||
// It only exists on Ubuntu, where the real patch level is not obtainable
|
||||
// through any other method.
|
||||
v, err := findKernelVersion(string(pvs))
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
var uname unix.Utsname
|
||||
if err := unix.Uname(&uname); err != nil {
|
||||
return Version{}, fmt.Errorf("calling uname: %w", err)
|
||||
}
|
||||
|
||||
// Debian puts the version including the patch level in uname.Version.
|
||||
// It is not an error if there's no version number in uname.Version,
|
||||
// as most distributions don't use it. Parsing can continue on uname.Release.
|
||||
// Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08)
|
||||
if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Most other distributions have the full kernel version including patch
|
||||
// level in uname.Release.
|
||||
// Example format: 4.19.0-5-amd64, 5.5.10-arch1-1
|
||||
v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:]))
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// findKernelVersion matches s against rgxKernelVersion and parses the result
|
||||
// into a Version. If s contains multiple matches, the last entry is selected.
|
||||
func findKernelVersion(s string) (Version, error) {
|
||||
m := rgxKernelVersion.FindAllString(s, -1)
|
||||
if m == nil {
|
||||
return Version{}, fmt.Errorf("no kernel version in string: %s", s)
|
||||
}
|
||||
// Pick the last match of the string in case there are multiple.
|
||||
s = m[len(m)-1]
|
||||
|
||||
v, err := NewVersion(s)
|
||||
if err != nil {
|
||||
return Version{}, fmt.Errorf("parsing version string %s: %w", s, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
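
Not part of the vendored file: a standalone sketch showing the regexp picking the last x.y.z token out of a Debian-style uname string (the same example string as in the comment above).

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same pattern as rgxKernelVersion above.
	rgx := regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`)

	s := "#1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08)"
	m := rgx.FindAllString(s, -1)
	// The match keeps its leading whitespace; NewVersion's %d scan skips it.
	fmt.Println(strings.TrimSpace(m[len(m)-1])) // 4.19.37
}
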
|
||||
171 src/runtime/vendor/github.com/cilium/ebpf/link/cgroup.go (generated, vendored, new file)
@@ -0,0 +1,171 @@
|
||||
package link
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type cgroupAttachFlags uint32
|
||||
|
||||
// cgroup attach flags
|
||||
const (
|
||||
flagAllowOverride cgroupAttachFlags = 1 << iota
|
||||
flagAllowMulti
|
||||
flagReplace
|
||||
)
|
||||
|
||||
type CgroupOptions struct {
|
||||
// Path to a cgroupv2 folder.
|
||||
Path string
|
||||
// One of the AttachCgroup* constants
|
||||
Attach ebpf.AttachType
|
||||
// Program must be of type CGroup*, and the attach type must match Attach.
|
||||
Program *ebpf.Program
|
||||
}
|
||||
|
||||
// AttachCgroup links a BPF program to a cgroup.
|
||||
func AttachCgroup(opts CgroupOptions) (Link, error) {
|
||||
cgroup, err := os.Open(opts.Path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't open cgroup: %s", err)
|
||||
}
|
||||
|
||||
clone, err := opts.Program.Clone()
|
||||
if err != nil {
|
||||
cgroup.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cg Link
|
||||
cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
|
||||
if errors.Is(err, ErrNotSupported) {
|
||||
cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
|
||||
}
|
||||
if errors.Is(err, ErrNotSupported) {
|
||||
cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
|
||||
}
|
||||
if err != nil {
|
||||
cgroup.Close()
|
||||
clone.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cg, nil
|
||||
}
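
Not part of the vendored file: a hedged usage sketch for AttachCgroup, assuming an already-loaded CGroupSKB program and a hypothetical cgroup v2 path; the AttachCGroupInetEgress constant comes from the public ebpf package.

package example

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachEgress attaches an already-loaded CGroupSKB program to a cgroup v2
// directory. The path is a placeholder; adjust it for the target system.
func attachEgress(prog *ebpf.Program) (link.Link, error) {
	l, err := link.AttachCgroup(link.CgroupOptions{
		Path:    "/sys/fs/cgroup/unified/mygroup", // hypothetical cgroup v2 path
		Attach:  ebpf.AttachCGroupInetEgress,
		Program: prog,
	})
	if err != nil {
		return nil, fmt.Errorf("attach cgroup: %w", err)
	}
	// Close() the returned link to detach the program when done.
	return l, nil
}
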
|
||||
|
||||
// LoadPinnedCgroup loads a pinned cgroup from a bpffs.
|
||||
func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
|
||||
link, err := LoadPinnedRawLink(fileName, CgroupType, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &linkCgroup{*link}, nil
|
||||
}
|
||||
|
||||
type progAttachCgroup struct {
|
||||
cgroup *os.File
|
||||
current *ebpf.Program
|
||||
attachType ebpf.AttachType
|
||||
flags cgroupAttachFlags
|
||||
}
|
||||
|
||||
var _ Link = (*progAttachCgroup)(nil)
|
||||
|
||||
func (cg *progAttachCgroup) isLink() {}
|
||||
|
||||
func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
|
||||
if flags&flagAllowMulti > 0 {
|
||||
if err := haveProgAttachReplace(); err != nil {
|
||||
return nil, fmt.Errorf("can't support multiple programs: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err := RawAttachProgram(RawAttachProgramOptions{
|
||||
Target: int(cgroup.Fd()),
|
||||
Program: prog,
|
||||
Flags: uint32(flags),
|
||||
Attach: attach,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cgroup: %w", err)
|
||||
}
|
||||
|
||||
return &progAttachCgroup{cgroup, prog, attach, flags}, nil
|
||||
}
|
||||
|
||||
func (cg *progAttachCgroup) Close() error {
|
||||
defer cg.cgroup.Close()
|
||||
defer cg.current.Close()
|
||||
|
||||
err := RawDetachProgram(RawDetachProgramOptions{
|
||||
Target: int(cg.cgroup.Fd()),
|
||||
Program: cg.current,
|
||||
Attach: cg.attachType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("close cgroup: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
|
||||
new, err := prog.Clone()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := RawAttachProgramOptions{
|
||||
Target: int(cg.cgroup.Fd()),
|
||||
Program: prog,
|
||||
Attach: cg.attachType,
|
||||
Flags: uint32(cg.flags),
|
||||
}
|
||||
|
||||
if cg.flags&flagAllowMulti > 0 {
|
||||
// Atomically replacing multiple programs requires at least
|
||||
// 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
|
||||
// program in MULTI mode")
|
||||
args.Flags |= uint32(flagReplace)
|
||||
args.Replace = cg.current
|
||||
}
|
||||
|
||||
if err := RawAttachProgram(args); err != nil {
|
||||
new.Close()
|
||||
return fmt.Errorf("can't update cgroup: %s", err)
|
||||
}
|
||||
|
||||
cg.current.Close()
|
||||
cg.current = new
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cg *progAttachCgroup) Pin(string) error {
|
||||
return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
|
||||
}
|
||||
|
||||
func (cg *progAttachCgroup) Unpin() error {
|
||||
return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
|
||||
}
|
||||
|
||||
type linkCgroup struct {
|
||||
RawLink
|
||||
}
|
||||
|
||||
var _ Link = (*linkCgroup)(nil)
|
||||
|
||||
func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
|
||||
link, err := AttachRawLink(RawLinkOptions{
|
||||
Target: int(cgroup.Fd()),
|
||||
Program: prog,
|
||||
Attach: attach,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &linkCgroup{*link}, err
|
||||
}
|
||||
2 src/runtime/vendor/github.com/cilium/ebpf/link/doc.go (generated, vendored, new file)
@@ -0,0 +1,2 @@
|
||||
// Package link allows attaching eBPF programs to various kernel hooks.
|
||||
package link
|
||||
100 src/runtime/vendor/github.com/cilium/ebpf/link/iter.go (generated, vendored, new file)
@@ -0,0 +1,100 @@
|
||||
package link
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
)
|
||||
|
||||
type IterOptions struct {
|
||||
// Program must be of type Tracing with attach type
|
||||
// AttachTraceIter. The kind of iterator to attach to is
|
||||
// determined at load time via the AttachTo field.
|
||||
//
|
||||
// AttachTo requires the kernel to include BTF of itself,
|
||||
// and it to be compiled with a recent pahole (>= 1.16).
|
||||
Program *ebpf.Program
|
||||
|
||||
// Map specifies the target map for bpf_map_elem and sockmap iterators.
|
||||
// It may be nil.
|
||||
Map *ebpf.Map
|
||||
}
|
||||
|
||||
// AttachIter attaches a BPF seq_file iterator.
|
||||
func AttachIter(opts IterOptions) (*Iter, error) {
|
||||
if err := haveBPFLink(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
progFd := opts.Program.FD()
|
||||
if progFd < 0 {
|
||||
return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
|
||||
}
|
||||
|
||||
var info bpfIterLinkInfoMap
|
||||
if opts.Map != nil {
|
||||
mapFd := opts.Map.FD()
|
||||
if mapFd < 0 {
|
||||
return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd)
|
||||
}
|
||||
info.map_fd = uint32(mapFd)
|
||||
}
|
||||
|
||||
attr := bpfLinkCreateIterAttr{
|
||||
prog_fd: uint32(progFd),
|
||||
attach_type: ebpf.AttachTraceIter,
|
||||
iter_info: internal.NewPointer(unsafe.Pointer(&info)),
|
||||
iter_info_len: uint32(unsafe.Sizeof(info)),
|
||||
}
|
||||
|
||||
fd, err := bpfLinkCreateIter(&attr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't link iterator: %w", err)
|
||||
}
|
||||
|
||||
return &Iter{RawLink{fd, ""}}, err
|
||||
}
|
||||
|
||||
// LoadPinnedIter loads a pinned iterator from a bpffs.
|
||||
func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) {
|
||||
link, err := LoadPinnedRawLink(fileName, IterType, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Iter{*link}, err
|
||||
}
|
||||
|
||||
// Iter represents an attached bpf_iter.
|
||||
type Iter struct {
|
||||
RawLink
|
||||
}
|
||||
|
||||
// Open creates a new instance of the iterator.
|
||||
//
|
||||
// Reading from the returned reader triggers the BPF program.
|
||||
func (it *Iter) Open() (io.ReadCloser, error) {
|
||||
linkFd, err := it.fd.Value()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attr := &bpfIterCreateAttr{
|
||||
linkFd: linkFd,
|
||||
}
|
||||
|
||||
fd, err := bpfIterCreate(attr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't create iterator: %w", err)
|
||||
}
|
||||
|
||||
return fd.File("bpf_iter"), nil
|
||||
}
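
Not part of the vendored file: a hedged usage sketch that attaches an iterator program and streams its seq_file output; as noted above, reading from the returned reader is what triggers the BPF program.

package example

import (
	"io"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// dumpIter attaches an already-loaded Tracing/AttachTraceIter program and
// copies the iterator's text output to stdout.
func dumpIter(prog *ebpf.Program) error {
	it, err := link.AttachIter(link.IterOptions{Program: prog})
	if err != nil {
		return err
	}
	defer it.Close()

	rd, err := it.Open()
	if err != nil {
		return err
	}
	defer rd.Close()

	_, err = io.Copy(os.Stdout, rd)
	return err
}
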
|
||||
|
||||
// union bpf_iter_link_info.map
|
||||
type bpfIterLinkInfoMap struct {
|
||||
map_fd uint32
|
||||
}
|
||||
438 src/runtime/vendor/github.com/cilium/ebpf/link/kprobe.go (generated, vendored, new file)
@@ -0,0 +1,438 @@
|
||||
package link
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
|
||||
|
||||
kprobeRetprobeBit = struct {
|
||||
once sync.Once
|
||||
value uint64
|
||||
err error
|
||||
}{}
|
||||
)
|
||||
|
||||
type probeType uint8
|
||||
|
||||
const (
|
||||
kprobeType probeType = iota
|
||||
uprobeType
|
||||
)
|
||||
|
||||
func (pt probeType) String() string {
|
||||
if pt == kprobeType {
|
||||
return "kprobe"
|
||||
}
|
||||
return "uprobe"
|
||||
}
|
||||
|
||||
func (pt probeType) EventsPath() string {
|
||||
if pt == kprobeType {
|
||||
return kprobeEventsPath
|
||||
}
|
||||
return uprobeEventsPath
|
||||
}
|
||||
|
||||
func (pt probeType) PerfEventType(ret bool) perfEventType {
|
||||
if pt == kprobeType {
|
||||
if ret {
|
||||
return kretprobeEvent
|
||||
}
|
||||
return kprobeEvent
|
||||
}
|
||||
if ret {
|
||||
return uretprobeEvent
|
||||
}
|
||||
return uprobeEvent
|
||||
}
|
||||
|
||||
func (pt probeType) RetprobeBit() (uint64, error) {
|
||||
if pt == kprobeType {
|
||||
return kretprobeBit()
|
||||
}
|
||||
return uretprobeBit()
|
||||
}
|
||||
|
||||
// Kprobe attaches the given eBPF program to a perf event that fires when the
|
||||
// given kernel symbol starts executing. See /proc/kallsyms for available
|
||||
// symbols. For example, printk():
|
||||
//
|
||||
// Kprobe("printk", prog)
|
||||
//
|
||||
// The resulting Link must be Closed during program shutdown to avoid leaking
|
||||
// system resources.
|
||||
func Kprobe(symbol string, prog *ebpf.Program) (Link, error) {
|
||||
k, err := kprobe(symbol, prog, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = k.attach(prog)
|
||||
if err != nil {
|
||||
k.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
// Kretprobe attaches the given eBPF program to a perf event that fires right
|
||||
// before the given kernel symbol exits, with the function stack left intact.
|
||||
// See /proc/kallsyms for available symbols. For example, printk():
|
||||
//
|
||||
// Kretprobe("printk", prog)
|
||||
//
|
||||
// The resulting Link must be Closed during program shutdown to avoid leaking
|
||||
// system resources.
|
||||
func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) {
|
||||
k, err := kprobe(symbol, prog, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = k.attach(prog)
|
||||
if err != nil {
|
||||
k.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
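
Not part of the vendored file: a usage sketch for the Kprobe helper as vendored here (two-argument form); the symbol name is an example and should be checked against /proc/kallsyms on the target kernel.

package example

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// traceExecve attaches an already-loaded Kprobe-type program to a kernel
// symbol via the helper defined above.
func traceExecve(prog *ebpf.Program) (link.Link, error) {
	k, err := link.Kprobe("__x64_sys_execve", prog)
	if err != nil {
		return nil, err
	}
	// The caller must Close() the link on shutdown to release the perf event.
	return k, nil
}
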
|
||||
|
||||
// kprobe opens a perf event on the given symbol and attaches prog to it.
|
||||
// If ret is true, create a kretprobe.
|
||||
func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
|
||||
if symbol == "" {
|
||||
return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
|
||||
}
|
||||
if prog == nil {
|
||||
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
|
||||
}
|
||||
if !rgxTraceEvent.MatchString(symbol) {
|
||||
return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput)
|
||||
}
|
||||
if prog.Type() != ebpf.Kprobe {
|
||||
return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
|
||||
}
|
||||
|
||||
// Use kprobe PMU if the kernel has it available.
|
||||
tp, err := pmuKprobe(platformPrefix(symbol), ret)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
tp, err = pmuKprobe(symbol, ret)
|
||||
}
|
||||
if err == nil {
|
||||
return tp, nil
|
||||
}
|
||||
if err != nil && !errors.Is(err, ErrNotSupported) {
|
||||
return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err)
|
||||
}
|
||||
|
||||
// Use tracefs if kprobe PMU is missing.
|
||||
tp, err = tracefsKprobe(platformPrefix(symbol), ret)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
tp, err = tracefsKprobe(symbol, ret)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
|
||||
}
|
||||
|
||||
return tp, nil
|
||||
}
|
||||
|
||||
// pmuKprobe opens a perf event based on the kprobe PMU.
|
||||
// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
|
||||
func pmuKprobe(symbol string, ret bool) (*perfEvent, error) {
|
||||
return pmuProbe(kprobeType, symbol, "", 0, ret)
|
||||
}
|
||||
|
||||
// pmuProbe opens a perf event based on a Performance Monitoring Unit.
|
||||
//
|
||||
// Requires at least a 4.17 kernel.
|
||||
// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
|
||||
// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
|
||||
//
|
||||
// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
|
||||
func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
|
||||
// Getting the PMU type will fail if the kernel doesn't support
|
||||
// the perf_[k,u]probe PMU.
|
||||
et, err := getPMUEventType(typ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var config uint64
|
||||
if ret {
|
||||
bit, err := typ.RetprobeBit()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config |= 1 << bit
|
||||
}
|
||||
|
||||
var (
|
||||
attr unix.PerfEventAttr
|
||||
sp unsafe.Pointer
|
||||
)
|
||||
switch typ {
|
||||
case kprobeType:
|
||||
// Create a pointer to a NUL-terminated string for the kernel.
|
||||
sp, err := unsafeStringPtr(symbol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attr = unix.PerfEventAttr{
|
||||
Type: uint32(et), // PMU event type read from sysfs
|
||||
Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
|
||||
Config: config, // Retprobe flag
|
||||
}
|
||||
case uprobeType:
|
||||
sp, err := unsafeStringPtr(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attr = unix.PerfEventAttr{
|
||||
// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
|
||||
// since it added the config2 (Ext2) field. The Size field controls the
|
||||
// size of the internal buffer the kernel allocates for reading the
|
||||
// perf_event_attr argument from userspace.
|
||||
Size: unix.PERF_ATTR_SIZE_VER1,
|
||||
Type: uint32(et), // PMU event type read from sysfs
|
||||
Ext1: uint64(uintptr(sp)), // Uprobe path
|
||||
Ext2: offset, // Uprobe offset
|
||||
Config: config, // Retprobe flag
|
||||
}
|
||||
}
|
||||
|
||||
fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
|
||||
|
||||
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
|
||||
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
|
||||
// is returned to the caller.
|
||||
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
|
||||
return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening perf event: %w", err)
|
||||
}
|
||||
|
||||
// Ensure the string pointer is not collected before PerfEventOpen returns.
|
||||
runtime.KeepAlive(sp)
|
||||
|
||||
// Kernel has perf_[k,u]probe PMU available, initialize perf event.
|
||||
return &perfEvent{
|
||||
fd: internal.NewFD(uint32(fd)),
|
||||
pmuID: et,
|
||||
name: symbol,
|
||||
typ: typ.PerfEventType(ret),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// tracefsKprobe creates a Kprobe tracefs entry.
|
||||
func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
|
||||
return tracefsProbe(kprobeType, symbol, "", 0, ret)
|
||||
}
|
||||
|
||||
// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
|
||||
// A new trace event group name is generated on every call to support creating
|
||||
// multiple trace events for the same kernel or userspace symbol.
|
||||
// Path and offset are only set in the case of uprobe(s) and are used to set
|
||||
// the executable/library path on the filesystem and the offset where the probe is inserted.
|
||||
// A perf event is then opened on the newly-created trace event and returned to the caller.
|
||||
func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
|
||||
// Generate a random string for each trace event we attempt to create.
|
||||
// This value is used as the 'group' token in tracefs to allow creating
|
||||
// multiple kprobe trace events with the same name.
|
||||
group, err := randomGroup("ebpf")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("randomizing group name: %w", err)
|
||||
}
|
||||
|
||||
// Before attempting to create a trace event through tracefs,
|
||||
// check if an event with the same group and name already exists.
|
||||
// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
|
||||
// entry, so we need to rely on reads for detecting uniqueness.
|
||||
_, err = getTraceEventID(group, symbol)
|
||||
if err == nil {
|
||||
return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol)
|
||||
}
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err)
|
||||
}
|
||||
|
||||
// Create the [k,u]probe trace event using tracefs.
|
||||
if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil {
|
||||
return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
|
||||
}
|
||||
|
||||
// Get the newly-created trace event's id.
|
||||
tid, err := getTraceEventID(group, symbol)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting trace event id: %w", err)
|
||||
}
|
||||
|
||||
// Kprobes are ephemeral tracepoints and share the same perf event type.
|
||||
fd, err := openTracepointPerfEvent(tid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &perfEvent{
|
||||
fd: fd,
|
||||
group: group,
|
||||
name: symbol,
|
||||
tracefsID: tid,
|
||||
typ: typ.PerfEventType(ret),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
|
||||
// <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
|
||||
// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
|
||||
// if a probe with the same group and symbol already exists.
|
||||
func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error {
|
||||
// Open the kprobe_events file in tracefs.
|
||||
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var pe string
|
||||
switch typ {
|
||||
case kprobeType:
|
||||
// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
|
||||
// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
|
||||
// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
|
||||
// -:[GRP/]EVENT : Clear a probe
|
||||
//
|
||||
// Some examples:
|
||||
// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
|
||||
// p:ebpf_5678/p_my_kprobe __x64_sys_execve
|
||||
//
|
||||
// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
|
||||
// kernel default to NR_CPUS. This is desired in most eBPF cases since
|
||||
// subsampling or rate limiting logic can be more accurately implemented in
|
||||
// the eBPF program itself.
|
||||
// See Documentation/kprobes.txt for more details.
|
||||
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol)
|
||||
case uprobeType:
|
||||
// The uprobe_events syntax is as follows:
|
||||
// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
|
||||
// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
|
||||
// -:[GRP/]EVENT : Clear a probe
|
||||
//
|
||||
// Some examples:
|
||||
// r:ebpf_1234/readline /bin/bash:0x12345
|
||||
// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345
|
||||
//
|
||||
// See Documentation/trace/uprobetracer.txt for more details.
|
||||
pathOffset := uprobePathOffset(path, offset)
|
||||
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset)
|
||||
}
|
||||
_, err = f.WriteString(pe)
|
||||
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
|
||||
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
|
||||
// is returned to the caller.
|
||||
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
|
||||
return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
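
Not part of the vendored file: a short sketch printing probe definition strings in the formats documented above (group names are hypothetical, and the uprobe path:offset formatting is an assumption based on the examples in the comments).

package main

import "fmt"

func main() {
	// kretprobe entry: r:<group>/<symbol> <symbol>
	fmt.Printf("%s:%s/%s %s\n", "r", "ebpf_1234", "nf_conntrack_destroy", "nf_conntrack_destroy")
	// -> r:ebpf_1234/nf_conntrack_destroy nf_conntrack_destroy

	// uprobe entry: p:<group>/<symbol> <path>:<offset>
	fmt.Printf("%s:%s/%s %s:%#x\n", "p", "ebpf_5678", "readline", "/bin/bash", 0x12345)
	// -> p:ebpf_5678/readline /bin/bash:0x12345
}
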
|
||||
|
||||
// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
|
||||
// from <tracefs>/[k,u]probe_events.
|
||||
func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
|
||||
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// See [k,u]probe_events syntax above. The probe type does not need to be specified
|
||||
// for removals.
|
||||
pe := fmt.Sprintf("-:%s/%s", group, symbol)
|
||||
if _, err = f.WriteString(pe); err != nil {
|
||||
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// randomGroup generates a pseudorandom string for use as a tracefs group name.
|
||||
// Returns an error when the output string would exceed 63 characters (kernel
|
||||
// limitation), when rand.Read() fails or when prefix contains characters not
|
||||
// allowed by rgxTraceEvent.
|
||||
func randomGroup(prefix string) (string, error) {
|
||||
if !rgxTraceEvent.MatchString(prefix) {
|
||||
return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
|
||||
}
|
||||
|
||||
b := make([]byte, 8)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return "", fmt.Errorf("reading random bytes: %w", err)
|
||||
}
|
||||
|
||||
group := fmt.Sprintf("%s_%x", prefix, b)
|
||||
if len(group) > 63 {
|
||||
return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput)
|
||||
}
|
||||
|
||||
return group, nil
|
||||
}
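
Not part of the vendored file: a sketch of the group-name format produced above; 8 random bytes rendered as hex plus the prefix give a 21-character name, well under the 63-character limit.

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	// Mirrors randomGroup("ebpf") above.
	b := make([]byte, 8)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	group := fmt.Sprintf("%s_%x", "ebpf", b)
	fmt.Println(group, len(group)) // e.g. ebpf_1a2b3c4d5e6f7a8b 21
}
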
|
||||
|
||||
func probePrefix(ret bool) string {
|
||||
if ret {
|
||||
return "r"
|
||||
}
|
||||
return "p"
|
||||
}
|
||||
|
||||
// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit
|
||||
// from /sys/bus/event_source/devices/<pmu>/format/retprobe.
|
||||
func determineRetprobeBit(typ probeType) (uint64, error) {
|
||||
p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe")
|
||||
|
||||
data, err := ioutil.ReadFile(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var rp uint64
|
||||
n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("parse retprobe bit: %w", err)
|
||||
}
|
||||
if n != 1 {
|
||||
return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
|
||||
}
|
||||
|
||||
return rp, nil
|
||||
}
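
Not part of the vendored file: a tiny sketch of the Sscanf parse used above, run on the typical "config:0" contents of the sysfs retprobe format file.

package main

import "fmt"

func main() {
	var rp uint64
	n, err := fmt.Sscanf("config:0", "config:%d", &rp)
	fmt.Println(n, err, rp) // 1 <nil> 0
}
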
|
||||
|
||||
func kretprobeBit() (uint64, error) {
|
||||
kprobeRetprobeBit.once.Do(func() {
|
||||
kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType)
|
||||
})
|
||||
return kprobeRetprobeBit.value, kprobeRetprobeBit.err
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.