mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-03-11 15:22:14 +00:00
Compare commits
93 Commits
dependabot
...
sprt/remov
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b30219a595 | ||
|
|
19b35bdcbf | ||
|
|
ed5ad51d6b | ||
|
|
cbfdc4b764 | ||
|
|
b6c60d9229 | ||
|
|
f9a8eb6ecc | ||
|
|
374b0abe29 | ||
|
|
718632bfe0 | ||
|
|
68bdbef676 | ||
|
|
3dd77bf576 | ||
|
|
aae54f704c | ||
|
|
a98e328359 | ||
|
|
9fe03fb170 | ||
|
|
eaa711617e | ||
|
|
a4fd32a29a | ||
|
|
fb743a304c | ||
|
|
22c4cab237 | ||
|
|
62b0f63e37 | ||
|
|
b2932f963a | ||
|
|
1c8c0089da | ||
|
|
d0718f6001 | ||
|
|
b4161198ee | ||
|
|
ca4e14086f | ||
|
|
ce800b7c37 | ||
|
|
a988b10440 | ||
|
|
f36218d566 | ||
|
|
c8a39ad28d | ||
|
|
a35dcf952e | ||
|
|
2fff33cfa4 | ||
|
|
83a8b257d1 | ||
|
|
079fac1309 | ||
|
|
5df7c4aa9c | ||
|
|
e9894c0bd8 | ||
|
|
c57f2be18e | ||
|
|
bda9f6491f | ||
|
|
8f35c31b30 | ||
|
|
b5e0a5b7d6 | ||
|
|
cb97ebd067 | ||
|
|
a0b9d965e5 | ||
|
|
83dd7dcc75 | ||
|
|
cb0d02e40b | ||
|
|
d40afe592c | ||
|
|
e40d962b13 | ||
|
|
3f845af9d4 | ||
|
|
a4a4683ec7 | ||
|
|
2687ad75c1 | ||
|
|
8e11bb2526 | ||
|
|
94f850979f | ||
|
|
8640f27516 | ||
|
|
56c3618c1d | ||
|
|
966d710df5 | ||
|
|
ebe75cc3e3 | ||
|
|
7a08ef2f8d | ||
|
|
4e024bfb43 | ||
|
|
a2216ec05a | ||
|
|
01895bf87e | ||
|
|
d821d4e572 | ||
|
|
b0345d50e8 | ||
|
|
911742e26e | ||
|
|
347ce5e3bc | ||
|
|
ab25592533 | ||
|
|
fa3c3eb2ce | ||
|
|
3e807300ac | ||
|
|
876c6c832d | ||
|
|
9725df658f | ||
|
|
7ca8db1e61 | ||
|
|
0754a17fed | ||
|
|
2cf9018e35 | ||
|
|
0b2af07b02 | ||
|
|
3ea23528a5 | ||
|
|
642aa12889 | ||
|
|
24fe232e56 | ||
|
|
e50324ba5b | ||
|
|
993a4846c8 | ||
|
|
d95d1796b2 | ||
|
|
501d8d1916 | ||
|
|
964c91f8fc | ||
|
|
68e67d7f8a | ||
|
|
b147cb1319 | ||
|
|
8a4ae090e6 | ||
|
|
afe09803a1 | ||
|
|
88f746dea8 | ||
|
|
eec397ac08 | ||
|
|
bb7fd335f3 | ||
|
|
330bfff4be | ||
|
|
0a73638744 | ||
|
|
2695007ef8 | ||
|
|
66e58d6490 | ||
|
|
b71bb47e21 | ||
|
|
308442e887 | ||
|
|
2149fc0eee | ||
|
|
d2613025b7 | ||
|
|
499e18c876 |
6
.github/actionlint.yaml
vendored
6
.github/actionlint.yaml
vendored
@@ -28,3 +28,9 @@ self-hosted-runner:
|
||||
- s390x-large
|
||||
- tdx
|
||||
- ubuntu-24.04-arm
|
||||
|
||||
paths:
|
||||
.github/workflows/**/*.{yml,yaml}:
|
||||
ignore:
|
||||
# We use if: false to "temporarily" skip jobs with issues
|
||||
- 'constant expression "false" in condition'
|
||||
|
||||
9
.github/workflows/actionlint.yaml
vendored
9
.github/workflows/actionlint.yaml
vendored
@@ -13,18 +13,13 @@ concurrency:
|
||||
jobs:
|
||||
run-actionlint:
|
||||
name: run-actionlint
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install actionlint gh extension
|
||||
run: gh extension install https://github.com/cschleiden/gh-actionlint
|
||||
|
||||
- name: Run actionlint
|
||||
run: gh actionlint
|
||||
uses: raven-actions/actionlint@e01d1ea33dd6a5ed517d95b4c0c357560ac6f518 # v2.1.1
|
||||
|
||||
17
.github/workflows/basic-ci-amd64.yaml
vendored
17
.github/workflows/basic-ci-amd64.yaml
vendored
@@ -47,6 +47,23 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install yq
|
||||
run: |
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
env:
|
||||
|
||||
19
.github/workflows/basic-ci-s390x.yaml
vendored
19
.github/workflows/basic-ci-s390x.yaml
vendored
@@ -47,8 +47,25 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install yq
|
||||
run: |
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
|
||||
@@ -82,11 +82,17 @@ jobs:
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
- name: Install golang
|
||||
- name: Read properties from versions.yaml
|
||||
if: contains(matrix.component.needs, 'golang')
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
if: contains(matrix.component.needs, 'golang')
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Setup rust
|
||||
if: contains(matrix.component.needs, 'rust')
|
||||
run: |
|
||||
|
||||
@@ -143,7 +143,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
- name: store-extratarballs-artifact ${{ matrix.asset }}
|
||||
if: ${{ matrix.asset == 'kernel' || startsWith(matrix.asset, 'kernel-nvidia-gpu') }}
|
||||
if: ${{ startsWith(matrix.asset, 'kernel-nvidia-gpu') }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset }}-modules${{ inputs.tarball-suffix }}
|
||||
@@ -235,7 +235,6 @@ jobs:
|
||||
asset:
|
||||
- busybox
|
||||
- coco-guest-components
|
||||
- kernel-modules
|
||||
- kernel-nvidia-gpu-modules
|
||||
- pause-image
|
||||
steps:
|
||||
@@ -368,7 +367,6 @@ jobs:
|
||||
matrix:
|
||||
asset:
|
||||
- agent-ctl
|
||||
- csi-kata-directvolume
|
||||
- genpolicy
|
||||
- kata-ctl
|
||||
- kata-manager
|
||||
|
||||
@@ -120,15 +120,6 @@ jobs:
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
- name: store-extratarballs-artifact ${{ matrix.asset }}
|
||||
if: ${{ matrix.asset == 'kernel' }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: kata-artifacts-s390x-${{ matrix.asset }}-modules${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}-modules.tar.zst
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
name: build-asset-rootfs
|
||||
runs-on: s390x
|
||||
|
||||
1
.github/workflows/ci-devel.yaml
vendored
1
.github/workflows/ci-devel.yaml
vendored
@@ -17,6 +17,7 @@ jobs:
|
||||
pr-number: "dev"
|
||||
tag: ${{ github.sha }}-dev
|
||||
target-branch: ${{ github.ref_name }}
|
||||
extensive-matrix-autogenerated-policy: "yes"
|
||||
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
|
||||
1
.github/workflows/ci-nightly.yaml
vendored
1
.github/workflows/ci-nightly.yaml
vendored
@@ -22,6 +22,7 @@ jobs:
|
||||
pr-number: "nightly"
|
||||
tag: ${{ github.sha }}-nightly
|
||||
target-branch: ${{ github.ref_name }}
|
||||
extensive-matrix-autogenerated-policy: "yes"
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
|
||||
61
.github/workflows/ci.yaml
vendored
61
.github/workflows/ci.yaml
vendored
@@ -19,6 +19,10 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: no
|
||||
extensive-matrix-autogenerated-policy:
|
||||
required: false
|
||||
type: string
|
||||
default: no
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD:
|
||||
required: true
|
||||
@@ -212,61 +216,6 @@ jobs:
|
||||
platforms: linux/amd64, linux/s390x
|
||||
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
|
||||
|
||||
publish-csi-driver-amd64:
|
||||
name: publish-csi-driver-amd64
|
||||
needs: build-kata-static-tarball-amd64
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tools-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-tools-static-tarball-amd64-${{ inputs.tag }}
|
||||
path: kata-tools-artifacts
|
||||
|
||||
- name: Install kata-tools
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-tools-artifacts
|
||||
|
||||
- name: Copy binary into Docker context
|
||||
run: |
|
||||
# Copy to the location where the Dockerfile expects the binary.
|
||||
mkdir -p src/tools/csi-kata-directvolume/bin/
|
||||
cp /opt/kata/bin/csi-kata-directvolume src/tools/csi-kata-directvolume/bin/directvolplugin
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Docker build and push
|
||||
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
|
||||
with:
|
||||
tags: ghcr.io/kata-containers/csi-kata-directvolume:${{ inputs.pr-number }}
|
||||
push: true
|
||||
context: src/tools/csi-kata-directvolume/
|
||||
platforms: linux/amd64
|
||||
file: src/tools/csi-kata-directvolume/Dockerfile
|
||||
|
||||
run-kata-monitor-tests:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-amd64
|
||||
@@ -345,7 +294,6 @@ jobs:
|
||||
needs:
|
||||
- publish-kata-deploy-payload-amd64
|
||||
- build-and-publish-tee-confidential-unencrypted-image
|
||||
- publish-csi-driver-amd64
|
||||
uses: ./.github/workflows/run-kata-coco-tests.yaml
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -358,6 +306,7 @@ jobs:
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
extensive-matrix-autogenerated-policy: ${{ inputs.extensive-matrix-autogenerated-policy }}
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
|
||||
18
.github/workflows/darwin-tests.yaml
vendored
18
.github/workflows/darwin-tests.yaml
vendored
@@ -31,10 +31,22 @@ jobs:
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install golang
|
||||
- name: Install yq
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Install Rust
|
||||
run: ./tests/install_rust.sh
|
||||
|
||||
18
.github/workflows/docs-url-alive-check.yaml
vendored
18
.github/workflows/docs-url-alive-check.yaml
vendored
@@ -24,10 +24,22 @@ jobs:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install golang
|
||||
- name: Install yq
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Docs URL Alive Check
|
||||
run: |
|
||||
|
||||
18
.github/workflows/govulncheck.yaml
vendored
18
.github/workflows/govulncheck.yaml
vendored
@@ -27,10 +27,22 @@ jobs:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install golang
|
||||
- name: Install yq
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Install govulncheck
|
||||
run: |
|
||||
|
||||
19
.github/workflows/run-cri-containerd-tests.yaml
vendored
19
.github/workflows/run-cri-containerd-tests.yaml
vendored
@@ -53,6 +53,25 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install yq
|
||||
run: |
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
# Setup-go doesn't work properly with ppc64le: https://github.com/actions/setup-go/issues/648
|
||||
architecture: ${{ inputs.arch == 'ppc64le' && 'ppc64le' || '' }}
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
20
.github/workflows/run-k8s-tests-on-ppc64le.yaml
vendored
20
.github/workflows/run-k8s-tests-on-ppc64le.yaml
vendored
@@ -57,10 +57,24 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install golang
|
||||
- name: Install yq
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
# Setup-go doesn't work properly with ppc64le: https://github.com/actions/setup-go/issues/648
|
||||
architecture: 'ppc64le'
|
||||
|
||||
- name: Prepare the runner for k8s test suite
|
||||
run: bash "${HOME}/scripts/k8s_cluster_prepare.sh"
|
||||
|
||||
147
.github/workflows/run-kata-coco-tests.yaml
vendored
147
.github/workflows/run-kata-coco-tests.yaml
vendored
@@ -24,6 +24,10 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
extensive-matrix-autogenerated-policy:
|
||||
required: false
|
||||
type: string
|
||||
default: no
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD:
|
||||
required: true
|
||||
@@ -106,10 +110,6 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Deploy CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 100
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
@@ -130,10 +130,6 @@ jobs:
|
||||
[[ "${KATA_HYPERVISOR}" == "qemu-tdx" ]] && echo "ITA_KEY=${GH_ITA_KEY}" >> "${GITHUB_ENV}"
|
||||
bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
- name: Delete CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
|
||||
|
||||
# Generate jobs for testing CoCo on non-TEE environments
|
||||
run-k8s-tests-coco-nontee:
|
||||
name: run-k8s-tests-coco-nontee
|
||||
@@ -231,10 +227,6 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Deploy CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 80
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
@@ -253,10 +245,126 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
- name: Delete CSI driver
|
||||
# Extensive matrix: autogenerated policy tests (nydus + experimental-force-guest-pull) on k0s, k3s, rke2, microk8s with qemu-coco-dev / qemu-coco-dev-runtime-rs
|
||||
run-k8s-tests-coco-nontee-extensive-matrix:
|
||||
if: ${{ inputs.extensive-matrix-autogenerated-policy == 'yes' }}
|
||||
name: run-k8s-tests-coco-nontee-extensive-matrix
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
environment: [
|
||||
{ k8s: k0s, vmm: qemu-coco-dev, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: k0s, vmm: qemu-coco-dev, snapshotter: "", pull_type: experimental-force-guest-pull },
|
||||
{ k8s: k0s, vmm: qemu-coco-dev-runtime-rs, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: k3s, vmm: qemu-coco-dev, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: k3s, vmm: qemu-coco-dev, snapshotter: "", pull_type: experimental-force-guest-pull },
|
||||
{ k8s: k3s, vmm: qemu-coco-dev-runtime-rs, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: rke2, vmm: qemu-coco-dev, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: rke2, vmm: qemu-coco-dev, snapshotter: "", pull_type: experimental-force-guest-pull },
|
||||
{ k8s: rke2, vmm: qemu-coco-dev-runtime-rs, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: microk8s, vmm: qemu-coco-dev, snapshotter: nydus, pull_type: guest-pull },
|
||||
{ k8s: microk8s, vmm: qemu-coco-dev, snapshotter: "", pull_type: experimental-force-guest-pull },
|
||||
{ k8s: microk8s, vmm: qemu-coco-dev-runtime-rs, snapshotter: nydus, pull_type: guest-pull },
|
||||
]
|
||||
runs-on: ubuntu-24.04
|
||||
permissions:
|
||||
contents: read
|
||||
environment: ci
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.environment.vmm }}
|
||||
KBS: "true"
|
||||
KBS_INGRESS: "nodeport"
|
||||
KUBERNETES: ${{ matrix.environment.k8s }}
|
||||
SNAPSHOTTER: ${{ matrix.environment.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.environment.pull_type }}
|
||||
EXPERIMENTAL_FORCE_GUEST_PULL: ${{ matrix.environment.pull_type == 'experimental-force-guest-pull' && matrix.environment.vmm || '' }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
K8S_TEST_HOST_TYPE: "all"
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tools-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-tools-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-tools-artifacts
|
||||
|
||||
- name: Install kata-tools
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-tools-artifacts
|
||||
|
||||
- name: Remove unnecessary directories to free up space
|
||||
run: |
|
||||
sudo rm -rf /usr/local/.ghcup
|
||||
sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf /opt/ghc
|
||||
sudo rm -rf /usr/local/share/boost
|
||||
sudo rm -rf /usr/lib/jvm
|
||||
sudo rm -rf /usr/share/swift
|
||||
sudo rm -rf /usr/local/share/powershell
|
||||
sudo rm -rf /usr/local/julia*
|
||||
sudo rm -rf /opt/az
|
||||
sudo rm -rf /usr/local/share/chromium
|
||||
sudo rm -rf /opt/microsoft
|
||||
sudo rm -rf /opt/google
|
||||
sudo rm -rf /usr/lib/firefox
|
||||
|
||||
- name: Deploy ${{ matrix.environment.k8s }}
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
env:
|
||||
USE_EXPERIMENTAL_SETUP_SNAPSHOTTER: ${{ matrix.environment.snapshotter == 'nydus' }}
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
|
||||
- name: Install `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 80
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: always()
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
# Generate jobs for testing CoCo on non-TEE environments with erofs-snapshotter
|
||||
run-k8s-tests-coco-nontee-with-erofs-snapshotter:
|
||||
@@ -344,10 +452,6 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Deploy CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 80
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
@@ -360,8 +464,3 @@ jobs:
|
||||
if: always()
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
|
||||
- name: Delete CSI driver
|
||||
if: always()
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
|
||||
|
||||
11
.github/workflows/static-checks.yaml
vendored
11
.github/workflows/static-checks.yaml
vendored
@@ -126,11 +126,16 @@ jobs:
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
- name: Install golang
|
||||
- name: Read properties from versions.yaml
|
||||
run: |
|
||||
cd "${GOPATH}/src/github.com/${GITHUB_REPOSITORY}"
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
go_version="$(yq '.languages.golang.version' versions.yaml)"
|
||||
[ -n "$go_version" ]
|
||||
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
|
||||
- name: Setup Golang version ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -20,3 +20,6 @@ tools/packaging/static-build/agent/install_libseccomp.sh
|
||||
.direnv
|
||||
**/.DS_Store
|
||||
site/
|
||||
opt/
|
||||
tools/packaging/kernel/configs/**/.config
|
||||
root_hash.txt
|
||||
|
||||
1
Cargo.lock
generated
1
Cargo.lock
generated
@@ -3922,6 +3922,7 @@ dependencies = [
|
||||
"agent",
|
||||
"anyhow",
|
||||
"common",
|
||||
"containerd-shim-protos",
|
||||
"hyper",
|
||||
"hypervisor",
|
||||
"kata-sys-util",
|
||||
|
||||
@@ -187,9 +187,10 @@ different compared to `runc` containers:
|
||||
into the guest and exposes it directly to the container.
|
||||
|
||||
**Mounting guest devices**: When the source path of a hostPath volume is
|
||||
under `/dev`, and the path either corresponds to a host device or is not
|
||||
accessible by the Kata shim, the Kata agent bind mounts the source path
|
||||
directly from the *guest* filesystem into the container.
|
||||
under `/dev` (or `/dev` itself), and the path corresponds to a
|
||||
non-regular file (i.e., a device, directory, or any other special file)
|
||||
or is not accessible by the Kata shim, the Kata agent bind mounts the
|
||||
source path directly from the *guest* filesystem into the container.
|
||||
|
||||
[runtime-config]: /src/runtime/README.md#configuration
|
||||
[k8s-hostpath]: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
|
||||
@@ -226,6 +227,35 @@ Importantly, the default behavior to pass the host devices to a
|
||||
privileged container is not supported in Kata Containers and needs to be
|
||||
disabled, see [Privileged Kata Containers](how-to/privileged.md).
|
||||
|
||||
## Guest pulled container images
|
||||
|
||||
When using features like **nydus guest-pull**, set user/group IDs explicitly in the pod spec.
|
||||
If the ID values are omitted:
|
||||
|
||||
- Your workload might be executed with unexpected user/group ID values, because image layers
|
||||
may be unavailable to containerd, so image config (including user/group) is not applied.
|
||||
- If using policy or genpolicy, the generated policy may detect these unexpected values and
|
||||
reject the creation of workload containers.
|
||||
|
||||
Set `securityContext` explicitly. Use **pod-level** `spec.securityContext` (for Pods) or
|
||||
`spec.template.spec.securityContext` (for controllers like Deployments) and/or **container-level**
|
||||
`spec.containers[].securityContext`. Include at least:
|
||||
- `runAsUser` — primary user ID
|
||||
- `runAsGroup` — primary group ID
|
||||
- `fsGroup` — volume group ownership (often reflected as a supplemental group)
|
||||
- `supplementalGroups` — list of additional group IDs (if needed)
|
||||
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
# Explicit user/group/supplementary groups to support nydus guest-pull
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
runAsGroup: 0
|
||||
fsGroup: 0
|
||||
supplementalGroups: [1, 2, 3, 4, 6, 10, 11, 20, 26, 27]
|
||||
```
|
||||
|
||||
# Appendices
|
||||
|
||||
## The constraints challenge
|
||||
|
||||
@@ -99,6 +99,9 @@ The [`genpolicy`](../../src/tools/genpolicy/) application can be used to generat
|
||||
|
||||
**Warning** Users should review carefully the automatically-generated Policy, and modify the Policy file if needed to match better their use case, before using this Policy.
|
||||
|
||||
**Important — User / Group / Supplemental groups for Policy and genpolicy**
|
||||
When using features like **nydus guest-pull**, set user/group IDs explicitly in the pod spec, as described in [Limitations](../Limitations.md#guest-pulled-container-images).
|
||||
|
||||
See the [`genpolicy` documentation](../../src/tools/genpolicy/README.md) and the [Policy contents examples](#policy-contents) for additional information.
|
||||
|
||||
## Policy contents
|
||||
|
||||
7
src/agent/Cargo.lock
generated
7
src/agent/Cargo.lock
generated
@@ -979,6 +979,12 @@ dependencies = [
|
||||
"parking_lot_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "data-encoding"
|
||||
version = "2.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
|
||||
|
||||
[[package]]
|
||||
name = "deranged"
|
||||
version = "0.5.5"
|
||||
@@ -3428,6 +3434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "843c3d97f07e3b5ac0955d53ad0af4c91fe4a4f8525843ece5bf014f27829b73"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"data-encoding",
|
||||
"lazy_static",
|
||||
"rand",
|
||||
"regex",
|
||||
|
||||
@@ -18,6 +18,8 @@ serde_json.workspace = true
|
||||
# Agent Policy
|
||||
regorus = { version = "0.2.8", default-features = false, features = [
|
||||
"arc",
|
||||
"base64",
|
||||
"base64url",
|
||||
"regex",
|
||||
"std",
|
||||
] }
|
||||
|
||||
@@ -2308,9 +2308,6 @@ fn is_sealed_secret_path(source_path: &str) -> bool {
|
||||
}
|
||||
|
||||
async fn cdh_handler_trusted_storage(oci: &mut Spec) -> Result<()> {
|
||||
if !confidential_data_hub::is_cdh_client_initialized() {
|
||||
return Ok(());
|
||||
}
|
||||
let linux = oci
|
||||
.linux()
|
||||
.as_ref()
|
||||
@@ -2320,26 +2317,8 @@ async fn cdh_handler_trusted_storage(oci: &mut Spec) -> Result<()> {
|
||||
for specdev in devices.iter() {
|
||||
if specdev.path().as_path().to_str() == Some(TRUSTED_IMAGE_STORAGE_DEVICE) {
|
||||
let dev_major_minor = format!("{}:{}", specdev.major(), specdev.minor());
|
||||
let secure_storage_integrity = AGENT_CONFIG.secure_storage_integrity.to_string();
|
||||
info!(
|
||||
sl(),
|
||||
"trusted_store device major:min {}, enable data integrity {}",
|
||||
dev_major_minor,
|
||||
secure_storage_integrity
|
||||
);
|
||||
|
||||
let options = std::collections::HashMap::from([
|
||||
("deviceId".to_string(), dev_major_minor),
|
||||
("encryptType".to_string(), "LUKS".to_string()),
|
||||
("dataIntegrity".to_string(), secure_storage_integrity),
|
||||
]);
|
||||
confidential_data_hub::secure_mount(
|
||||
"BlockDevice",
|
||||
&options,
|
||||
vec![],
|
||||
KATA_IMAGE_WORK_DIR,
|
||||
)
|
||||
.await?;
|
||||
cdh_secure_mount("BlockDevice", &dev_major_minor, "LUKS", KATA_IMAGE_WORK_DIR)
|
||||
.await?;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -2347,6 +2326,38 @@ async fn cdh_handler_trusted_storage(oci: &mut Spec) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn cdh_secure_mount(
|
||||
device_type: &str,
|
||||
device_id: &str,
|
||||
encrypt_type: &str,
|
||||
mount_point: &str,
|
||||
) -> Result<()> {
|
||||
if !confidential_data_hub::is_cdh_client_initialized() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let integrity = AGENT_CONFIG.secure_storage_integrity.to_string();
|
||||
|
||||
info!(
|
||||
sl(),
|
||||
"cdh_secure_mount: device_type {}, device_id {}, encrypt_type {}, integrity {}",
|
||||
device_type,
|
||||
device_id,
|
||||
encrypt_type,
|
||||
integrity
|
||||
);
|
||||
|
||||
let options = std::collections::HashMap::from([
|
||||
("deviceId".to_string(), device_id.to_string()),
|
||||
("encryptType".to_string(), encrypt_type.to_string()),
|
||||
("dataIntegrity".to_string(), integrity),
|
||||
]);
|
||||
|
||||
confidential_data_hub::secure_mount(device_type, &options, vec![], mount_point).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn cdh_handler_sealed_secrets(oci: &mut Spec) -> Result<()> {
|
||||
if !confidential_data_hub::is_cdh_client_initialized() {
|
||||
return Ok(());
|
||||
|
||||
@@ -65,6 +65,12 @@ type UeventWatcher = (Box<dyn UeventMatcher>, oneshot::Sender<Uevent>);
|
||||
pub struct StorageState {
|
||||
count: Arc<AtomicU32>,
|
||||
device: Arc<dyn StorageDevice>,
|
||||
|
||||
/// Whether the storage is shared across multiple containers (e.g.
|
||||
/// block-based emptyDirs). Shared storages should not be cleaned up
|
||||
/// when a container exits; cleanup happens only when the sandbox is
|
||||
/// destroyed.
|
||||
shared: bool,
|
||||
}
|
||||
|
||||
impl Debug for StorageState {
|
||||
@@ -74,17 +80,11 @@ impl Debug for StorageState {
|
||||
}
|
||||
|
||||
impl StorageState {
|
||||
fn new() -> Self {
|
||||
fn new(shared: bool) -> Self {
|
||||
StorageState {
|
||||
count: Arc::new(AtomicU32::new(1)),
|
||||
device: Arc::new(StorageDeviceGeneric::default()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_device(device: Arc<dyn StorageDevice>) -> Self {
|
||||
Self {
|
||||
count: Arc::new(AtomicU32::new(1)),
|
||||
device,
|
||||
shared,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,6 +92,10 @@ impl StorageState {
|
||||
self.device.path()
|
||||
}
|
||||
|
||||
pub fn is_shared(&self) -> bool {
|
||||
self.shared
|
||||
}
|
||||
|
||||
pub async fn ref_count(&self) -> u32 {
|
||||
self.count.load(Ordering::Relaxed)
|
||||
}
|
||||
@@ -171,8 +175,10 @@ impl Sandbox {
|
||||
|
||||
/// Add a new storage object or increase reference count of existing one.
|
||||
/// The caller may detect new storage object by checking `StorageState.refcount == 1`.
|
||||
/// The `shared` flag indicates if this storage is shared across multiple containers;
|
||||
/// if true, cleanup will be skipped when containers exit.
|
||||
#[instrument]
|
||||
pub async fn add_sandbox_storage(&mut self, path: &str) -> StorageState {
|
||||
pub async fn add_sandbox_storage(&mut self, path: &str, shared: bool) -> StorageState {
|
||||
match self.storages.entry(path.to_string()) {
|
||||
Entry::Occupied(e) => {
|
||||
let state = e.get().clone();
|
||||
@@ -180,7 +186,7 @@ impl Sandbox {
|
||||
state
|
||||
}
|
||||
Entry::Vacant(e) => {
|
||||
let state = StorageState::new();
|
||||
let state = StorageState::new(shared);
|
||||
e.insert(state.clone());
|
||||
state
|
||||
}
|
||||
@@ -188,22 +194,32 @@ impl Sandbox {
|
||||
}
|
||||
|
||||
/// Update the storage device associated with a path.
|
||||
/// Preserves the existing shared flag and reference count.
|
||||
pub fn update_sandbox_storage(
|
||||
&mut self,
|
||||
path: &str,
|
||||
device: Arc<dyn StorageDevice>,
|
||||
) -> std::result::Result<Arc<dyn StorageDevice>, Arc<dyn StorageDevice>> {
|
||||
if !self.storages.contains_key(path) {
|
||||
return Err(device);
|
||||
match self.storages.get(path) {
|
||||
None => Err(device),
|
||||
Some(existing) => {
|
||||
let state = StorageState {
|
||||
device,
|
||||
..existing.clone()
|
||||
};
|
||||
// Safe to unwrap() because we have just ensured existence of entry via get().
|
||||
let state = self.storages.insert(path.to_string(), state).unwrap();
|
||||
Ok(state.device)
|
||||
}
|
||||
}
|
||||
|
||||
let state = StorageState::from_device(device);
|
||||
// Safe to unwrap() because we have just ensured existence of entry.
|
||||
let state = self.storages.insert(path.to_string(), state).unwrap();
|
||||
Ok(state.device)
|
||||
}
|
||||
|
||||
/// Decrease reference count and destroy the storage object if reference count reaches zero.
|
||||
///
|
||||
/// For shared storages (e.g., emptyDir volumes), cleanup is skipped even when refcount
|
||||
/// reaches zero. The storage entry is kept in the map so subsequent containers can reuse
|
||||
/// the already-mounted storage. Actual cleanup happens when the sandbox is destroyed.
|
||||
///
|
||||
/// Returns `Ok(true)` if the reference count has reached zero and the storage object has been
|
||||
/// removed.
|
||||
#[instrument]
|
||||
@@ -212,6 +228,10 @@ impl Sandbox {
|
||||
None => Err(anyhow!("Sandbox storage with path {} not found", path)),
|
||||
Some(state) => {
|
||||
if state.dec_and_test_ref_count().await {
|
||||
if state.is_shared() {
|
||||
state.count.store(1, Ordering::Release);
|
||||
return Ok(false);
|
||||
}
|
||||
if let Some(storage) = self.storages.remove(path) {
|
||||
storage.device.cleanup()?;
|
||||
}
|
||||
@@ -720,7 +740,7 @@ mod tests {
|
||||
let tmpdir_path = tmpdir.path().to_str().unwrap();
|
||||
|
||||
// Add a new sandbox storage
|
||||
let new_storage = s.add_sandbox_storage(tmpdir_path).await;
|
||||
let new_storage = s.add_sandbox_storage(tmpdir_path, false).await;
|
||||
|
||||
// Check the reference counter
|
||||
let ref_count = new_storage.ref_count().await;
|
||||
@@ -730,7 +750,7 @@ mod tests {
|
||||
);
|
||||
|
||||
// Use the existing sandbox storage
|
||||
let new_storage = s.add_sandbox_storage(tmpdir_path).await;
|
||||
let new_storage = s.add_sandbox_storage(tmpdir_path, false).await;
|
||||
|
||||
// Since we are using existing storage, the reference counter
|
||||
// should be 2 by now.
|
||||
@@ -771,7 +791,7 @@ mod tests {
|
||||
|
||||
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
|
||||
|
||||
s.add_sandbox_storage(destdir_path).await;
|
||||
s.add_sandbox_storage(destdir_path, false).await;
|
||||
let storage = StorageDeviceGeneric::new(destdir_path.to_string());
|
||||
assert!(s
|
||||
.update_sandbox_storage(destdir_path, Arc::new(storage))
|
||||
@@ -789,7 +809,7 @@ mod tests {
|
||||
let other_dir_path = other_dir.path().to_str().unwrap();
|
||||
other_dir_str = other_dir_path.to_string();
|
||||
|
||||
s.add_sandbox_storage(other_dir_path).await;
|
||||
s.add_sandbox_storage(other_dir_path, false).await;
|
||||
let storage = StorageDeviceGeneric::new(other_dir_path.to_string());
|
||||
assert!(s
|
||||
.update_sandbox_storage(other_dir_path, Arc::new(storage))
|
||||
@@ -808,9 +828,9 @@ mod tests {
|
||||
let storage_path = "/tmp/testEphe";
|
||||
|
||||
// Add a new sandbox storage
|
||||
s.add_sandbox_storage(storage_path).await;
|
||||
s.add_sandbox_storage(storage_path, false).await;
|
||||
// Use the existing sandbox storage
|
||||
let state = s.add_sandbox_storage(storage_path).await;
|
||||
let state = s.add_sandbox_storage(storage_path, false).await;
|
||||
assert!(
|
||||
state.ref_count().await > 1,
|
||||
"Expects false as the storage is not new."
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
use crate::linux_abi::pcipath_from_dev_tree_path;
|
||||
use std::fs;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::os::unix::fs::{MetadataExt, PermissionsExt};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -17,6 +17,7 @@ use kata_types::device::{
|
||||
DRIVER_BLK_MMIO_TYPE, DRIVER_BLK_PCI_TYPE, DRIVER_NVDIMM_TYPE, DRIVER_SCSI_TYPE,
|
||||
};
|
||||
use kata_types::mount::StorageDevice;
|
||||
use nix::sys::stat::{major, minor};
|
||||
use protocols::agent::Storage;
|
||||
use tracing::instrument;
|
||||
|
||||
@@ -29,10 +30,44 @@ use crate::device::block_device_handler::{
|
||||
};
|
||||
use crate::device::nvdimm_device_handler::wait_for_pmem_device;
|
||||
use crate::device::scsi_device_handler::get_scsi_device_name;
|
||||
use crate::storage::{common_storage_handler, new_device, StorageContext, StorageHandler};
|
||||
use crate::storage::{
|
||||
common_storage_handler, new_device, set_ownership, StorageContext, StorageHandler,
|
||||
};
|
||||
use slog::Logger;
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use std::str::FromStr;
|
||||
|
||||
fn get_device_number(dev_path: &str, metadata: Option<&fs::Metadata>) -> Result<String> {
|
||||
let dev_id = match metadata {
|
||||
Some(m) => m.rdev(),
|
||||
None => {
|
||||
let m =
|
||||
fs::metadata(dev_path).context(format!("get metadata on file {:?}", dev_path))?;
|
||||
m.rdev()
|
||||
}
|
||||
};
|
||||
Ok(format!("{}:{}", major(dev_id), minor(dev_id)))
|
||||
}
|
||||
|
||||
async fn handle_block_storage(
|
||||
logger: &Logger,
|
||||
storage: &Storage,
|
||||
dev_num: &str,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
let has_ephemeral_encryption = storage
|
||||
.driver_options
|
||||
.contains(&"encryption_key=ephemeral".to_string());
|
||||
|
||||
if has_ephemeral_encryption {
|
||||
crate::rpc::cdh_secure_mount("BlockDevice", dev_num, "LUKS", &storage.mount_point).await?;
|
||||
set_ownership(logger, storage)?;
|
||||
new_device(storage.mount_point.clone())
|
||||
} else {
|
||||
let path = common_storage_handler(logger, storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkMmioHandler {}
|
||||
|
||||
@@ -75,6 +110,8 @@ impl StorageHandler for VirtioBlkPciHandler {
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
let dev_num: String;
|
||||
|
||||
// If hot-plugged, get the device node path based on the PCI path
|
||||
// otherwise use the virt path provided in Storage Source
|
||||
if storage.source.starts_with("/dev") {
|
||||
@@ -84,15 +121,16 @@ impl StorageHandler for VirtioBlkPciHandler {
|
||||
if mode & libc::S_IFBLK == 0 {
|
||||
return Err(anyhow!("Invalid device {}", &storage.source));
|
||||
}
|
||||
dev_num = get_device_number(&storage.source, Some(&metadata))?;
|
||||
} else {
|
||||
let (root_complex, pcipath) = pcipath_from_dev_tree_path(&storage.source)?;
|
||||
let dev_path =
|
||||
get_virtio_blk_pci_device_name(ctx.sandbox, root_complex, &pcipath).await?;
|
||||
storage.source = dev_path;
|
||||
dev_num = get_device_number(&storage.source, None)?;
|
||||
}
|
||||
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
handle_block_storage(ctx.logger, &storage, &dev_num).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -151,10 +189,10 @@ impl StorageHandler for ScsiHandler {
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
// Retrieve the device path from SCSI address.
|
||||
let dev_path = get_scsi_device_name(ctx.sandbox, &storage.source).await?;
|
||||
storage.source = dev_path;
|
||||
storage.source = dev_path.clone();
|
||||
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
let dev_num = get_device_number(&dev_path, None)?;
|
||||
handle_block_storage(ctx.logger, &storage, &dev_num).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -172,7 +172,11 @@ pub async fn add_storages(
|
||||
|
||||
for storage in storages {
|
||||
let path = storage.mount_point.clone();
|
||||
let state = sandbox.lock().await.add_sandbox_storage(&path).await;
|
||||
let state = sandbox
|
||||
.lock()
|
||||
.await
|
||||
.add_sandbox_storage(&path, storage.shared)
|
||||
.await;
|
||||
if state.ref_count().await > 1 {
|
||||
if let Some(path) = state.path() {
|
||||
if !path.is_empty() {
|
||||
|
||||
@@ -242,7 +242,7 @@ mod tests {
|
||||
|
||||
let metrics = Arc::new(SerialDeviceMetrics::default());
|
||||
|
||||
let out: Arc<Mutex<Option<Box<(dyn std::io::Write + Send + 'static)>>>> =
|
||||
let out: Arc<Mutex<Option<Box<dyn std::io::Write + Send + 'static>>>> =
|
||||
Arc::new(Mutex::new(Some(Box::new(std::io::sink()))));
|
||||
let mut serial = SerialDevice {
|
||||
serial: Serial::with_events(
|
||||
|
||||
@@ -1174,7 +1174,6 @@ pub(crate) mod tests {
|
||||
use dbs_virtio_devices::Result as VirtIoResult;
|
||||
use dbs_virtio_devices::{
|
||||
ActivateResult, VirtioDeviceConfig, VirtioDeviceInfo, VirtioSharedMemory,
|
||||
DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FEATURES_OK, DEVICE_INIT,
|
||||
};
|
||||
|
||||
use dbs_address_space::{AddressSpaceLayout, AddressSpaceRegion, AddressSpaceRegionType};
|
||||
|
||||
@@ -99,76 +99,61 @@ impl Default for EpollManager {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::os::fd::AsRawFd;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::time::Duration;
|
||||
use vmm_sys_util::{epoll::EventSet, eventfd::EventFd};
|
||||
|
||||
struct DummySubscriber {
|
||||
pub event: EventFd,
|
||||
pub event: Arc<EventFd>,
|
||||
pub notify: std::sync::mpsc::Sender<()>,
|
||||
}
|
||||
|
||||
impl DummySubscriber {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
event: EventFd::new(0).unwrap(),
|
||||
}
|
||||
fn new(event: Arc<EventFd>, notify: std::sync::mpsc::Sender<()>) -> Self {
|
||||
Self { event, notify }
|
||||
}
|
||||
}
|
||||
|
||||
impl MutEventSubscriber for DummySubscriber {
|
||||
fn process(&mut self, events: Events, _ops: &mut EventOps) {
|
||||
let source = events.fd();
|
||||
let event_set = events.event_set();
|
||||
assert_ne!(source, self.event.as_raw_fd());
|
||||
match event_set {
|
||||
EventSet::IN => {
|
||||
unreachable!()
|
||||
}
|
||||
EventSet::OUT => {
|
||||
self.event.read().unwrap();
|
||||
}
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
fn init(&mut self, ops: &mut EventOps) {
|
||||
ops.add(Events::new(self.event.as_ref(), EventSet::IN))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
fn init(&mut self, _ops: &mut EventOps) {}
|
||||
fn process(&mut self, events: Events, _ops: &mut EventOps) {
|
||||
if events.fd() == self.event.as_raw_fd() && events.event_set().contains(EventSet::IN) {
|
||||
let _ = self.event.read();
|
||||
let _ = self.notify.send(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoll_manager() {
|
||||
let mut epoll_manager = EpollManager::default();
|
||||
let epoll_manager_clone = epoll_manager.clone();
|
||||
let thread = std::thread::spawn(move || loop {
|
||||
let count = epoll_manager_clone.handle_events(-1).unwrap();
|
||||
if count == 0 {
|
||||
continue;
|
||||
let epoll_manager = EpollManager::default();
|
||||
let (stop_tx, stop_rx) = channel::<()>();
|
||||
let worker_mgr = epoll_manager.clone();
|
||||
let worker = std::thread::spawn(move || {
|
||||
while stop_rx.try_recv().is_err() {
|
||||
let _ = worker_mgr.handle_events(50);
|
||||
}
|
||||
assert_eq!(count, 1);
|
||||
break;
|
||||
});
|
||||
let handler = DummySubscriber::new();
|
||||
let event = handler.event.try_clone().unwrap();
|
||||
|
||||
let (notify_tx, notify_rx) = channel::<()>();
|
||||
|
||||
let event = Arc::new(EventFd::new(0).unwrap());
|
||||
let handler = DummySubscriber::new(event.clone(), notify_tx);
|
||||
let id = epoll_manager.add_subscriber(Box::new(handler));
|
||||
|
||||
thread.join().unwrap();
|
||||
|
||||
epoll_manager
|
||||
.add_event(id, Events::new(&event, EventSet::OUT))
|
||||
.unwrap();
|
||||
event.write(1).unwrap();
|
||||
|
||||
let epoll_manager_clone = epoll_manager.clone();
|
||||
let thread = std::thread::spawn(move || loop {
|
||||
let count = epoll_manager_clone.handle_events(-1).unwrap();
|
||||
if count == 0 {
|
||||
continue;
|
||||
}
|
||||
assert_eq!(count, 2);
|
||||
break;
|
||||
});
|
||||
notify_rx
|
||||
.recv_timeout(Duration::from_secs(2))
|
||||
.expect("timeout waiting for subscriber to be processed");
|
||||
|
||||
thread.join().unwrap();
|
||||
epoll_manager.remove_subscriber(id).unwrap();
|
||||
epoll_manager.clone().remove_subscriber(id).unwrap();
|
||||
let _ = stop_tx.send(());
|
||||
worker.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -690,6 +690,15 @@ mod tests {
|
||||
use crate::tests::{create_address_space, create_vm_and_irq_manager};
|
||||
use crate::{create_queue_notifier, VirtioQueueConfig};
|
||||
|
||||
fn unique_tap_name(prefix: &str) -> String {
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
static CNT: AtomicUsize = AtomicUsize::new(0);
|
||||
let n = CNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// "vtap" + pid(<=5) + n(<=3) => max len <= 15
|
||||
format!("{}{:x}{:x}", prefix, std::process::id() & 0xfff, n & 0xfff)
|
||||
}
|
||||
|
||||
fn create_vhost_kern_net_epoll_handler(
|
||||
id: String,
|
||||
) -> NetEpollHandler<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap> {
|
||||
@@ -723,13 +732,16 @@ mod tests {
|
||||
let guest_mac = MacAddr::parse_str(guest_mac_str).unwrap();
|
||||
let queue_sizes = Arc::new(vec![128]);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let mut dev: Net<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap> = Net::new(
|
||||
String::from("test_vhosttap"),
|
||||
Some(&guest_mac),
|
||||
queue_sizes,
|
||||
epoll_mgr,
|
||||
)
|
||||
.unwrap();
|
||||
let tap_name = unique_tap_name("vtap");
|
||||
let dev_result: VirtioResult<Net<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap>> =
|
||||
Net::new(tap_name.clone(), Some(&guest_mac), queue_sizes, epoll_mgr);
|
||||
let mut dev: Net<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap> = match dev_result {
|
||||
Ok(d) => d,
|
||||
Err(e) => {
|
||||
eprintln!("skip test: failed to create tap {}: {:?}", tap_name, e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
assert_eq!(dev.device_type(), TYPE_NET);
|
||||
|
||||
@@ -765,14 +777,16 @@ mod tests {
|
||||
{
|
||||
let queue_sizes = Arc::new(vec![128]);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let mut dev: Net<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap> = Net::new(
|
||||
String::from("test_vhosttap"),
|
||||
Some(&guest_mac),
|
||||
queue_sizes,
|
||||
epoll_mgr,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let tap_name = unique_tap_name("vtap");
|
||||
let dev_result: VirtioResult<Net<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap>> =
|
||||
Net::new(tap_name.clone(), Some(&guest_mac), queue_sizes, epoll_mgr);
|
||||
let mut dev: Net<Arc<GuestMemoryMmap>, QueueSync, GuestRegionMmap> = match dev_result {
|
||||
Ok(d) => d,
|
||||
Err(e) => {
|
||||
eprintln!("skip test: failed to create tap {}: {:?}", tap_name, e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let queues = vec![
|
||||
VirtioQueueConfig::create(128, 0).unwrap(),
|
||||
VirtioQueueConfig::create(128, 0).unwrap(),
|
||||
@@ -809,13 +823,17 @@ mod tests {
|
||||
let queue_eventfd2 = Arc::new(EventFd::new(0).unwrap());
|
||||
let queue_sizes = Arc::new(vec![128, 128]);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let mut dev: Net<Arc<GuestMemoryMmap>, Queue, GuestRegionMmap> = Net::new(
|
||||
String::from("test_vhosttap"),
|
||||
Some(&guest_mac),
|
||||
queue_sizes,
|
||||
epoll_mgr,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let tap_name = unique_tap_name("vtap");
|
||||
let dev_result: VirtioResult<Net<Arc<GuestMemoryMmap>, Queue, GuestRegionMmap>> =
|
||||
Net::new(tap_name.clone(), Some(&guest_mac), queue_sizes, epoll_mgr);
|
||||
let mut dev: Net<Arc<GuestMemoryMmap>, Queue, GuestRegionMmap> = match dev_result {
|
||||
Ok(d) => d,
|
||||
Err(e) => {
|
||||
eprintln!("skip test: failed to create tap {}: {:?}", tap_name, e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let queues = vec![
|
||||
VirtioQueueConfig::new(queue, queue_eventfd, notifier.clone(), 1),
|
||||
|
||||
@@ -590,6 +590,7 @@ where
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use dbs_device::resources::DeviceResources;
|
||||
use dbs_interrupt::{InterruptManager, InterruptSourceType, MsiNotifier, NoopNotifier};
|
||||
@@ -609,19 +610,16 @@ mod tests {
|
||||
};
|
||||
use crate::{VirtioDevice, VirtioDeviceConfig, VirtioQueueConfig, TYPE_NET};
|
||||
|
||||
fn connect_slave(path: &str) -> Option<Endpoint<MasterReq>> {
|
||||
let mut retry_count = 5;
|
||||
fn connect_slave(path: &str, timeout: Duration) -> Option<Endpoint<MasterReq>> {
|
||||
let deadline = Instant::now() + timeout;
|
||||
loop {
|
||||
match Endpoint::<MasterReq>::connect(path) {
|
||||
Ok(endpoint) => return Some(endpoint),
|
||||
Ok(ep) => return Some(ep),
|
||||
Err(_) => {
|
||||
if retry_count > 0 {
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
retry_count -= 1;
|
||||
continue;
|
||||
} else {
|
||||
if Instant::now() >= deadline {
|
||||
return None;
|
||||
}
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -639,62 +637,88 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_vhost_user_net_virtio_device_normal() {
|
||||
let device_socket = concat!("vhost.", line!());
|
||||
let queue_sizes = Arc::new(vec![128]);
|
||||
let dir_path = std::path::Path::new("/tmp");
|
||||
let socket_path = dir_path.join(format!(
|
||||
"vhost-user-net-{}-{:?}.sock",
|
||||
std::process::id(),
|
||||
thread::current().id()
|
||||
));
|
||||
let socket_str = socket_path.to_str().unwrap().to_string();
|
||||
|
||||
let _ = std::fs::remove_file(&socket_path);
|
||||
|
||||
let queue_sizes = Arc::new(vec![128u16]);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let handler = thread::spawn(move || {
|
||||
let mut slave = connect_slave(device_socket).unwrap();
|
||||
|
||||
let socket_for_slave = socket_str.clone();
|
||||
let slave_th = thread::spawn(move || {
|
||||
let mut slave = connect_slave(&socket_for_slave, Duration::from_secs(5))
|
||||
.unwrap_or_else(|| panic!("slave connect timeout: {}", socket_for_slave));
|
||||
create_vhost_user_net_slave(&mut slave);
|
||||
});
|
||||
let mut dev: VhostUserNet<Arc<GuestMemoryMmap>> =
|
||||
VhostUserNet::new_server(device_socket, None, queue_sizes, epoll_mgr).unwrap();
|
||||
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
let socket_for_master = socket_str.clone();
|
||||
let queue_sizes_for_master = queue_sizes.clone();
|
||||
let epoll_mgr_for_master = epoll_mgr.clone();
|
||||
thread::spawn(move || {
|
||||
let res = VhostUserNet::<Arc<GuestMemoryMmap>>::new_server(
|
||||
&socket_for_master,
|
||||
None,
|
||||
queue_sizes_for_master,
|
||||
epoll_mgr_for_master,
|
||||
);
|
||||
let _ = tx.send(res);
|
||||
});
|
||||
|
||||
let dev_res = rx
|
||||
.recv_timeout(Duration::from_secs(5))
|
||||
.unwrap_or_else(|_| panic!("new_server() stuck/timeout: {}", socket_str));
|
||||
|
||||
let dev: VhostUserNet<Arc<GuestMemoryMmap>> = dev_res.unwrap_or_else(|e| {
|
||||
panic!(
|
||||
"new_server() returned error: {:?}, socket={}",
|
||||
e, socket_str
|
||||
)
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::device_type(&dev),
|
||||
TYPE_NET
|
||||
);
|
||||
let queue_size = [128];
|
||||
|
||||
let queue_size = [128u16];
|
||||
assert_eq!(
|
||||
VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::queue_max_sizes(
|
||||
&dev
|
||||
),
|
||||
&queue_size[..]
|
||||
);
|
||||
assert_eq!(
|
||||
VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::get_avail_features(&dev, 0),
|
||||
dev.device().device_info.get_avail_features(0)
|
||||
);
|
||||
assert_eq!(
|
||||
VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::get_avail_features(&dev, 1),
|
||||
dev.device().device_info.get_avail_features(1)
|
||||
);
|
||||
assert_eq!(
|
||||
VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::get_avail_features(&dev, 2),
|
||||
dev.device().device_info.get_avail_features(2)
|
||||
);
|
||||
VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::set_acked_features(
|
||||
&mut dev, 2, 0,
|
||||
);
|
||||
assert_eq!(VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::get_avail_features(&dev, 2), 0);
|
||||
let config: [u8; 8] = [0; 8];
|
||||
let _ = VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::write_config(
|
||||
&mut dev, 0, &config,
|
||||
);
|
||||
let mut data: [u8; 8] = [1; 8];
|
||||
let _ = VirtioDevice::<Arc<GuestMemoryMmap<()>>, QueueSync, GuestRegionMmap>::read_config(
|
||||
&mut dev, 0, &mut data,
|
||||
);
|
||||
assert_eq!(config, data);
|
||||
handler.join().unwrap();
|
||||
|
||||
slave_th.join().unwrap();
|
||||
|
||||
let _ = std::fs::remove_file(&socket_path);
|
||||
drop(dev);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vhost_user_net_virtio_device_activate() {
|
||||
skip_if_kvm_unaccessable!();
|
||||
let device_socket = concat!("vhost.", line!());
|
||||
let queue_sizes = Arc::new(vec![128]);
|
||||
let dir_path = std::path::Path::new("/tmp");
|
||||
let socket_path = dir_path.join(format!(
|
||||
"vhost-user-net-{}-{:?}.sock",
|
||||
std::process::id(),
|
||||
thread::current().id()
|
||||
));
|
||||
let socket_str = socket_path.to_str().unwrap().to_string();
|
||||
let _ = std::fs::remove_file(&socket_path);
|
||||
|
||||
let queue_sizes = Arc::new(vec![128u16]);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let handler = thread::spawn(move || {
|
||||
let mut slave = connect_slave(device_socket).unwrap();
|
||||
let socket_for_slave = socket_str.clone();
|
||||
let slave_th = thread::spawn(move || {
|
||||
let mut slave = connect_slave(&socket_for_slave, Duration::from_secs(10))
|
||||
.unwrap_or_else(|| panic!("slave connect timeout: {}", socket_for_slave));
|
||||
create_vhost_user_net_slave(&mut slave);
|
||||
let mut pfeatures = VhostUserProtocolFeatures::all();
|
||||
// A workaround for no support for `INFLIGHT_SHMFD`. File an issue to track
|
||||
@@ -702,8 +726,30 @@ mod tests {
|
||||
pfeatures -= VhostUserProtocolFeatures::INFLIGHT_SHMFD;
|
||||
negotiate_slave(&mut slave, pfeatures, true, 1);
|
||||
});
|
||||
let mut dev: VhostUserNet<Arc<GuestMemoryMmap>> =
|
||||
VhostUserNet::new_server(device_socket, None, queue_sizes, epoll_mgr).unwrap();
|
||||
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
let socket_for_master = socket_str.clone();
|
||||
let queue_sizes_for_master = queue_sizes.clone();
|
||||
let epoll_mgr_for_master = epoll_mgr.clone();
|
||||
thread::spawn(move || {
|
||||
let res = VhostUserNet::<Arc<GuestMemoryMmap>>::new_server(
|
||||
&socket_for_master,
|
||||
None,
|
||||
queue_sizes_for_master,
|
||||
epoll_mgr_for_master,
|
||||
);
|
||||
let _ = tx.send(res);
|
||||
});
|
||||
let mut dev: VhostUserNet<Arc<GuestMemoryMmap>> = rx
|
||||
.recv_timeout(Duration::from_secs(10))
|
||||
.unwrap_or_else(|_| panic!("new_server() stuck/timeout: {}", socket_str))
|
||||
.unwrap_or_else(|e| {
|
||||
panic!(
|
||||
"new_server() returned error: {:?}, socket={}",
|
||||
e, socket_str
|
||||
)
|
||||
});
|
||||
|
||||
// invalid queue size
|
||||
{
|
||||
let kvm = Kvm::new().unwrap();
|
||||
@@ -760,6 +806,9 @@ mod tests {
|
||||
);
|
||||
dev.activate(config).unwrap();
|
||||
}
|
||||
handler.join().unwrap();
|
||||
slave_th.join().unwrap();
|
||||
|
||||
let _ = std::fs::remove_file(&socket_path);
|
||||
drop(dev);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -867,56 +867,96 @@ mod tests {
|
||||
.set_read_timeout(Some(Duration::from_millis(150)))
|
||||
.is_ok());
|
||||
|
||||
let cond_pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
let cond_pair_2 = Arc::clone(&cond_pair);
|
||||
let handler = thread::Builder::new()
|
||||
.spawn(move || {
|
||||
// notify handler thread start
|
||||
let (lock, cvar) = &*cond_pair_2;
|
||||
let mut started = lock.lock().unwrap();
|
||||
*started = true;
|
||||
// stage:
|
||||
// 0 = handler started
|
||||
// 1 = first read timed out (main can do first write now)
|
||||
// 2 = timeout cancelled, handler is about to do 3rd blocking read
|
||||
let stage = Arc::new((Mutex::new(0u32), Condvar::new()));
|
||||
let stage2 = Arc::clone(&stage);
|
||||
|
||||
let handler = thread::spawn(move || {
|
||||
// notify started
|
||||
{
|
||||
let (lock, cvar) = &*stage2;
|
||||
let mut s = lock.lock().unwrap();
|
||||
*s = 0;
|
||||
cvar.notify_one();
|
||||
drop(started);
|
||||
}
|
||||
|
||||
let start_time1 = Instant::now();
|
||||
let mut reader_buf = [0; 5];
|
||||
// first read would timed out
|
||||
assert_eq!(
|
||||
outer_stream.read_exact(&mut reader_buf).unwrap_err().kind(),
|
||||
ErrorKind::TimedOut
|
||||
);
|
||||
let end_time1 = Instant::now().duration_since(start_time1).as_millis();
|
||||
assert!((150..250).contains(&end_time1));
|
||||
let mut reader_buf = [0u8; 5];
|
||||
|
||||
// second read would ok
|
||||
assert!(outer_stream.read_exact(&mut reader_buf).is_ok());
|
||||
assert_eq!(reader_buf, [1, 2, 3, 4, 5]);
|
||||
// 1) first read should timed out
|
||||
let start_time1 = Instant::now();
|
||||
assert_eq!(
|
||||
outer_stream.read_exact(&mut reader_buf).unwrap_err().kind(),
|
||||
ErrorKind::TimedOut
|
||||
);
|
||||
let end_time1 = start_time1.elapsed().as_millis();
|
||||
assert!((150..300).contains(&end_time1));
|
||||
|
||||
// cancel the read timeout
|
||||
let start_time2 = Instant::now();
|
||||
outer_stream.set_read_timeout(None).unwrap();
|
||||
assert!(outer_stream.read_exact(&mut reader_buf).is_ok());
|
||||
let end_time2 = Instant::now().duration_since(start_time2).as_millis();
|
||||
assert!(end_time2 >= 500);
|
||||
})
|
||||
.unwrap();
|
||||
outer_stream
|
||||
.set_read_timeout(Some(Duration::from_secs(10)))
|
||||
.unwrap();
|
||||
|
||||
// wait handler thread started
|
||||
let (lock, cvar) = &*cond_pair;
|
||||
let mut started = lock.lock().unwrap();
|
||||
while !*started {
|
||||
started = cvar.wait(started).unwrap();
|
||||
// notify main: timeout observed, now do first write
|
||||
{
|
||||
let (lock, cvar) = &*stage2;
|
||||
let mut s = lock.lock().unwrap();
|
||||
*s = 1;
|
||||
cvar.notify_one();
|
||||
}
|
||||
|
||||
// 2) second read should ok (main will write after stage==1)
|
||||
outer_stream.read_exact(&mut reader_buf).unwrap();
|
||||
assert_eq!(reader_buf, [1, 2, 3, 4, 5]);
|
||||
|
||||
// 3) cancel timeout, then do a blocking read; notify main before blocking
|
||||
outer_stream.set_read_timeout(None).unwrap();
|
||||
{
|
||||
let (lock, cvar) = &*stage2;
|
||||
let mut s = lock.lock().unwrap();
|
||||
*s = 2;
|
||||
cvar.notify_one();
|
||||
}
|
||||
|
||||
let start_time2 = Instant::now();
|
||||
outer_stream.read_exact(&mut reader_buf).unwrap();
|
||||
let end_time2 = start_time2.elapsed().as_millis();
|
||||
assert!(end_time2 >= 500);
|
||||
assert_eq!(reader_buf, [1, 2, 3, 4, 5]);
|
||||
});
|
||||
|
||||
// wait handler started (stage==0)
|
||||
{
|
||||
let (lock, cvar) = &*stage;
|
||||
let mut s = lock.lock().unwrap();
|
||||
while *s != 0 {
|
||||
s = cvar.wait(s).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// sleep 300ms, test timeout
|
||||
thread::sleep(Duration::from_millis(300));
|
||||
let writer_buf = [1, 2, 3, 4, 5];
|
||||
inner_stream.write_all(&writer_buf).unwrap();
|
||||
// wait first timeout done (stage==1), then do first write
|
||||
{
|
||||
let (lock, cvar) = &*stage;
|
||||
let mut s = lock.lock().unwrap();
|
||||
while *s < 1 {
|
||||
s = cvar.wait(s).unwrap();
|
||||
}
|
||||
}
|
||||
inner_stream.write_all(&[1, 2, 3, 4, 5]).unwrap();
|
||||
|
||||
// wait handler cancelled timeout and is about to block-read (stage==2)
|
||||
{
|
||||
let (lock, cvar) = &*stage;
|
||||
let mut s = lock.lock().unwrap();
|
||||
while *s < 2 {
|
||||
s = cvar.wait(s).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// sleep 500ms again, test cancel timeout
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
let writer_buf = [1, 2, 3, 4, 5];
|
||||
inner_stream.write_all(&writer_buf).unwrap();
|
||||
inner_stream.write_all(&[1, 2, 3, 4, 5]).unwrap();
|
||||
|
||||
handler.join().unwrap();
|
||||
}
|
||||
|
||||
@@ -120,7 +120,7 @@ mod tests {
|
||||
|
||||
use libc::{cpu_set_t, syscall};
|
||||
use std::convert::TryInto;
|
||||
use std::{mem, process, thread};
|
||||
use std::{mem, thread};
|
||||
|
||||
use seccompiler::{apply_filter, BpfProgram, SeccompAction, SeccompFilter};
|
||||
|
||||
@@ -157,6 +157,16 @@ mod tests {
|
||||
let child = thread::spawn(move || {
|
||||
assert!(register_signal_handlers().is_ok());
|
||||
|
||||
// Trigger SIGBUS/SIGSEGV *before* installing the seccomp filter.
|
||||
// Call SIGBUS signal handler.
|
||||
assert_eq!(METRICS.read().unwrap().signals.sigbus.count(), 0);
|
||||
unsafe { libc::raise(SIGBUS) };
|
||||
|
||||
// Call SIGSEGV signal handler.
|
||||
assert_eq!(METRICS.read().unwrap().signals.sigsegv.count(), 0);
|
||||
unsafe { libc::raise(SIGSEGV) };
|
||||
|
||||
// Install a seccomp filter that traps a known syscall so that we can verify SIGSYS handling.
|
||||
let filter = SeccompFilter::new(
|
||||
vec![(libc::SYS_mkdirat, vec![])].into_iter().collect(),
|
||||
SeccompAction::Allow,
|
||||
@@ -168,20 +178,8 @@ mod tests {
|
||||
assert!(apply_filter(&TryInto::<BpfProgram>::try_into(filter).unwrap()).is_ok());
|
||||
assert_eq!(METRICS.read().unwrap().seccomp.num_faults.count(), 0);
|
||||
|
||||
// Call the blacklisted `SYS_mkdirat`.
|
||||
// Invoke the blacklisted syscall to trigger SIGSYS and exercise the SIGSYS handler.
|
||||
unsafe { syscall(libc::SYS_mkdirat, "/foo/bar\0") };
|
||||
|
||||
// Call SIGBUS signal handler.
|
||||
assert_eq!(METRICS.read().unwrap().signals.sigbus.count(), 0);
|
||||
unsafe {
|
||||
syscall(libc::SYS_kill, process::id(), SIGBUS);
|
||||
}
|
||||
|
||||
// Call SIGSEGV signal handler.
|
||||
assert_eq!(METRICS.read().unwrap().signals.sigsegv.count(), 0);
|
||||
unsafe {
|
||||
syscall(libc::SYS_kill, process::id(), SIGSEGV);
|
||||
}
|
||||
});
|
||||
assert!(child.join().is_ok());
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ use super::{default, register_hypervisor_plugin};
|
||||
use crate::config::default::MAX_CH_VCPUS;
|
||||
use crate::config::default::MIN_CH_MEMORY_SIZE_MB;
|
||||
|
||||
use crate::config::hypervisor::VIRTIO_BLK_MMIO;
|
||||
use crate::config::{ConfigPlugin, TomlConfig};
|
||||
use crate::{resolve_path, validate_path};
|
||||
|
||||
@@ -104,6 +105,16 @@ impl ConfigPlugin for CloudHypervisorConfig {
|
||||
));
|
||||
}
|
||||
|
||||
// CoCo guest hardening: virtio-mmio is not hardened for confidential computing.
|
||||
if ch.security_info.confidential_guest
|
||||
&& ch.boot_info.vm_rootfs_driver == VIRTIO_BLK_MMIO
|
||||
{
|
||||
return Err(std::io::Error::other(
|
||||
"Confidential guests must not use virtio-blk-mmio (use virtio-blk-pci); \
|
||||
virtio-mmio is not hardened for CoCo",
|
||||
));
|
||||
}
|
||||
|
||||
if ch.boot_info.kernel.is_empty() {
|
||||
return Err(std::io::Error::other("Guest kernel image for CH is empty"));
|
||||
}
|
||||
|
||||
@@ -124,6 +124,17 @@ impl ConfigPlugin for QemuConfig {
|
||||
));
|
||||
}
|
||||
|
||||
// CoCo guest hardening: virtio-mmio transport is not hardened for confidential
|
||||
// computing; only virtio-pci is. Ensure we never use virtio-blk-mmio for rootfs.
|
||||
if qemu.security_info.confidential_guest
|
||||
&& qemu.boot_info.vm_rootfs_driver == VIRTIO_BLK_MMIO
|
||||
{
|
||||
return Err(std::io::Error::other(
|
||||
"Confidential guests must not use virtio-blk-mmio (use virtio-blk-pci); \
|
||||
virtio-mmio is not hardened for CoCo",
|
||||
));
|
||||
}
|
||||
|
||||
if qemu.boot_info.kernel.is_empty() {
|
||||
return Err(std::io::Error::other(
|
||||
"Guest kernel image for qemu is empty",
|
||||
|
||||
@@ -520,6 +520,11 @@ message Storage {
|
||||
// FSGroup consists of the group ID and group ownership change policy
|
||||
// that the mounted volume must have its group ID changed to when specified.
|
||||
FSGroup fs_group = 7;
|
||||
// Shared indicates this storage is shared across multiple containers
|
||||
// (e.g., block-based emptyDirs). When true, the agent should not clean up
|
||||
// the storage when a container using it exits, as other containers
|
||||
// may still need it. Cleanup will happen when the sandbox is destroyed.
|
||||
bool shared = 8;
|
||||
}
|
||||
|
||||
// Device represents only the devices that could have been defined through the
|
||||
|
||||
@@ -470,7 +470,10 @@ impl CloudHypervisorInner {
|
||||
net_config.id = None;
|
||||
|
||||
net_config.num_queues = network_queues_pairs * 2;
|
||||
info!(sl!(), "network device queue pairs {:?}", network_queues_pairs);
|
||||
info!(
|
||||
sl!(),
|
||||
"network device queue pairs {:?}", network_queues_pairs
|
||||
);
|
||||
|
||||
// we need ensure opening network device happens in netns.
|
||||
let netns = self.netns.clone().unwrap_or_default();
|
||||
|
||||
@@ -9,8 +9,8 @@ use crate::device::topology::PCIePort;
|
||||
use crate::qemu::qmp::get_qmp_socket_path;
|
||||
use crate::{
|
||||
device::driver::ProtectionDeviceConfig, hypervisor_persist::HypervisorState, selinux,
|
||||
HypervisorConfig, MemoryConfig, VcpuThreadIds, VsockDevice, HYPERVISOR_QEMU,
|
||||
KATA_BLK_DEV_TYPE, KATA_CCW_DEV_TYPE, KATA_NVDIMM_DEV_TYPE, KATA_SCSI_DEV_TYPE,
|
||||
HypervisorConfig, MemoryConfig, VcpuThreadIds, VsockDevice, HYPERVISOR_QEMU, KATA_BLK_DEV_TYPE,
|
||||
KATA_CCW_DEV_TYPE, KATA_NVDIMM_DEV_TYPE, KATA_SCSI_DEV_TYPE,
|
||||
};
|
||||
|
||||
use crate::utils::{
|
||||
@@ -138,15 +138,16 @@ impl QemuInner {
|
||||
&block_dev.config.path_on_host,
|
||||
block_dev.config.is_readonly,
|
||||
)?,
|
||||
KATA_CCW_DEV_TYPE | KATA_BLK_DEV_TYPE | KATA_SCSI_DEV_TYPE => cmdline.add_block_device(
|
||||
block_dev.device_id.as_str(),
|
||||
&block_dev.config.path_on_host,
|
||||
block_dev
|
||||
.config
|
||||
.is_direct
|
||||
.unwrap_or(self.config.blockdev_info.block_device_cache_direct),
|
||||
block_dev.config.driver_option.as_str() == KATA_SCSI_DEV_TYPE,
|
||||
)?,
|
||||
KATA_CCW_DEV_TYPE | KATA_BLK_DEV_TYPE | KATA_SCSI_DEV_TYPE => cmdline
|
||||
.add_block_device(
|
||||
block_dev.device_id.as_str(),
|
||||
&block_dev.config.path_on_host,
|
||||
block_dev
|
||||
.config
|
||||
.is_direct
|
||||
.unwrap_or(self.config.blockdev_info.block_device_cache_direct),
|
||||
block_dev.config.driver_option.as_str() == KATA_SCSI_DEV_TYPE,
|
||||
)?,
|
||||
unsupported => {
|
||||
info!(sl!(), "unsupported block device driver: {}", unsupported)
|
||||
}
|
||||
|
||||
@@ -187,11 +187,21 @@ impl Qmp {
|
||||
continue;
|
||||
}
|
||||
(None, _) => {
|
||||
warn!(sl!(), "hotpluggable vcpu {} has no socket_id for driver {}, skipping", core_id, driver);
|
||||
warn!(
|
||||
sl!(),
|
||||
"hotpluggable vcpu {} has no socket_id for driver {}, skipping",
|
||||
core_id,
|
||||
driver
|
||||
);
|
||||
continue;
|
||||
}
|
||||
(_, None) => {
|
||||
warn!(sl!(), "hotpluggable vcpu {} has no thread_id for driver {}, skipping", core_id, driver);
|
||||
warn!(
|
||||
sl!(),
|
||||
"hotpluggable vcpu {} has no thread_id for driver {}, skipping",
|
||||
core_id,
|
||||
driver
|
||||
);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@@ -753,10 +763,9 @@ impl Qmp {
|
||||
|
||||
Ok((None, Some(scsi_addr)))
|
||||
} else if block_driver == VIRTIO_BLK_CCW {
|
||||
let subchannel = self
|
||||
.ccw_subchannel
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("CCW subchannel not available for virtio-blk-ccw hotplug"))?;
|
||||
let subchannel = self.ccw_subchannel.as_mut().ok_or_else(|| {
|
||||
anyhow!("CCW subchannel not available for virtio-blk-ccw hotplug")
|
||||
})?;
|
||||
|
||||
let slot = subchannel
|
||||
.add_device(&node_name)
|
||||
|
||||
@@ -11,6 +11,7 @@ lazy_static = { workspace = true }
|
||||
netns-rs = { workspace = true }
|
||||
slog = { workspace = true }
|
||||
slog-scope = { workspace = true }
|
||||
containerd-shim-protos = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt-multi-thread"] }
|
||||
tracing = { workspace = true }
|
||||
tracing-opentelemetry = { workspace = true }
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use containerd_shim_protos::events::task::{TaskExit, TaskOOM};
|
||||
use containerd_shim_protos::events::task::{TaskCreate, TaskDelete, TaskExit, TaskOOM, TaskStart};
|
||||
use containerd_shim_protos::protobuf::Message as ProtobufMessage;
|
||||
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
||||
|
||||
@@ -49,9 +49,15 @@ impl Message {
|
||||
|
||||
const TASK_OOM_EVENT_TOPIC: &str = "/tasks/oom";
|
||||
const TASK_EXIT_EVENT_TOPIC: &str = "/tasks/exit";
|
||||
const TASK_START_EVENT_TOPIC: &str = "/tasks/start";
|
||||
const TASK_CREATE_EVENT_TOPIC: &str = "/tasks/create";
|
||||
const TASK_DELETE_EVENT_TOPIC: &str = "/tasks/delete";
|
||||
|
||||
const TASK_OOM_EVENT_URL: &str = "containerd.events.TaskOOM";
|
||||
const TASK_EXIT_EVENT_URL: &str = "containerd.events.TaskExit";
|
||||
const TASK_START_EVENT_URL: &str = "containerd.events.TaskStart";
|
||||
const TASK_CREATE_EVENT_URL: &str = "containerd.events.TaskCreate";
|
||||
const TASK_DELETE_EVENT_URL: &str = "containerd.events.TaskDelete";
|
||||
|
||||
pub trait Event: std::fmt::Debug + Send {
|
||||
fn r#type(&self) -> String;
|
||||
@@ -86,3 +92,45 @@ impl Event for TaskExit {
|
||||
self.write_to_bytes().context("get exit value")
|
||||
}
|
||||
}
|
||||
|
||||
impl Event for TaskStart {
|
||||
fn r#type(&self) -> String {
|
||||
TASK_START_EVENT_TOPIC.to_string()
|
||||
}
|
||||
|
||||
fn type_url(&self) -> String {
|
||||
TASK_START_EVENT_URL.to_string()
|
||||
}
|
||||
|
||||
fn value(&self) -> Result<Vec<u8>> {
|
||||
self.write_to_bytes().context("get start value")
|
||||
}
|
||||
}
|
||||
|
||||
impl Event for TaskCreate {
|
||||
fn r#type(&self) -> String {
|
||||
TASK_CREATE_EVENT_TOPIC.to_string()
|
||||
}
|
||||
|
||||
fn type_url(&self) -> String {
|
||||
TASK_CREATE_EVENT_URL.to_string()
|
||||
}
|
||||
|
||||
fn value(&self) -> Result<Vec<u8>> {
|
||||
self.write_to_bytes().context("get create value")
|
||||
}
|
||||
}
|
||||
|
||||
impl Event for TaskDelete {
|
||||
fn r#type(&self) -> String {
|
||||
TASK_DELETE_EVENT_TOPIC.to_string()
|
||||
}
|
||||
|
||||
fn type_url(&self) -> String {
|
||||
TASK_DELETE_EVENT_URL.to_string()
|
||||
}
|
||||
|
||||
fn value(&self) -> Result<Vec<u8>> {
|
||||
self.write_to_bytes().context("get delete value")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,14 +6,16 @@
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use common::{
|
||||
message::Message,
|
||||
message::{Action, Message},
|
||||
types::{
|
||||
ContainerProcess, PlatformInfo, SandboxConfig, SandboxRequest, SandboxResponse,
|
||||
SandboxStatusInfo, StartSandboxInfo, TaskRequest, TaskResponse, DEFAULT_SHM_SIZE,
|
||||
ContainerProcess, PlatformInfo, ProcessType, SandboxConfig, SandboxRequest,
|
||||
SandboxResponse, SandboxStatusInfo, StartSandboxInfo, TaskRequest, TaskResponse,
|
||||
DEFAULT_SHM_SIZE,
|
||||
},
|
||||
RuntimeHandler, RuntimeInstance, Sandbox, SandboxNetworkEnv,
|
||||
};
|
||||
|
||||
use containerd_shim_protos::events::task::{TaskCreate, TaskDelete, TaskStart};
|
||||
use hypervisor::{
|
||||
utils::{create_dir_all_with_inherit_owner, create_vmm_user, remove_vmm_user},
|
||||
Param,
|
||||
@@ -33,13 +35,13 @@ use netns_rs::{Env, NetNs};
|
||||
use nix::{sys::statfs, unistd::User};
|
||||
use oci_spec::runtime as oci;
|
||||
use persist::sandbox_persist::Persist;
|
||||
use protobuf::Message as ProtobufMessage;
|
||||
use resource::{
|
||||
cpu_mem::initial_size::InitialSizeManager,
|
||||
network::{dan_config_path, generate_netns_name},
|
||||
};
|
||||
use runtime_spec as spec;
|
||||
use shim_interface::shim_mgmt::ERR_NO_SHIM_SERVER;
|
||||
use protobuf::Message as ProtobufMessage;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
env,
|
||||
@@ -480,6 +482,7 @@ impl RuntimeHandlerManager {
|
||||
.await
|
||||
.context("start sandbox in task handler")?;
|
||||
|
||||
let bundle = container_config.bundle.clone();
|
||||
let container_id = container_config.container_id.clone();
|
||||
let shim_pid = instance
|
||||
.container_manager
|
||||
@@ -501,6 +504,19 @@ impl RuntimeHandlerManager {
|
||||
}
|
||||
});
|
||||
|
||||
let msg_sender = self.inner.read().await.msg_sender.clone();
|
||||
let event = TaskCreate {
|
||||
container_id,
|
||||
bundle,
|
||||
pid,
|
||||
..Default::default()
|
||||
};
|
||||
let msg = Message::new(Action::Event(Arc::new(event)));
|
||||
msg_sender
|
||||
.send(msg)
|
||||
.await
|
||||
.context("send task create event")?;
|
||||
|
||||
Ok(TaskResponse::CreateContainer(shim_pid))
|
||||
} else {
|
||||
self.handler_task_request(req)
|
||||
@@ -570,6 +586,7 @@ impl RuntimeHandlerManager {
|
||||
.context("get runtime instance")?;
|
||||
let sandbox = instance.sandbox.clone();
|
||||
let cm = instance.container_manager.clone();
|
||||
let msg_sender = self.inner.read().await.msg_sender.clone();
|
||||
|
||||
match req {
|
||||
TaskRequest::CreateContainer(req) => Err(anyhow!("Unreachable TaskRequest {:?}", req)),
|
||||
@@ -579,6 +596,20 @@ impl RuntimeHandlerManager {
|
||||
}
|
||||
TaskRequest::DeleteProcess(process_id) => {
|
||||
let resp = cm.delete_process(&process_id).await.context("do delete")?;
|
||||
if process_id.process_type == ProcessType::Container {
|
||||
let event = TaskDelete {
|
||||
id: process_id.container_id().to_string(),
|
||||
pid: resp.pid.pid,
|
||||
exit_status: resp.exit_status as u32,
|
||||
..Default::default()
|
||||
};
|
||||
let msg = Message::new(Action::Event(Arc::new(event)));
|
||||
msg_sender
|
||||
.send(msg)
|
||||
.await
|
||||
.context("send task delete event")?;
|
||||
}
|
||||
|
||||
Ok(TaskResponse::DeleteProcess(resp))
|
||||
}
|
||||
TaskRequest::ExecProcess(req) => {
|
||||
@@ -614,12 +645,28 @@ impl RuntimeHandlerManager {
|
||||
.context("start process")?;
|
||||
|
||||
let pid = shim_pid.pid;
|
||||
let process_type = process_id.process_type;
|
||||
let container_id = process_id.container_id().to_string();
|
||||
tokio::spawn(async move {
|
||||
let result = sandbox.wait_process(cm, process_id, pid).await;
|
||||
if let Err(e) = result {
|
||||
error!(sl!(), "sandbox wait process error: {:?}", e);
|
||||
}
|
||||
});
|
||||
|
||||
if process_type == ProcessType::Container {
|
||||
let event = TaskStart {
|
||||
container_id,
|
||||
pid,
|
||||
..Default::default()
|
||||
};
|
||||
let msg = Message::new(Action::Event(Arc::new(event)));
|
||||
msg_sender
|
||||
.send(msg)
|
||||
.await
|
||||
.context("send task start event")?;
|
||||
}
|
||||
|
||||
Ok(TaskResponse::StartProcess(shim_pid))
|
||||
}
|
||||
|
||||
|
||||
@@ -147,10 +147,14 @@ DEFROOTFSTYPE := $(ROOTFSTYPE_EXT4)
|
||||
FIRMWAREPATH :=
|
||||
FIRMWAREVOLUMEPATH :=
|
||||
|
||||
FIRMWAREPATH_NV = $(FIRMWAREPATH)
|
||||
|
||||
FIRMWARETDVFPATH := $(PREFIXDEPS)/share/ovmf/OVMF.inteltdx.fd
|
||||
FIRMWARETDVFPATH_NV := $(FIRMWARETDVFPATH)
|
||||
FIRMWARETDVFVOLUMEPATH :=
|
||||
|
||||
FIRMWARESNPPATH := $(PREFIXDEPS)/share/ovmf/AMDSEV.fd
|
||||
FIRMWARESNPPATH_NV := $(FIRMWARESNPPATH)
|
||||
|
||||
KERNELVERITYPARAMS ?= ""
|
||||
KERNELVERITYPARAMS_NV ?= ""
|
||||
@@ -221,6 +225,8 @@ DEFENABLEANNOTATIONS := [\"enable_iommu\", \"virtio_fs_extra_args\", \"kernel_pa
|
||||
DEFENABLEANNOTATIONS_COCO := [\"enable_iommu\", \"virtio_fs_extra_args\", \"kernel_params\", \"kernel_verity_params\", \"default_vcpus\", \"default_memory\", \"cc_init_data\"]
|
||||
DEFDISABLEGUESTSECCOMP := true
|
||||
DEFDISABLEGUESTEMPTYDIR := false
|
||||
DEFEMPTYDIRMODE := shared-fs
|
||||
DEFEMPTYDIRMODE_COCO := block-encrypted
|
||||
#Default experimental features enabled
|
||||
DEFAULTEXPFEATURES := []
|
||||
|
||||
@@ -301,9 +307,11 @@ DEFDANCONF := /run/kata-containers/dans
|
||||
|
||||
DEFFORCEGUESTPULL := false
|
||||
|
||||
DEFKUBELETROOTDIR := /var/lib/kubelet
|
||||
|
||||
# Device cold plug
|
||||
DEFPODRESOURCEAPISOCK := ""
|
||||
DEFPODRESOURCEAPISOCK_NV := "/var/lib/kubelet/pod-resources/kubelet.sock"
|
||||
DEFPODRESOURCEAPISOCK_NV := "$(DEFKUBELETROOTDIR)/pod-resources/kubelet.sock"
|
||||
|
||||
SED = sed
|
||||
|
||||
@@ -468,8 +476,8 @@ ifneq (,$(QEMUCMD))
|
||||
KERNELSEPATH = $(KERNELDIR)/$(KERNELSENAME)
|
||||
|
||||
# NVIDIA GPU specific options (all should be suffixed by _NV)
|
||||
# Normal: uncompressed (KERNELTYPE). Confidential: compressed (KERNELCONFIDENTIALTYPE).
|
||||
KERNELNAME_NV = $(call MAKE_KERNEL_NAME_NV,$(KERNELTYPE))
|
||||
KERNELTYPE_NV = compressed
|
||||
KERNELNAME_NV = $(call MAKE_KERNEL_NAME_NV,$(KERNELTYPE_NV))
|
||||
KERNELPATH_NV = $(KERNELDIR)/$(KERNELNAME_NV)
|
||||
KERNELNAME_CONFIDENTIAL_NV = $(call MAKE_KERNEL_NAME_NV,$(KERNELCONFIDENTIALTYPE))
|
||||
KERNELPATH_CONFIDENTIAL_NV = $(KERNELDIR)/$(KERNELNAME_CONFIDENTIAL_NV)
|
||||
@@ -485,6 +493,9 @@ ifneq (,$(QEMUCMD))
|
||||
# using an image and /dev is already mounted.
|
||||
KERNELPARAMS_NV = "cgroup_no_v1=all"
|
||||
KERNELPARAMS_NV += "devtmpfs.mount=0"
|
||||
KERNELPARAMS_NV += "pci=realloc"
|
||||
KERNELPARAMS_NV += "pci=nocrs"
|
||||
KERNELPARAMS_NV += "pci=assign-busses"
|
||||
|
||||
# Setting this to false can lead to cgroup leakages in the host
|
||||
# Best practice for production is to set this to true
|
||||
@@ -681,10 +692,13 @@ USER_VARS += KERNELPATH_FC
|
||||
USER_VARS += KERNELPATH_STRATOVIRT
|
||||
USER_VARS += KERNELVIRTIOFSPATH
|
||||
USER_VARS += FIRMWAREPATH
|
||||
USER_VARS += FIRMWAREPATH_NV
|
||||
USER_VARS += FIRMWARETDVFPATH
|
||||
USER_VARS += FIRMWAREVOLUMEPATH
|
||||
USER_VARS += FIRMWARETDVFVOLUMEPATH
|
||||
USER_VARS += FIRMWARESNPPATH
|
||||
USER_VARS += FIRMWARETDVFPATH_NV
|
||||
USER_VARS += FIRMWARESNPPATH_NV
|
||||
USER_VARS += MACHINEACCELERATORS
|
||||
USER_VARS += CPUFEATURES
|
||||
USER_VARS += TDXCPUFEATURES
|
||||
@@ -738,6 +752,8 @@ USER_VARS += DEFNETWORKMODEL_FC
|
||||
USER_VARS += DEFNETWORKMODEL_QEMU
|
||||
USER_VARS += DEFNETWORKMODEL_STRATOVIRT
|
||||
USER_VARS += DEFDISABLEGUESTEMPTYDIR
|
||||
USER_VARS += DEFEMPTYDIRMODE
|
||||
USER_VARS += DEFEMPTYDIRMODE_COCO
|
||||
USER_VARS += DEFDISABLEGUESTSECCOMP
|
||||
USER_VARS += DEFDISABLESELINUX
|
||||
USER_VARS += DEFDISABLEGUESTSELINUX
|
||||
@@ -785,6 +801,7 @@ USER_VARS += DEFSTATICRESOURCEMGMT_NV
|
||||
USER_VARS += DEFBINDMOUNTS
|
||||
USER_VARS += DEFCREATECONTAINERTIMEOUT
|
||||
USER_VARS += DEFDANCONF
|
||||
USER_VARS += DEFKUBELETROOTDIR
|
||||
USER_VARS += DEFFORCEGUESTPULL
|
||||
USER_VARS += DEFVFIOMODE
|
||||
USER_VARS += DEFVFIOMODE_SE
|
||||
|
||||
@@ -463,6 +463,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -491,6 +503,11 @@ create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
# (default: /run/kata-containers/dans)
|
||||
dan_conf = "@DEFDANCONF@"
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -354,6 +354,18 @@ static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_FC@
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -382,6 +394,11 @@ create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
# (default: /run/kata-containers/dans)
|
||||
dan_conf = "@DEFDANCONF@"
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -638,6 +638,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -670,6 +682,12 @@ dan_conf = "@DEFDANCONF@"
|
||||
# the container image should be pulled in the guest, without using an external snapshotter.
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -701,6 +701,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE_COCO@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -734,6 +746,11 @@ dan_conf = "@DEFDANCONF@"
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -99,7 +99,7 @@ kernel_verity_params = "@KERNELVERITYPARAMS_CONFIDENTIAL_NV@"
|
||||
|
||||
# Path to the firmware.
|
||||
# If you want that qemu uses the default firmware leave this option empty
|
||||
firmware = "@FIRMWARESNPPATH@"
|
||||
firmware = "@FIRMWARESNPPATH_NV@"
|
||||
|
||||
# Path to the firmware volume.
|
||||
# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables
|
||||
@@ -717,6 +717,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -750,6 +762,11 @@ dan_conf = "@DEFDANCONF@"
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -76,7 +76,7 @@ kernel_verity_params = "@KERNELVERITYPARAMS_CONFIDENTIAL_NV@"
|
||||
|
||||
# Path to the firmware.
|
||||
# If you want that qemu uses the default firmware leave this option empty
|
||||
firmware = "@FIRMWARETDVFPATH@"
|
||||
firmware = "@FIRMWARETDVFPATH_NV@"
|
||||
|
||||
# Path to the firmware volume.
|
||||
# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables
|
||||
@@ -694,6 +694,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -727,6 +739,11 @@ dan_conf = "@DEFDANCONF@"
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -58,7 +58,7 @@ kernel_verity_params = "@KERNELVERITYPARAMS_NV@"
|
||||
|
||||
# Path to the firmware.
|
||||
# If you want that qemu uses the default firmware leave this option empty
|
||||
firmware = "@FIRMWAREPATH@"
|
||||
firmware = "@FIRMWAREPATH_NV@"
|
||||
|
||||
# Path to the firmware volume.
|
||||
# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables
|
||||
@@ -696,6 +696,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -724,6 +736,11 @@ create_container_timeout = @DEFAULTTIMEOUT_NV@
|
||||
# (default: /run/kata-containers/dans)
|
||||
dan_conf = "@DEFDANCONF@"
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -679,6 +679,18 @@ vfio_mode = "@DEFVFIOMODE_SE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -712,6 +724,11 @@ dan_conf = "@DEFDANCONF@"
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -704,6 +704,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE_COCO@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -737,6 +749,11 @@ dan_conf = "@DEFDANCONF@"
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -686,6 +686,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE_COCO@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -719,6 +731,11 @@ dan_conf = "@DEFDANCONF@"
|
||||
# This is an experimental feature and might be removed in the future.
|
||||
experimental_force_guest_pull = @DEFFORCEGUESTPULL@
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -695,6 +695,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -723,6 +735,11 @@ create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
# (default: /run/kata-containers/dans)
|
||||
dan_conf = "@DEFDANCONF@"
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -262,6 +262,18 @@ vfio_mode = "@DEFVFIOMODE@"
|
||||
# Note: remote hypervisor has no sharing of emptydir mounts from host to guest
|
||||
disable_guest_empty_dir = false
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -290,6 +302,11 @@ create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
# (default: /run/kata-containers/dans)
|
||||
dan_conf = "@DEFDANCONF@"
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -397,6 +397,18 @@ static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_STRATOVIRT@
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Specifies how Kubernetes emptyDir volumes are handled.
|
||||
# Options:
|
||||
#
|
||||
# - shared-fs (default)
|
||||
# Shares the emptyDir folder with the guest using the method given
|
||||
# by the `shared_fs` setting.
|
||||
#
|
||||
# - block-encrypted
|
||||
# Plugs a block device to be encrypted in the guest.
|
||||
#
|
||||
emptydir_mode = "@DEFEMPTYDIRMODE@"
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
@@ -425,6 +437,11 @@ create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
# (default: /run/kata-containers/dans)
|
||||
dan_conf = "@DEFDANCONF@"
|
||||
|
||||
# kubelet_root_dir is the kubelet root directory used to match ConfigMap/Secret
|
||||
# volume paths for propagation. Override for distros that use a different path
|
||||
# (e.g. k0s: /var/lib/k0s/kubelet).
|
||||
kubelet_root_dir = "@DEFKUBELETROOTDIR@"
|
||||
|
||||
# pod_resource_api_sock specifies the unix socket for the Kubelet's
|
||||
# PodResource API endpoint. If empty, kubernetes based cold plug
|
||||
# will not be attempted. In order for this feature to work, the
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
module github.com/kata-containers/kata-containers/src/runtime
|
||||
|
||||
// Keep in sync with version in versions.yaml
|
||||
go 1.24.13
|
||||
go 1.25.7
|
||||
|
||||
// WARNING: Do NOT use `replace` directives as those break dependabot:
|
||||
// https://github.com/kata-containers/kata-containers/issues/11020
|
||||
@@ -26,7 +26,7 @@ require (
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/go-ini/ini v1.67.0
|
||||
github.com/go-openapi/errors v0.22.6
|
||||
github.com/go-openapi/errors v0.22.1
|
||||
github.com/go-openapi/runtime v0.28.0
|
||||
github.com/go-openapi/strfmt v0.23.0
|
||||
github.com/go-openapi/swag v0.23.1
|
||||
@@ -52,11 +52,10 @@ require (
|
||||
github.com/urfave/cli v1.22.17
|
||||
github.com/vishvananda/netlink v1.3.1
|
||||
github.com/vishvananda/netns v0.0.5
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220601114329-47893b162965
|
||||
go.opentelemetry.io/otel v1.35.0
|
||||
go.opentelemetry.io/otel v1.40.0
|
||||
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
|
||||
go.opentelemetry.io/otel/sdk v1.35.0
|
||||
go.opentelemetry.io/otel/trace v1.35.0
|
||||
go.opentelemetry.io/otel/sdk v1.40.0
|
||||
go.opentelemetry.io/otel/trace v1.40.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sys v0.40.0
|
||||
google.golang.org/grpc v1.72.0
|
||||
@@ -127,9 +126,9 @@ require (
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
|
||||
golang.org/x/mod v0.31.0 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
|
||||
@@ -107,8 +107,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
|
||||
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
|
||||
github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo=
|
||||
github.com/go-openapi/errors v0.22.6/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
|
||||
github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
|
||||
github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
@@ -123,8 +123,6 @@ github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMg
|
||||
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
|
||||
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
|
||||
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
|
||||
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
|
||||
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
|
||||
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
|
||||
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
|
||||
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
|
||||
@@ -268,8 +266,8 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.6.2 h1:O3ZPFAKEUEfbtE6J/feEe2Ft7dIJ2Sy8t4SdMRiIMHY=
|
||||
@@ -311,31 +309,29 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220601114329-47893b162965 h1:EXE1ZsUqiUWGV5Dw2oTYpXx24ffxj0//yhTB0Ppv+4s=
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220601114329-47893b162965/go.mod h1:TBB3sR7/jg4RCThC/cgT4fB8mAbbMO307TycfgeR59w=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
|
||||
go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/exporters/jaeger v1.0.0 h1:cLhx8llHw02h5JTqGqaRbYn+QVKHmrzD9vEbKnSPk5U=
|
||||
go.opentelemetry.io/otel/exporters/jaeger v1.0.0/go.mod h1:q10N1AolE1JjqKrFJK2tYw0iZpmX+HBaXBtuCzRnBGQ=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
|
||||
go.opentelemetry.io/otel/sdk v1.0.0/go.mod h1:PCrDHlSy5x1kjezSdL37PhbFUMjrsLRshJ2zCzeXwbM=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
const (
|
||||
mountInfoFileName = "mountInfo.json"
|
||||
|
||||
EncryptionKeyMetadataKey = "encryptionKey"
|
||||
FSGroupMetadataKey = "fsGroup"
|
||||
FSGroupChangePolicyMetadataKey = "fsGroupChangePolicy"
|
||||
)
|
||||
@@ -77,6 +78,14 @@ func Add(volumePath string, mountInfo string) error {
|
||||
return os.WriteFile(filepath.Join(volumeDir, mountInfoFileName), []byte(mountInfo), 0600)
|
||||
}
|
||||
|
||||
func AddMountInfo(volumePath string, mountInfo MountInfo) error {
|
||||
s, err := json.Marshal(&mountInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Add(volumePath, string(s))
|
||||
}
|
||||
|
||||
// Remove deletes the direct volume path including all the files inside it.
|
||||
func Remove(volumePath string) error {
|
||||
return os.RemoveAll(filepath.Join(kataDirectVolumeRootPath, b64.URLEncoding.EncodeToString([]byte(volumePath))))
|
||||
@@ -99,7 +108,18 @@ func VolumeMountInfo(volumePath string) (*MountInfo, error) {
|
||||
return &mountInfo, nil
|
||||
}
|
||||
|
||||
// RecordSandboxID associates a sandbox id with a direct volume.
|
||||
// IsVolumeMounted returns whether the direct volume mount is present.
|
||||
func IsVolumeMounted(volumePath string) (bool, error) {
|
||||
if _, err := VolumeMountInfo(volumePath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// RecordSandboxId associates a sandbox id with a direct volume.
|
||||
func RecordSandboxID(sandboxID string, volumePath string) error {
|
||||
encodedPath := b64.URLEncoding.EncodeToString([]byte(volumePath))
|
||||
mountInfoFilePath := filepath.Join(kataDirectVolumeRootPath, encodedPath, mountInfoFileName)
|
||||
|
||||
@@ -197,10 +197,28 @@ type runtime struct {
|
||||
StaticSandboxResourceMgmt bool `toml:"static_sandbox_resource_mgmt"`
|
||||
EnablePprof bool `toml:"enable_pprof"`
|
||||
DisableGuestEmptyDir bool `toml:"disable_guest_empty_dir"`
|
||||
EmptyDirMode string `toml:"emptydir_mode"`
|
||||
CreateContainerTimeout uint64 `toml:"create_container_timeout"`
|
||||
DanConf string `toml:"dan_conf"`
|
||||
ForceGuestPull bool `toml:"experimental_force_guest_pull"`
|
||||
PodResourceAPISock string `toml:"pod_resource_api_sock"`
|
||||
KubeletRootDir string `toml:"kubelet_root_dir"`
|
||||
}
|
||||
|
||||
// emptyDirMode returns a valid emptydir_mode value, defaulting to shared-fs
|
||||
// if the TOML field is unset.
|
||||
func (r runtime) emptyDirMode() (string, error) {
|
||||
if r.EmptyDirMode == "" {
|
||||
return vc.EmptyDirModeSharedFs, nil
|
||||
}
|
||||
|
||||
switch r.EmptyDirMode {
|
||||
case vc.EmptyDirModeSharedFs, vc.EmptyDirModeVirtioBlkEncrypted:
|
||||
return r.EmptyDirMode, nil
|
||||
default:
|
||||
return "", fmt.Errorf("invalid emptydir_mode=%q, allowed values: %q, %q",
|
||||
r.EmptyDirMode, vc.EmptyDirModeSharedFs, vc.EmptyDirModeVirtioBlkEncrypted)
|
||||
}
|
||||
}
|
||||
|
||||
type agent struct {
|
||||
@@ -1389,6 +1407,16 @@ func updateRuntimeConfigAgent(configPath string, tomlConf tomlConfig, config *oc
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateRuntimeConfigRuntime(configPath string, tomlConf tomlConfig, config *oci.RuntimeConfig) error {
|
||||
emptyDirMode, err := tomlConf.Runtime.emptyDirMode()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%v: %v", configPath, err)
|
||||
}
|
||||
config.EmptyDirMode = emptyDirMode
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetKernelParams adds the user-specified kernel parameters (from the
|
||||
// configuration file) to the defaults so that the former take priority.
|
||||
func SetKernelParams(runtimeConfig *oci.RuntimeConfig) error {
|
||||
@@ -1453,6 +1481,10 @@ func updateRuntimeConfig(configPath string, tomlConf tomlConfig, config *oci.Run
|
||||
return err
|
||||
}
|
||||
|
||||
if err := updateRuntimeConfigRuntime(configPath, tomlConf, config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fConfig, err := newFactoryConfig(tomlConf.Factory)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%v: %v", configPath, err)
|
||||
@@ -1642,6 +1674,7 @@ func LoadConfiguration(configPath string, ignoreLogging bool) (resolvedConfigPat
|
||||
|
||||
config.ForceGuestPull = tomlConf.Runtime.ForceGuestPull
|
||||
config.PodResourceAPISock = tomlConf.Runtime.PodResourceAPISock
|
||||
config.KubeletRootDir = tomlConf.Runtime.KubeletRootDir
|
||||
|
||||
return resolved, config, nil
|
||||
}
|
||||
|
||||
@@ -218,6 +218,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (testConfig testRuntime
|
||||
JaegerPassword: jaegerPassword,
|
||||
|
||||
FactoryConfig: factoryConfig,
|
||||
EmptyDirMode: vc.EmptyDirModeSharedFs,
|
||||
}
|
||||
|
||||
err = SetKernelParams(&runtimeConfig)
|
||||
@@ -599,6 +600,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
AgentConfig: expectedAgentConfig,
|
||||
|
||||
FactoryConfig: expectedFactoryConfig,
|
||||
EmptyDirMode: vc.EmptyDirModeSharedFs,
|
||||
}
|
||||
err = SetKernelParams(&expectedConfig)
|
||||
if err != nil {
|
||||
@@ -1609,6 +1611,39 @@ func TestCheckNetNsConfig(t *testing.T) {
|
||||
assert.Error(err)
|
||||
}
|
||||
|
||||
func TestCheckEmptyDirMode(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
// Valid values
|
||||
r := runtime{EmptyDirMode: vc.EmptyDirModeSharedFs}
|
||||
mode, err := r.emptyDirMode()
|
||||
assert.NoError(err)
|
||||
assert.Equal(vc.EmptyDirModeSharedFs, mode)
|
||||
|
||||
r = runtime{EmptyDirMode: vc.EmptyDirModeVirtioBlkEncrypted}
|
||||
mode, err = r.emptyDirMode()
|
||||
assert.NoError(err)
|
||||
assert.Equal(vc.EmptyDirModeVirtioBlkEncrypted, mode)
|
||||
|
||||
r = runtime{}
|
||||
mode, err = r.emptyDirMode()
|
||||
assert.NoError(err)
|
||||
assert.Equal(vc.EmptyDirModeSharedFs, mode)
|
||||
|
||||
// Invalid values
|
||||
r = runtime{EmptyDirMode: "invalid"}
|
||||
_, err = r.emptyDirMode()
|
||||
assert.Error(err)
|
||||
|
||||
r = runtime{EmptyDirMode: "shared_fs"}
|
||||
_, err = r.emptyDirMode()
|
||||
assert.Error(err)
|
||||
|
||||
r = runtime{EmptyDirMode: "block_encrypted"}
|
||||
_, err = r.emptyDirMode()
|
||||
assert.Error(err)
|
||||
}
|
||||
|
||||
func TestCheckFactoryConfig(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
|
||||
@@ -98,12 +98,12 @@ func HandleFactory(ctx context.Context, vci vc.VC, runtimeConfig *oci.RuntimeCon
|
||||
// For the given pod ephemeral volume is created only once
|
||||
// backed by tmpfs inside the VM. For successive containers
|
||||
// of the same pod the already existing volume is reused.
|
||||
func SetEphemeralStorageType(ociSpec specs.Spec, disableGuestEmptyDir bool) specs.Spec {
|
||||
func SetEphemeralStorageType(ociSpec specs.Spec, disableGuestEmptyDir bool, emptyDirMode string) specs.Spec {
|
||||
for idx, mnt := range ociSpec.Mounts {
|
||||
if vc.IsEphemeralStorage(mnt.Source) {
|
||||
ociSpec.Mounts[idx].Type = vc.KataEphemeralDevType
|
||||
}
|
||||
if vc.Isk8sHostEmptyDir(mnt.Source) && !disableGuestEmptyDir {
|
||||
if vc.Isk8sHostEmptyDir(mnt.Source) && !disableGuestEmptyDir && emptyDirMode != vc.EmptyDirModeVirtioBlkEncrypted {
|
||||
ociSpec.Mounts[idx].Type = vc.KataLocalDevType
|
||||
}
|
||||
}
|
||||
@@ -243,7 +243,8 @@ func CreateContainer(ctx context.Context, sandbox vc.VCSandbox, ociSpec specs.Sp
|
||||
// The value of this annotation is sent to the sandbox using init data.
|
||||
delete(ociSpec.Annotations, vcAnnotations.Initdata)
|
||||
|
||||
ociSpec = SetEphemeralStorageType(ociSpec, disableGuestEmptyDir)
|
||||
emptyDirMode := sandbox.Status().EmptyDirMode
|
||||
ociSpec = SetEphemeralStorageType(ociSpec, disableGuestEmptyDir, emptyDirMode)
|
||||
|
||||
contConfig, err := oci.ContainerConfig(ociSpec, bundlePath, containerID, disableOutput)
|
||||
if err != nil {
|
||||
|
||||
@@ -141,7 +141,7 @@ func TestSetEphemeralStorageType(t *testing.T) {
|
||||
|
||||
ociMounts = append(ociMounts, mount)
|
||||
ociSpec.Mounts = ociMounts
|
||||
ociSpec = SetEphemeralStorageType(ociSpec, false)
|
||||
ociSpec = SetEphemeralStorageType(ociSpec, false, vc.EmptyDirModeSharedFs)
|
||||
|
||||
mountType := ociSpec.Mounts[0].Type
|
||||
assert.Equal(mountType, "ephemeral",
|
||||
|
||||
@@ -165,6 +165,10 @@ type RuntimeConfig struct {
|
||||
// Determines if Kata creates emptyDir on the guest
|
||||
DisableGuestEmptyDir bool
|
||||
|
||||
// EmptyDirMode specifies how Kubernetes emptyDir volumes are handled.
|
||||
// Valid values are "shared-fs" (default) or "block-encrypted".
|
||||
EmptyDirMode string
|
||||
|
||||
// CreateContainer timeout which, if provided, indicates the createcontainer request timeout
|
||||
// needed for the workload ( Mostly used for pulling images in the guest )
|
||||
CreateContainerTimeout uint64
|
||||
@@ -193,6 +197,10 @@ type RuntimeConfig struct {
|
||||
// ColdPlugVFIO != NoPort AND PodResourceAPISock != "" => kubelet
|
||||
// based cold plug.
|
||||
PodResourceAPISock string
|
||||
|
||||
// KubeletRootDir is the kubelet root directory used to match ConfigMap/Secret
|
||||
// volume paths (e.g. /var/lib/k0s/kubelet for k0s). If empty, default is used.
|
||||
KubeletRootDir string
|
||||
}
|
||||
|
||||
// AddKernelParam allows the addition of new kernel parameters to an existing
|
||||
@@ -1207,6 +1215,8 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid st
|
||||
|
||||
DisableGuestSeccomp: runtime.DisableGuestSeccomp,
|
||||
|
||||
EmptyDirMode: runtime.EmptyDirMode,
|
||||
|
||||
EnableVCPUsPinning: runtime.EnableVCPUsPinning,
|
||||
|
||||
GuestSeLinuxLabel: runtime.GuestSeLinuxLabel,
|
||||
@@ -1216,6 +1226,8 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid st
|
||||
CreateContainerTimeout: runtime.CreateContainerTimeout,
|
||||
|
||||
ForceGuestPull: runtime.ForceGuestPull,
|
||||
|
||||
KubeletRootDir: runtime.KubeletRootDir,
|
||||
}
|
||||
|
||||
if err := addAnnotations(ocispec, &sandboxConfig, runtime); err != nil {
|
||||
|
||||
@@ -186,13 +186,15 @@ func NewResourceController(path string, resources *specs.LinuxResources) (Resour
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewSandboxResourceController(path string, resources *specs.LinuxResources, sandboxCgroupOnly bool) (ResourceController, error) {
|
||||
func NewSandboxResourceController(path string, resources *specs.LinuxResources, sandboxCgroupOnly bool, needsHypervisorDevices bool) (ResourceController, error) {
|
||||
sandboxResources := *resources
|
||||
sandboxDevices, err := sandboxDevices()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if needsHypervisorDevices {
|
||||
sandboxDevs, err := sandboxDevices()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sandboxResources.Devices = append(sandboxResources.Devices, sandboxDevs...)
|
||||
}
|
||||
sandboxResources.Devices = append(sandboxResources.Devices, sandboxDevices...)
|
||||
|
||||
// Currently we know to handle systemd cgroup path only when it's the only cgroup (no overhead group), hence,
|
||||
// if sandboxCgroupOnly is not true we treat it as cgroupfs path as it used to be, although it may be incorrect.
|
||||
|
||||
@@ -21,7 +21,7 @@ func NewResourceController(path string, resources *specs.LinuxResources) (Resour
|
||||
return &DarwinResourceController{}, nil
|
||||
}
|
||||
|
||||
func NewSandboxResourceController(path string, resources *specs.LinuxResources, sandboxCgroupOnly bool) (ResourceController, error) {
|
||||
func NewSandboxResourceController(path string, resources *specs.LinuxResources, sandboxCgroupOnly bool, needsHypervisorDevices bool) (ResourceController, error) {
|
||||
return &DarwinResourceController{}, nil
|
||||
}
|
||||
|
||||
|
||||
181
src/runtime/vendor/github.com/go-openapi/errors/.cliff.toml
generated
vendored
181
src/runtime/vendor/github.com/go-openapi/errors/.cliff.toml
generated
vendored
@@ -1,181 +0,0 @@
|
||||
# git-cliff ~ configuration file
|
||||
# https://git-cliff.org/docs/configuration
|
||||
|
||||
[changelog]
|
||||
header = """
|
||||
"""
|
||||
|
||||
footer = """
|
||||
|
||||
-----
|
||||
|
||||
**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms**
|
||||
|
||||
[![License][license-badge]][license-url]
|
||||
|
||||
[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
|
||||
[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme
|
||||
|
||||
{%- macro remote_url() -%}
|
||||
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
|
||||
{%- endmacro -%}
|
||||
"""
|
||||
|
||||
body = """
|
||||
{%- if version %}
|
||||
## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||
{%- else %}
|
||||
## [unreleased]
|
||||
{%- endif %}
|
||||
{%- if message %}
|
||||
{%- raw %}\n{% endraw %}
|
||||
{{ message }}
|
||||
{%- raw %}\n{% endraw %}
|
||||
{%- endif %}
|
||||
{%- if version %}
|
||||
{%- if previous.version %}
|
||||
|
||||
**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}>
|
||||
{%- endif %}
|
||||
{%- else %}
|
||||
{%- raw %}\n{% endraw %}
|
||||
{%- endif %}
|
||||
|
||||
{%- if statistics %}{% if statistics.commit_count %}
|
||||
{%- raw %}\n{% endraw %}
|
||||
{{ statistics.commit_count }} commits in this release.
|
||||
{%- raw %}\n{% endraw %}
|
||||
{%- endif %}{% endif %}
|
||||
-----
|
||||
|
||||
{%- for group, commits in commits | group_by(attribute="group") %}
|
||||
{%- raw %}\n{% endraw %}
|
||||
### {{ group | upper_first }}
|
||||
{%- raw %}\n{% endraw %}
|
||||
{%- for commit in commits %}
|
||||
{%- if commit.remote.pr_title %}
|
||||
{%- set commit_message = commit.remote.pr_title %}
|
||||
{%- else %}
|
||||
{%- set commit_message = commit.message %}
|
||||
{%- endif %}
|
||||
* {{ commit_message | split(pat="\n") | first | trim }}
|
||||
{%- if commit.remote.username %}
|
||||
{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }})
|
||||
{%- endif %}
|
||||
{%- if commit.remote.pr_number %}
|
||||
{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }})
|
||||
{%- endif %}
|
||||
{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }})
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
|
||||
{%- if github %}
|
||||
{%- raw %}\n{% endraw -%}
|
||||
{%- set all_contributors = github.contributors | length %}
|
||||
{%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %}
|
||||
-----
|
||||
|
||||
### People who contributed to this release
|
||||
{% endif %}
|
||||
{%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %}
|
||||
{%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
|
||||
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }})
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
|
||||
{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
|
||||
-----
|
||||
{%- raw %}\n{% endraw %}
|
||||
|
||||
### New Contributors
|
||||
{%- endif %}
|
||||
|
||||
{%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
|
||||
{%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
|
||||
* @{{ contributor.username }} made their first contribution
|
||||
{%- if contributor.pr_number %}
|
||||
in [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
|
||||
{%- raw %}\n{% endraw %}
|
||||
|
||||
{%- macro remote_url() -%}
|
||||
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
|
||||
{%- endmacro -%}
|
||||
"""
|
||||
# Remove leading and trailing whitespaces from the changelog's body.
|
||||
trim = true
|
||||
# Render body even when there are no releases to process.
|
||||
render_always = true
|
||||
# An array of regex based postprocessors to modify the changelog.
|
||||
postprocessors = [
|
||||
# Replace the placeholder <REPO> with a URL.
|
||||
#{ pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" },
|
||||
]
|
||||
# output file path
|
||||
# output = "test.md"
|
||||
|
||||
[git]
|
||||
# Parse commits according to the conventional commits specification.
|
||||
# See https://www.conventionalcommits.org
|
||||
conventional_commits = false
|
||||
# Exclude commits that do not match the conventional commits specification.
|
||||
filter_unconventional = false
|
||||
# Require all commits to be conventional.
|
||||
# Takes precedence over filter_unconventional.
|
||||
require_conventional = false
|
||||
# Split commits on newlines, treating each line as an individual commit.
|
||||
split_commits = false
|
||||
# An array of regex based parsers to modify commit messages prior to further processing.
|
||||
commit_preprocessors = [
|
||||
# Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
|
||||
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
|
||||
# Check spelling of the commit message using https://github.com/crate-ci/typos.
|
||||
# If the spelling is incorrect, it will be fixed automatically.
|
||||
#{ pattern = '.*', replace_command = 'typos --write-changes -' }
|
||||
]
|
||||
# Prevent commits that are breaking from being excluded by commit parsers.
|
||||
protect_breaking_commits = false
|
||||
# An array of regex based parsers for extracting data from the commit message.
|
||||
# Assigns commits to groups.
|
||||
# Optionally sets the commit's scope and can decide to exclude commits from further processing.
|
||||
commit_parsers = [
|
||||
{ message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true },
|
||||
{ message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true },
|
||||
{ field = "author.name", pattern = "dependabot*", group = "<!-- 0A -->Updates" },
|
||||
{ message = "([Ss]ecurity)|([Vv]uln)", group = "<!-- 08 -->Security" },
|
||||
{ body = "(.*[Ss]ecurity)|([Vv]uln)", group = "<!-- 08 -->Security" },
|
||||
{ message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "<!-- 05 -->Code quality" },
|
||||
{ message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "<!-- 03 -->Documentation" },
|
||||
{ message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "<!-- 00 -->Implemented enhancements" },
|
||||
{ message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "<!-- 07 -->Miscellaneous tasks" },
|
||||
{ message = "^test", group = "<!-- 06 -->Testing" },
|
||||
{ message = "(^fix)|(panic)", group = "<!-- 01 -->Fixed bugs" },
|
||||
{ message = "(^refact)|(rework)", group = "<!-- 02 -->Refactor" },
|
||||
{ message = "(^[Pp]erf)|(performance)", group = "<!-- 04 -->Performance" },
|
||||
{ message = "(^[Cc]hore)", group = "<!-- 07 -->Miscellaneous tasks" },
|
||||
{ message = "^[Rr]evert", group = "<!-- 09 -->Reverted changes" },
|
||||
{ message = "(upgrade.*?go)|(go\\s+version)", group = "<!-- 0A -->Updates" },
|
||||
{ message = ".*", group = "<!-- 0B -->Other" },
|
||||
]
|
||||
# Exclude commits that are not matched by any commit parser.
|
||||
filter_commits = false
|
||||
# An array of link parsers for extracting external references, and turning them into URLs, using regex.
|
||||
link_parsers = []
|
||||
# Include only the tags that belong to the current branch.
|
||||
use_branch_tags = false
|
||||
# Order releases topologically instead of chronologically.
|
||||
topo_order = false
|
||||
# Order releases topologically instead of chronologically.
|
||||
topo_order_commits = true
|
||||
# Order of commits in each group/release within the changelog.
|
||||
# Allowed values: newest, oldest
|
||||
sort_commits = "newest"
|
||||
# Process submodules commits
|
||||
recurse_submodules = false
|
||||
|
||||
#[remote.github]
|
||||
#owner = "go-openapi"
|
||||
26
src/runtime/vendor/github.com/go-openapi/errors/.editorconfig
generated
vendored
26
src/runtime/vendor/github.com/go-openapi/errors/.editorconfig
generated
vendored
@@ -1,26 +0,0 @@
|
||||
# top-most EditorConfig file
|
||||
root = true
|
||||
|
||||
# Unix-style newlines with a newline ending every file
|
||||
[*]
|
||||
end_of_line = lf
|
||||
insert_final_newline = true
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
trim_trailing_whitespace = true
|
||||
|
||||
# Set default charset
|
||||
[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
|
||||
charset = utf-8
|
||||
|
||||
# Tab indentation (no size specified)
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
|
||||
# Matches the exact files either package.json or .travis.yml
|
||||
[{package.json,.travis.yml}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
3
src/runtime/vendor/github.com/go-openapi/errors/.gitignore
generated
vendored
3
src/runtime/vendor/github.com/go-openapi/errors/.gitignore
generated
vendored
@@ -1,3 +1,2 @@
|
||||
secrets.yml
|
||||
*.out
|
||||
settings.local.json
|
||||
coverage.out
|
||||
|
||||
107
src/runtime/vendor/github.com/go-openapi/errors/.golangci.yml
generated
vendored
107
src/runtime/vendor/github.com/go-openapi/errors/.golangci.yml
generated
vendored
@@ -1,66 +1,55 @@
|
||||
version: "2"
|
||||
linters-settings:
|
||||
gocyclo:
|
||||
min-complexity: 45
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
default: all
|
||||
enable-all: true
|
||||
disable:
|
||||
- depguard
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- funlen
|
||||
- godox
|
||||
- exhaustruct
|
||||
- nlreturn
|
||||
- nonamedreturns
|
||||
- noinlineerr
|
||||
- paralleltest
|
||||
- recvcheck
|
||||
- testpackage
|
||||
- tparallel
|
||||
- varnamelen
|
||||
- gocognit
|
||||
- whitespace
|
||||
- wrapcheck
|
||||
- wsl
|
||||
- wsl_v5
|
||||
settings:
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
cyclop:
|
||||
max-complexity: 20
|
||||
gocyclo:
|
||||
min-complexity: 20
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: true
|
||||
default-case-required: true
|
||||
lll:
|
||||
line-length: 180
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
- gofumpt
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
issues:
|
||||
# Maximum issues count per one linter.
|
||||
# Set to 0 to disable.
|
||||
# Default: 50
|
||||
max-issues-per-linter: 0
|
||||
# Maximum count of issues with the same text.
|
||||
# Set to 0 to disable.
|
||||
# Default: 3
|
||||
max-same-issues: 0
|
||||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
- depguard
|
||||
- errchkjson
|
||||
- inamedparam
|
||||
- nonamedreturns
|
||||
- musttag
|
||||
- ireturn
|
||||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
#- deadcode
|
||||
#- interfacer
|
||||
#- scopelint
|
||||
#- varcheck
|
||||
#- structcheck
|
||||
#- golint
|
||||
#- nosnakecase
|
||||
#- maligned
|
||||
#- goerr113
|
||||
#- ifshort
|
||||
#- gomnd
|
||||
#- exhaustivestruct
|
||||
|
||||
24
src/runtime/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md
generated
vendored
24
src/runtime/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md
generated
vendored
@@ -1,24 +0,0 @@
|
||||
# Contributors
|
||||
|
||||
- Repository: ['go-openapi/errors']
|
||||
|
||||
| Total Contributors | Total Contributions |
|
||||
| --- | --- |
|
||||
| 12 | 105 |
|
||||
|
||||
| Username | All Time Contribution Count | All Commits |
|
||||
| --- | --- | --- |
|
||||
| @casualjim | 58 | https://github.com/go-openapi/errors/commits?author=casualjim |
|
||||
| @fredbi | 32 | https://github.com/go-openapi/errors/commits?author=fredbi |
|
||||
| @youyuanwu | 5 | https://github.com/go-openapi/errors/commits?author=youyuanwu |
|
||||
| @alexandear | 2 | https://github.com/go-openapi/errors/commits?author=alexandear |
|
||||
| @fiorix | 1 | https://github.com/go-openapi/errors/commits?author=fiorix |
|
||||
| @ligustah | 1 | https://github.com/go-openapi/errors/commits?author=ligustah |
|
||||
| @artemseleznev | 1 | https://github.com/go-openapi/errors/commits?author=artemseleznev |
|
||||
| @gautierdelorme | 1 | https://github.com/go-openapi/errors/commits?author=gautierdelorme |
|
||||
| @guillemj | 1 | https://github.com/go-openapi/errors/commits?author=guillemj |
|
||||
| @maxatome | 1 | https://github.com/go-openapi/errors/commits?author=maxatome |
|
||||
| @Simon-Li | 1 | https://github.com/go-openapi/errors/commits?author=Simon-Li |
|
||||
| @ujjwalsh | 1 | https://github.com/go-openapi/errors/commits?author=ujjwalsh |
|
||||
|
||||
_this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_
|
||||
125
src/runtime/vendor/github.com/go-openapi/errors/README.md
generated
vendored
125
src/runtime/vendor/github.com/go-openapi/errors/README.md
generated
vendored
@@ -1,123 +1,8 @@
|
||||
# errors
|
||||
# OpenAPI errors [](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/errors)
|
||||
|
||||
<!-- Badges: status -->
|
||||
[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url]
|
||||
<!-- Badges: release & docker images -->
|
||||
<!-- Badges: code quality -->
|
||||
<!-- Badges: license & compliance -->
|
||||
[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url]
|
||||
<!-- Badges: documentation & support -->
|
||||
<!-- Badges: others & stats -->
|
||||
[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
|
||||
|
||||
---
|
||||
[](https://slackin.goswagger.io)
|
||||
[](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE)
|
||||
[](https://pkg.go.dev/github.com/go-openapi/errors)
|
||||
[](https://goreportcard.com/report/github.com/go-openapi/errors)
|
||||
|
||||
Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit.
|
||||
|
||||
## Announcements
|
||||
|
||||
* **2025-12-19** : new community chat on discord
|
||||
* a new discord community channel is available to be notified of changes and support users
|
||||
* our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31**
|
||||
|
||||
You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url]
|
||||
|
||||
Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url]
|
||||
|
||||
## Status
|
||||
|
||||
API is stable.
|
||||
|
||||
## Import this library in your project
|
||||
|
||||
```cmd
|
||||
go get github.com/go-openapi/errors
|
||||
```
|
||||
|
||||
## Basic usage
|
||||
|
||||
```go
|
||||
const url = "https://www.example.com/#"
|
||||
|
||||
errGeneric := New(401,"onvalid argument: %s", url)
|
||||
|
||||
errNotFound := NotFound("resource not found: %s", url)
|
||||
|
||||
errNotImplemented := NotImplemented("method: %s", url)
|
||||
```
|
||||
|
||||
## Change log
|
||||
|
||||
See <https://github.com/go-openapi/errors/releases>
|
||||
|
||||
<!--
|
||||
## References
|
||||
-->
|
||||
|
||||
## Licensing
|
||||
|
||||
This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
|
||||
|
||||
<!--
|
||||
See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software
|
||||
on top of which it has been built.
|
||||
-->
|
||||
|
||||
<!--
|
||||
## Limitations
|
||||
-->
|
||||
|
||||
## Other documentation
|
||||
|
||||
* [All-time contributors](./CONTRIBUTORS.md)
|
||||
* [Contributing guidelines](.github/CONTRIBUTING.md)
|
||||
* [Maintainers documentation](docs/MAINTAINERS.md)
|
||||
* [Code style](docs/STYLE.md)
|
||||
|
||||
## Cutting a new release
|
||||
|
||||
Maintainers can cut a new release by either:
|
||||
|
||||
* running [this workflow](https://github.com/go-openapi/errors/actions/workflows/bump-release.yml)
|
||||
* or pushing a semver tag
|
||||
* signed tags are preferred
|
||||
* The tag message is prepended to release notes
|
||||
|
||||
<!-- Badges: status -->
|
||||
[test-badge]: https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg
|
||||
[test-url]: https://github.com/go-openapi/errors/actions/workflows/go-test.yml
|
||||
[cov-badge]: https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg
|
||||
[cov-url]: https://codecov.io/gh/go-openapi/errors
|
||||
[vuln-scan-badge]: https://github.com/go-openapi/errors/actions/workflows/scanner.yml/badge.svg
|
||||
[vuln-scan-url]: https://github.com/go-openapi/errors/actions/workflows/scanner.yml
|
||||
[codeql-badge]: https://github.com/go-openapi/errors/actions/workflows/codeql.yml/badge.svg
|
||||
[codeql-url]: https://github.com/go-openapi/errors/actions/workflows/codeql.yml
|
||||
<!-- Badges: release & docker images -->
|
||||
[release-badge]: https://badge.fury.io/gh/go-openapi%2Ferrors.svg
|
||||
[release-url]: https://badge.fury.io/gh/go-openapi%2Ferrors
|
||||
[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Ferrors.svg
|
||||
[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Ferrors
|
||||
<!-- Badges: code quality -->
|
||||
[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/errors
|
||||
[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/errors
|
||||
[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/errors
|
||||
[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/errors
|
||||
<!-- Badges: documentation & support -->
|
||||
[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F
|
||||
[doc-url]: https://goswagger.io/go-openapi
|
||||
[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/errors
|
||||
[godoc-url]: http://pkg.go.dev/github.com/go-openapi/errors
|
||||
[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png
|
||||
[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM
|
||||
[slack-url]: https://goswagger.slack.com/archives/C04R30YMU
|
||||
[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue
|
||||
[discord-url]: https://discord.gg/DrafRmZx
|
||||
|
||||
<!-- Badges: license & compliance -->
|
||||
[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
|
||||
[license-url]: https://github.com/go-openapi/errors/?tab=Apache-2.0-1-ov-file#readme
|
||||
<!-- Badges: others & stats -->
|
||||
[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/errors
|
||||
[goversion-url]: https://github.com/go-openapi/errors/blob/master/go.mod
|
||||
[top-badge]: https://img.shields.io/github/languages/top/go-openapi/errors
|
||||
[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/errors/latest
|
||||
|
||||
19
src/runtime/vendor/github.com/go-openapi/errors/SECURITY.md
generated
vendored
19
src/runtime/vendor/github.com/go-openapi/errors/SECURITY.md
generated
vendored
@@ -1,19 +0,0 @@
|
||||
# Security Policy
|
||||
|
||||
This policy outlines the commitment and practices of the go-openapi maintainers regarding security.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 0.22.x | :white_check_mark: |
|
||||
|
||||
## Reporting a vulnerability
|
||||
|
||||
If you become aware of a security vulnerability that affects the current repository,
|
||||
please report it privately to the maintainers.
|
||||
|
||||
Please follow the instructions provided by github to
|
||||
[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
|
||||
|
||||
TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability".
|
||||
139
src/runtime/vendor/github.com/go-openapi/errors/api.go
generated
vendored
139
src/runtime/vendor/github.com/go-openapi/errors/api.go
generated
vendored
@@ -1,11 +1,21 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
@@ -13,11 +23,9 @@ import (
|
||||
)
|
||||
|
||||
// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code.
|
||||
//
|
||||
//nolint:gochecknoglobals // it should have been a constant in the first place, but now it is mutable so we have to leave it here or introduce a breaking change.
|
||||
var DefaultHTTPCode = http.StatusUnprocessableEntity
|
||||
|
||||
// Error represents a error interface all swagger framework errors implement.
|
||||
// Error represents a error interface all swagger framework errors implement
|
||||
type Error interface {
|
||||
error
|
||||
Code() int32
|
||||
@@ -28,26 +36,24 @@ type apiError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
// Error implements the standard error interface.
|
||||
func (a *apiError) Error() string {
|
||||
return a.message
|
||||
}
|
||||
|
||||
// Code returns the HTTP status code associated with this error.
|
||||
func (a *apiError) Code() int32 {
|
||||
return a.code
|
||||
}
|
||||
|
||||
// MarshalJSON implements the JSON encoding interface.
|
||||
// MarshalJSON implements the JSON encoding interface
|
||||
func (a apiError) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]any{
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"code": a.code,
|
||||
"message": a.message,
|
||||
})
|
||||
}
|
||||
|
||||
// New creates a new API error with a code and a message.
|
||||
func New(code int32, message string, args ...any) Error {
|
||||
// New creates a new API error with a code and a message
|
||||
func New(code int32, message string, args ...interface{}) Error {
|
||||
if len(args) > 0 {
|
||||
return &apiError{
|
||||
code: code,
|
||||
@@ -60,39 +66,38 @@ func New(code int32, message string, args ...any) Error {
|
||||
}
|
||||
}
|
||||
|
||||
// NotFound creates a new not found error.
|
||||
func NotFound(message string, args ...any) Error {
|
||||
// NotFound creates a new not found error
|
||||
func NotFound(message string, args ...interface{}) Error {
|
||||
if message == "" {
|
||||
message = "Not found"
|
||||
}
|
||||
return New(http.StatusNotFound, message, args...)
|
||||
return New(http.StatusNotFound, fmt.Sprintf(message, args...))
|
||||
}
|
||||
|
||||
// NotImplemented creates a new not implemented error.
|
||||
// NotImplemented creates a new not implemented error
|
||||
func NotImplemented(message string) Error {
|
||||
return New(http.StatusNotImplemented, "%s", message)
|
||||
return New(http.StatusNotImplemented, message)
|
||||
}
|
||||
|
||||
// MethodNotAllowedError represents an error for when the path matches but the method doesn't.
|
||||
// MethodNotAllowedError represents an error for when the path matches but the method doesn't
|
||||
type MethodNotAllowedError struct {
|
||||
code int32
|
||||
Allowed []string
|
||||
message string
|
||||
}
|
||||
|
||||
// Error implements the standard error interface.
|
||||
func (m *MethodNotAllowedError) Error() string {
|
||||
return m.message
|
||||
}
|
||||
|
||||
// Code returns 405 (Method Not Allowed) as the HTTP status code.
|
||||
// Code the error code
|
||||
func (m *MethodNotAllowedError) Code() int32 {
|
||||
return m.code
|
||||
}
|
||||
|
||||
// MarshalJSON implements the JSON encoding interface.
|
||||
// MarshalJSON implements the JSON encoding interface
|
||||
func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]any{
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"code": m.code,
|
||||
"message": m.message,
|
||||
"allowed": m.Allowed,
|
||||
@@ -110,33 +115,25 @@ func errorAsJSON(err Error) []byte {
|
||||
|
||||
func flattenComposite(errs *CompositeError) *CompositeError {
|
||||
var res []error
|
||||
|
||||
for _, err := range errs.Errors {
|
||||
if err == nil {
|
||||
continue
|
||||
for _, er := range errs.Errors {
|
||||
switch e := er.(type) {
|
||||
case *CompositeError:
|
||||
if e != nil && len(e.Errors) > 0 {
|
||||
flat := flattenComposite(e)
|
||||
if len(flat.Errors) > 0 {
|
||||
res = append(res, flat.Errors...)
|
||||
}
|
||||
}
|
||||
default:
|
||||
if e != nil {
|
||||
res = append(res, e)
|
||||
}
|
||||
}
|
||||
|
||||
e := &CompositeError{}
|
||||
if !errors.As(err, &e) {
|
||||
res = append(res, err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if len(e.Errors) == 0 {
|
||||
res = append(res, e)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
flat := flattenComposite(e)
|
||||
res = append(res, flat.Errors...)
|
||||
}
|
||||
|
||||
return CompositeValidationError(res...)
|
||||
}
|
||||
|
||||
// MethodNotAllowed creates a new method not allowed error.
|
||||
// MethodNotAllowed creates a new method not allowed error
|
||||
func MethodNotAllowed(requested string, allow []string) Error {
|
||||
msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
|
||||
return &MethodNotAllowedError{
|
||||
@@ -146,59 +143,43 @@ func MethodNotAllowed(requested string, allow []string) Error {
|
||||
}
|
||||
}
|
||||
|
||||
// ServeError implements the http error handler interface.
|
||||
// ServeError implements the http error handler interface
|
||||
func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
|
||||
rw.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if err == nil {
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
errComposite := &CompositeError{}
|
||||
errMethodNotAllowed := &MethodNotAllowedError{}
|
||||
var errError Error
|
||||
|
||||
switch {
|
||||
case errors.As(err, &errComposite):
|
||||
er := flattenComposite(errComposite)
|
||||
switch e := err.(type) {
|
||||
case *CompositeError:
|
||||
er := flattenComposite(e)
|
||||
// strips composite errors to first element only
|
||||
if len(er.Errors) > 0 {
|
||||
ServeError(rw, r, er.Errors[0])
|
||||
|
||||
return
|
||||
} else {
|
||||
// guard against empty CompositeError (invalid construct)
|
||||
ServeError(rw, r, nil)
|
||||
}
|
||||
|
||||
// guard against empty CompositeError (invalid construct)
|
||||
ServeError(rw, r, nil)
|
||||
|
||||
case errors.As(err, &errMethodNotAllowed):
|
||||
rw.Header().Add("Allow", strings.Join(errMethodNotAllowed.Allowed, ","))
|
||||
rw.WriteHeader(asHTTPCode(int(errMethodNotAllowed.Code())))
|
||||
case *MethodNotAllowedError:
|
||||
rw.Header().Add("Allow", strings.Join(e.Allowed, ","))
|
||||
rw.WriteHeader(asHTTPCode(int(e.Code())))
|
||||
if r == nil || r.Method != http.MethodHead {
|
||||
_, _ = rw.Write(errorAsJSON(errMethodNotAllowed))
|
||||
_, _ = rw.Write(errorAsJSON(e))
|
||||
}
|
||||
|
||||
case errors.As(err, &errError):
|
||||
value := reflect.ValueOf(errError)
|
||||
case Error:
|
||||
value := reflect.ValueOf(e)
|
||||
if value.Kind() == reflect.Ptr && value.IsNil() {
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
rw.WriteHeader(asHTTPCode(int(errError.Code())))
|
||||
rw.WriteHeader(asHTTPCode(int(e.Code())))
|
||||
if r == nil || r.Method != http.MethodHead {
|
||||
_, _ = rw.Write(errorAsJSON(errError))
|
||||
_, _ = rw.Write(errorAsJSON(e))
|
||||
}
|
||||
|
||||
case nil:
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
|
||||
default:
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
if r == nil || r.Method != http.MethodHead {
|
||||
_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "%v", err)))
|
||||
_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
17
src/runtime/vendor/github.com/go-openapi/errors/auth.go
generated
vendored
17
src/runtime/vendor/github.com/go-openapi/errors/auth.go
generated
vendored
@@ -1,11 +1,22 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import "net/http"
|
||||
|
||||
// Unauthenticated returns an unauthenticated error.
|
||||
// Unauthenticated returns an unauthenticated error
|
||||
func Unauthenticated(scheme string) Error {
|
||||
return New(http.StatusUnauthorized, "unauthenticated for %s", scheme)
|
||||
}
|
||||
|
||||
15
src/runtime/vendor/github.com/go-openapi/errors/doc.go
generated
vendored
15
src/runtime/vendor/github.com/go-openapi/errors/doc.go
generated
vendored
@@ -1,5 +1,16 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package errors provides an Error interface and several concrete types
|
||||
|
||||
41
src/runtime/vendor/github.com/go-openapi/errors/headers.go
generated
vendored
41
src/runtime/vendor/github.com/go-openapi/errors/headers.go
generated
vendored
@@ -1,5 +1,16 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
@@ -9,30 +20,28 @@ import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Validation represents a failure of a precondition.
|
||||
type Validation struct { //nolint: errname // changing the name to abide by the naming rule would bring a breaking change.
|
||||
// Validation represents a failure of a precondition
|
||||
type Validation struct { //nolint: errname
|
||||
code int32
|
||||
Name string
|
||||
In string
|
||||
Value any
|
||||
Value interface{}
|
||||
message string
|
||||
Values []any
|
||||
Values []interface{}
|
||||
}
|
||||
|
||||
// Error implements the standard error interface.
|
||||
func (e *Validation) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// Code returns the HTTP status code for this validation error.
|
||||
// Returns 422 (Unprocessable Entity) by default.
|
||||
// Code the error code
|
||||
func (e *Validation) Code() int32 {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// MarshalJSON implements the JSON encoding interface.
|
||||
// MarshalJSON implements the JSON encoding interface
|
||||
func (e Validation) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]any{
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"code": e.code,
|
||||
"message": e.message,
|
||||
"in": e.In,
|
||||
@@ -42,7 +51,7 @@ func (e Validation) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// ValidateName sets the name for a validation or updates it for a nested property.
|
||||
// ValidateName sets the name for a validation or updates it for a nested property
|
||||
func (e *Validation) ValidateName(name string) *Validation {
|
||||
if name != "" {
|
||||
if e.Name == "" {
|
||||
@@ -61,9 +70,9 @@ const (
|
||||
responseFormatFail = `unsupported media type requested, only %v are available`
|
||||
)
|
||||
|
||||
// InvalidContentType error for an invalid content type.
|
||||
// InvalidContentType error for an invalid content type
|
||||
func InvalidContentType(value string, allowed []string) *Validation {
|
||||
values := make([]any, 0, len(allowed))
|
||||
values := make([]interface{}, 0, len(allowed))
|
||||
for _, v := range allowed {
|
||||
values = append(values, v)
|
||||
}
|
||||
@@ -77,9 +86,9 @@ func InvalidContentType(value string, allowed []string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidResponseFormat error for an unacceptable response format request.
|
||||
// InvalidResponseFormat error for an unacceptable response format request
|
||||
func InvalidResponseFormat(value string, allowed []string) *Validation {
|
||||
values := make([]any, 0, len(allowed))
|
||||
values := make([]interface{}, 0, len(allowed))
|
||||
for _, v := range allowed {
|
||||
values = append(values, v)
|
||||
}
|
||||
|
||||
22
src/runtime/vendor/github.com/go-openapi/errors/middleware.go
generated
vendored
22
src/runtime/vendor/github.com/go-openapi/errors/middleware.go
generated
vendored
@@ -1,5 +1,16 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
@@ -10,14 +21,13 @@ import (
|
||||
)
|
||||
|
||||
// APIVerificationFailed is an error that contains all the missing info for a mismatched section
|
||||
// between the api registrations and the api spec.
|
||||
// between the api registrations and the api spec
|
||||
type APIVerificationFailed struct { //nolint: errname
|
||||
Section string `json:"section,omitempty"`
|
||||
MissingSpecification []string `json:"missingSpecification,omitempty"`
|
||||
MissingRegistration []string `json:"missingRegistration,omitempty"`
|
||||
}
|
||||
|
||||
// Error implements the standard error interface.
|
||||
func (v *APIVerificationFailed) Error() string {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
@@ -25,7 +35,7 @@ func (v *APIVerificationFailed) Error() string {
|
||||
hasSpecMissing := len(v.MissingSpecification) > 0
|
||||
|
||||
if hasRegMissing {
|
||||
fmt.Fprintf(buf, "missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)
|
||||
buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section))
|
||||
}
|
||||
|
||||
if hasRegMissing && hasSpecMissing {
|
||||
@@ -33,7 +43,7 @@ func (v *APIVerificationFailed) Error() string {
|
||||
}
|
||||
|
||||
if hasSpecMissing {
|
||||
fmt.Fprintf(buf, "missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)
|
||||
buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section))
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
|
||||
80
src/runtime/vendor/github.com/go-openapi/errors/parsing.go
generated
vendored
80
src/runtime/vendor/github.com/go-openapi/errors/parsing.go
generated
vendored
@@ -1,5 +1,16 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
@@ -9,7 +20,7 @@ import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ParseError represents a parsing error.
|
||||
// ParseError represents a parsing error
|
||||
type ParseError struct {
|
||||
code int32
|
||||
Name string
|
||||
@@ -19,7 +30,37 @@ type ParseError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
// NewParseError creates a new parse error.
|
||||
func (e *ParseError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// Code returns the http status code for this error
|
||||
func (e *ParseError) Code() int32 {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// MarshalJSON implements the JSON encoding interface
|
||||
func (e ParseError) MarshalJSON() ([]byte, error) {
|
||||
var reason string
|
||||
if e.Reason != nil {
|
||||
reason = e.Reason.Error()
|
||||
}
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"code": e.code,
|
||||
"message": e.message,
|
||||
"in": e.In,
|
||||
"name": e.Name,
|
||||
"value": e.Value,
|
||||
"reason": reason,
|
||||
})
|
||||
}
|
||||
|
||||
const (
|
||||
parseErrorTemplContent = `parsing %s %s from %q failed, because %s`
|
||||
parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
|
||||
)
|
||||
|
||||
// NewParseError creates a new parse error
|
||||
func NewParseError(name, in, value string, reason error) *ParseError {
|
||||
var msg string
|
||||
if in == "" {
|
||||
@@ -36,34 +77,3 @@ func NewParseError(name, in, value string, reason error) *ParseError {
|
||||
message: msg,
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements the standard error interface.
|
||||
func (e *ParseError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// Code returns 400 (Bad Request) as the HTTP status code for parsing errors.
|
||||
func (e *ParseError) Code() int32 {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// MarshalJSON implements the JSON encoding interface.
|
||||
func (e ParseError) MarshalJSON() ([]byte, error) {
|
||||
var reason string
|
||||
if e.Reason != nil {
|
||||
reason = e.Reason.Error()
|
||||
}
|
||||
return json.Marshal(map[string]any{
|
||||
"code": e.code,
|
||||
"message": e.message,
|
||||
"in": e.In,
|
||||
"name": e.Name,
|
||||
"value": e.Value,
|
||||
"reason": reason,
|
||||
})
|
||||
}
|
||||
|
||||
const (
|
||||
parseErrorTemplContent = `parsing %s %s from %q failed, because %s`
|
||||
parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
|
||||
)
|
||||
|
||||
142
src/runtime/vendor/github.com/go-openapi/errors/schema.go
generated
vendored
142
src/runtime/vendor/github.com/go-openapi/errors/schema.go
generated
vendored
@@ -1,11 +1,21 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -63,15 +73,14 @@ const (
|
||||
const maximumValidHTTPCode = 600
|
||||
|
||||
// All code responses can be used to differentiate errors for different handling
|
||||
// by the consuming program.
|
||||
// by the consuming program
|
||||
const (
|
||||
// CompositeErrorCode remains 422 for backwards-compatibility
|
||||
// and to separate it from validation errors with cause.
|
||||
// and to separate it from validation errors with cause
|
||||
CompositeErrorCode = http.StatusUnprocessableEntity
|
||||
|
||||
// InvalidTypeCode is used for any subclass of invalid types.
|
||||
// InvalidTypeCode is used for any subclass of invalid types
|
||||
InvalidTypeCode = maximumValidHTTPCode + iota
|
||||
// RequiredFailCode indicates a required field is missing.
|
||||
RequiredFailCode
|
||||
TooLongFailCode
|
||||
TooShortFailCode
|
||||
@@ -92,26 +101,22 @@ const (
|
||||
ReadOnlyFailCode
|
||||
)
|
||||
|
||||
// CompositeError is an error that groups several errors together.
|
||||
// CompositeError is an error that groups several errors together
|
||||
type CompositeError struct {
|
||||
Errors []error
|
||||
code int32
|
||||
message string
|
||||
}
|
||||
|
||||
// Code returns the HTTP status code for this composite error.
|
||||
// Code for this error
|
||||
func (c *CompositeError) Code() int32 {
|
||||
return c.code
|
||||
}
|
||||
|
||||
// Error implements the standard error interface.
|
||||
func (c *CompositeError) Error() string {
|
||||
if len(c.Errors) > 0 {
|
||||
msgs := []string{c.message + ":"}
|
||||
for _, e := range c.Errors {
|
||||
if e == nil {
|
||||
continue
|
||||
}
|
||||
msgs = append(msgs, e.Error())
|
||||
}
|
||||
return strings.Join(msgs, "\n")
|
||||
@@ -119,21 +124,20 @@ func (c *CompositeError) Error() string {
|
||||
return c.message
|
||||
}
|
||||
|
||||
// Unwrap implements the [errors.Unwrap] interface.
|
||||
func (c *CompositeError) Unwrap() []error {
|
||||
return c.Errors
|
||||
}
|
||||
|
||||
// MarshalJSON implements the JSON encoding interface.
|
||||
// MarshalJSON implements the JSON encoding interface
|
||||
func (c CompositeError) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]any{
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"code": c.code,
|
||||
"message": c.message,
|
||||
"errors": c.Errors,
|
||||
})
|
||||
}
|
||||
|
||||
// CompositeValidationError an error to wrap a bunch of other errors.
|
||||
// CompositeValidationError an error to wrap a bunch of other errors
|
||||
func CompositeValidationError(errors ...error) *CompositeError {
|
||||
return &CompositeError{
|
||||
code: CompositeErrorCode,
|
||||
@@ -142,33 +146,20 @@ func CompositeValidationError(errors ...error) *CompositeError {
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateName recursively sets the name for all validations or updates them for nested properties.
|
||||
// ValidateName recursively sets the name for all validations or updates them for nested properties
|
||||
func (c *CompositeError) ValidateName(name string) *CompositeError {
|
||||
for i, e := range c.Errors {
|
||||
if e == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
ce := &CompositeError{}
|
||||
if errors.As(e, &ce) {
|
||||
c.Errors[i] = ce.ValidateName(name)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
ve := &Validation{}
|
||||
if errors.As(e, &ve) {
|
||||
if ve, ok := e.(*Validation); ok {
|
||||
c.Errors[i] = ve.ValidateName(name)
|
||||
|
||||
continue
|
||||
} else if ce, ok := e.(*CompositeError); ok {
|
||||
c.Errors[i] = ce.ValidateName(name)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// FailedAllPatternProperties an error for when the property doesn't match a pattern.
|
||||
// FailedAllPatternProperties an error for when the property doesn't match a pattern
|
||||
func FailedAllPatternProperties(name, in, key string) *Validation {
|
||||
msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
|
||||
if in == "" {
|
||||
@@ -183,7 +174,7 @@ func FailedAllPatternProperties(name, in, key string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// PropertyNotAllowed an error for when the property doesn't match a pattern.
|
||||
// PropertyNotAllowed an error for when the property doesn't match a pattern
|
||||
func PropertyNotAllowed(name, in, key string) *Validation {
|
||||
msg := fmt.Sprintf(unallowedProperty, name, key, in)
|
||||
if in == "" {
|
||||
@@ -198,7 +189,7 @@ func PropertyNotAllowed(name, in, key string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// TooFewProperties an error for an object with too few properties.
|
||||
// TooFewProperties an error for an object with too few properties
|
||||
func TooFewProperties(name, in string, n int64) *Validation {
|
||||
msg := fmt.Sprintf(tooFewProperties, name, in, n)
|
||||
if in == "" {
|
||||
@@ -213,7 +204,7 @@ func TooFewProperties(name, in string, n int64) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// TooManyProperties an error for an object with too many properties.
|
||||
// TooManyProperties an error for an object with too many properties
|
||||
func TooManyProperties(name, in string, n int64) *Validation {
|
||||
msg := fmt.Sprintf(tooManyProperties, name, in, n)
|
||||
if in == "" {
|
||||
@@ -228,7 +219,7 @@ func TooManyProperties(name, in string, n int64) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// AdditionalItemsNotAllowed an error for invalid additional items.
|
||||
// AdditionalItemsNotAllowed an error for invalid additional items
|
||||
func AdditionalItemsNotAllowed(name, in string) *Validation {
|
||||
msg := fmt.Sprintf(noAdditionalItems, name, in)
|
||||
if in == "" {
|
||||
@@ -242,7 +233,7 @@ func AdditionalItemsNotAllowed(name, in string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidCollectionFormat another flavor of invalid type error.
|
||||
// InvalidCollectionFormat another flavor of invalid type error
|
||||
func InvalidCollectionFormat(name, in, format string) *Validation {
|
||||
return &Validation{
|
||||
code: InvalidTypeCode,
|
||||
@@ -253,7 +244,7 @@ func InvalidCollectionFormat(name, in, format string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidTypeName an error for when the type is invalid.
|
||||
// InvalidTypeName an error for when the type is invalid
|
||||
func InvalidTypeName(typeName string) *Validation {
|
||||
return &Validation{
|
||||
code: InvalidTypeCode,
|
||||
@@ -262,8 +253,8 @@ func InvalidTypeName(typeName string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidType creates an error for when the type is invalid.
|
||||
func InvalidType(name, in, typeName string, value any) *Validation {
|
||||
// InvalidType creates an error for when the type is invalid
|
||||
func InvalidType(name, in, typeName string, value interface{}) *Validation {
|
||||
var message string
|
||||
|
||||
if in != "" {
|
||||
@@ -293,9 +284,10 @@ func InvalidType(name, in, typeName string, value any) *Validation {
|
||||
Value: value,
|
||||
message: message,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// DuplicateItems error for when an array contains duplicates.
|
||||
// DuplicateItems error for when an array contains duplicates
|
||||
func DuplicateItems(name, in string) *Validation {
|
||||
msg := fmt.Sprintf(uniqueFail, name, in)
|
||||
if in == "" {
|
||||
@@ -309,8 +301,8 @@ func DuplicateItems(name, in string) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// TooManyItems error for when an array contains too many items.
|
||||
func TooManyItems(name, in string, maximum int64, value any) *Validation {
|
||||
// TooManyItems error for when an array contains too many items
|
||||
func TooManyItems(name, in string, maximum int64, value interface{}) *Validation {
|
||||
msg := fmt.Sprintf(maximumItemsFail, name, in, maximum)
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum)
|
||||
@@ -325,8 +317,8 @@ func TooManyItems(name, in string, maximum int64, value any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// TooFewItems error for when an array contains too few items.
|
||||
func TooFewItems(name, in string, minimum int64, value any) *Validation {
|
||||
// TooFewItems error for when an array contains too few items
|
||||
func TooFewItems(name, in string, minimum int64, value interface{}) *Validation {
|
||||
msg := fmt.Sprintf(minItemsFail, name, in, minimum)
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(minItemsFailNoIn, name, minimum)
|
||||
@@ -340,8 +332,8 @@ func TooFewItems(name, in string, minimum int64, value any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// ExceedsMaximumInt error for when maximum validation fails.
|
||||
func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value any) *Validation {
|
||||
// ExceedsMaximumInt error for when maximumimum validation fails
|
||||
func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := maximumIncFailNoIn
|
||||
@@ -365,8 +357,8 @@ func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value any
|
||||
}
|
||||
}
|
||||
|
||||
// ExceedsMaximumUint error for when maximum validation fails.
|
||||
func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value any) *Validation {
|
||||
// ExceedsMaximumUint error for when maximumimum validation fails
|
||||
func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := maximumIncFailNoIn
|
||||
@@ -390,8 +382,8 @@ func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value a
|
||||
}
|
||||
}
|
||||
|
||||
// ExceedsMaximum error for when maximum validation fails.
|
||||
func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value any) *Validation {
|
||||
// ExceedsMaximum error for when maximumimum validation fails
|
||||
func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := maximumIncFailNoIn
|
||||
@@ -415,8 +407,8 @@ func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value any)
|
||||
}
|
||||
}
|
||||
|
||||
// ExceedsMinimumInt error for when minimum validation fails.
|
||||
func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value any) *Validation {
|
||||
// ExceedsMinimumInt error for when minimum validation fails
|
||||
func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := minIncFailNoIn
|
||||
@@ -440,8 +432,8 @@ func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value any
|
||||
}
|
||||
}
|
||||
|
||||
// ExceedsMinimumUint error for when minimum validation fails.
|
||||
func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value any) *Validation {
|
||||
// ExceedsMinimumUint error for when minimum validation fails
|
||||
func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := minIncFailNoIn
|
||||
@@ -465,8 +457,8 @@ func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value a
|
||||
}
|
||||
}
|
||||
|
||||
// ExceedsMinimum error for when minimum validation fails.
|
||||
func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value any) *Validation {
|
||||
// ExceedsMinimum error for when minimum validation fails
|
||||
func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := minIncFailNoIn
|
||||
@@ -490,8 +482,8 @@ func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value any)
|
||||
}
|
||||
}
|
||||
|
||||
// NotMultipleOf error for when multiple of validation fails.
|
||||
func NotMultipleOf(name, in string, multiple, value any) *Validation {
|
||||
// NotMultipleOf error for when multiple of validation fails
|
||||
func NotMultipleOf(name, in string, multiple, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple)
|
||||
@@ -507,8 +499,8 @@ func NotMultipleOf(name, in string, multiple, value any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// EnumFail error for when an enum validation fails.
|
||||
func EnumFail(name, in string, value any, values []any) *Validation {
|
||||
// EnumFail error for when an enum validation fails
|
||||
func EnumFail(name, in string, value interface{}, values []interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(enumFailNoIn, name, values)
|
||||
@@ -526,8 +518,8 @@ func EnumFail(name, in string, value any, values []any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// Required error for when a value is missing.
|
||||
func Required(name, in string, value any) *Validation {
|
||||
// Required error for when a value is missing
|
||||
func Required(name, in string, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(requiredFailNoIn, name)
|
||||
@@ -543,8 +535,8 @@ func Required(name, in string, value any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// ReadOnly error for when a value is present in request.
|
||||
func ReadOnly(name, in string, value any) *Validation {
|
||||
// ReadOnly error for when a value is present in request
|
||||
func ReadOnly(name, in string, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(readOnlyFailNoIn, name)
|
||||
@@ -560,8 +552,8 @@ func ReadOnly(name, in string, value any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// TooLong error for when a string is too long.
|
||||
func TooLong(name, in string, maximum int64, value any) *Validation {
|
||||
// TooLong error for when a string is too long
|
||||
func TooLong(name, in string, maximum int64, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum)
|
||||
@@ -577,8 +569,8 @@ func TooLong(name, in string, maximum int64, value any) *Validation {
|
||||
}
|
||||
}
|
||||
|
||||
// TooShort error for when a string is too short.
|
||||
func TooShort(name, in string, minimum int64, value any) *Validation {
|
||||
// TooShort error for when a string is too short
|
||||
func TooShort(name, in string, minimum int64, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum)
|
||||
@@ -597,7 +589,7 @@ func TooShort(name, in string, minimum int64, value any) *Validation {
|
||||
|
||||
// FailedPattern error for when a string fails a regex pattern match
|
||||
// the pattern that is returned is the ECMA syntax version of the pattern not the golang version.
|
||||
func FailedPattern(name, in, pattern string, value any) *Validation {
|
||||
func FailedPattern(name, in, pattern string, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(patternFailNoIn, name, pattern)
|
||||
@@ -615,8 +607,8 @@ func FailedPattern(name, in, pattern string, value any) *Validation {
|
||||
}
|
||||
|
||||
// MultipleOfMustBePositive error for when a
|
||||
// multipleOf factor is negative.
|
||||
func MultipleOfMustBePositive(name, in string, factor any) *Validation {
|
||||
// multipleOf factor is negative
|
||||
func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation {
|
||||
return &Validation{
|
||||
code: MultipleOfMustBePositiveCode,
|
||||
Name: name,
|
||||
|
||||
202
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/LICENSE
generated
vendored
202
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
94
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes/bytes.go
generated
vendored
94
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes/bytes.go
generated
vendored
@@ -1,94 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package bytes
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Raw returns just the bytes without any assumptions about layout
|
||||
type Raw interface {
|
||||
Raw() *[]byte
|
||||
}
|
||||
|
||||
// Reader used to read various data sizes in the byte array
|
||||
type Reader interface {
|
||||
Read8(pos int) uint8
|
||||
Read16(pos int) uint16
|
||||
Read32(pos int) uint32
|
||||
Read64(pos int) uint64
|
||||
Len() int
|
||||
}
|
||||
|
||||
// Writer used to write various sizes of data in the byte array
|
||||
type Writer interface {
|
||||
Write8(pos int, value uint8)
|
||||
Write16(pos int, value uint16)
|
||||
Write32(pos int, value uint32)
|
||||
Write64(pos int, value uint64)
|
||||
Len() int
|
||||
}
|
||||
|
||||
// Bytes object for manipulating arbitrary byte arrays
|
||||
type Bytes interface {
|
||||
Raw
|
||||
Reader
|
||||
Writer
|
||||
Slice(offset int, size int) Bytes
|
||||
LittleEndian() Bytes
|
||||
BigEndian() Bytes
|
||||
}
|
||||
|
||||
var nativeByteOrder binary.ByteOrder
|
||||
|
||||
func init() {
|
||||
buf := [2]byte{}
|
||||
*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0x00FF)
|
||||
|
||||
switch buf {
|
||||
case [2]byte{0xFF, 0x00}:
|
||||
nativeByteOrder = binary.LittleEndian
|
||||
case [2]byte{0x00, 0xFF}:
|
||||
nativeByteOrder = binary.BigEndian
|
||||
default:
|
||||
panic("Unable to infer byte order")
|
||||
}
|
||||
}
|
||||
|
||||
// New raw bytearray
|
||||
func New(data *[]byte) Bytes {
|
||||
return (*native)(data)
|
||||
}
|
||||
|
||||
// NewLittleEndian little endian ordering of bytes
|
||||
func NewLittleEndian(data *[]byte) Bytes {
|
||||
if nativeByteOrder == binary.LittleEndian {
|
||||
return (*native)(data)
|
||||
}
|
||||
|
||||
return (*swapbo)(data)
|
||||
}
|
||||
|
||||
// NewBigEndian big endian ordering of bytes
|
||||
func NewBigEndian(data *[]byte) Bytes {
|
||||
if nativeByteOrder == binary.BigEndian {
|
||||
return (*native)(data)
|
||||
}
|
||||
|
||||
return (*swapbo)(data)
|
||||
}
|
||||
78
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes/native.go
generated
vendored
78
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes/native.go
generated
vendored
@@ -1,78 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package bytes
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type native []byte
|
||||
|
||||
var _ Bytes = (*native)(nil)
|
||||
|
||||
func (b *native) Read8(pos int) uint8 {
|
||||
return (*b)[pos]
|
||||
}
|
||||
|
||||
func (b *native) Read16(pos int) uint16 {
|
||||
return *(*uint16)(unsafe.Pointer(&((*b)[pos])))
|
||||
}
|
||||
|
||||
func (b *native) Read32(pos int) uint32 {
|
||||
return *(*uint32)(unsafe.Pointer(&((*b)[pos])))
|
||||
}
|
||||
|
||||
func (b *native) Read64(pos int) uint64 {
|
||||
return *(*uint64)(unsafe.Pointer(&((*b)[pos])))
|
||||
}
|
||||
|
||||
func (b *native) Write8(pos int, value uint8) {
|
||||
(*b)[pos] = value
|
||||
}
|
||||
|
||||
func (b *native) Write16(pos int, value uint16) {
|
||||
*(*uint16)(unsafe.Pointer(&((*b)[pos]))) = value
|
||||
}
|
||||
|
||||
func (b *native) Write32(pos int, value uint32) {
|
||||
*(*uint32)(unsafe.Pointer(&((*b)[pos]))) = value
|
||||
}
|
||||
|
||||
func (b *native) Write64(pos int, value uint64) {
|
||||
*(*uint64)(unsafe.Pointer(&((*b)[pos]))) = value
|
||||
}
|
||||
|
||||
func (b *native) Slice(offset int, size int) Bytes {
|
||||
nb := (*b)[offset : offset+size]
|
||||
return &nb
|
||||
}
|
||||
|
||||
func (b *native) LittleEndian() Bytes {
|
||||
return NewLittleEndian((*[]byte)(b))
|
||||
}
|
||||
|
||||
func (b *native) BigEndian() Bytes {
|
||||
return NewBigEndian((*[]byte)(b))
|
||||
}
|
||||
|
||||
func (b *native) Raw() *[]byte {
|
||||
return (*[]byte)(b)
|
||||
}
|
||||
|
||||
func (b *native) Len() int {
|
||||
return len(*b)
|
||||
}
|
||||
112
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes/swapbo.go
generated
vendored
112
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes/swapbo.go
generated
vendored
@@ -1,112 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package bytes
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type swapbo []byte
|
||||
|
||||
var _ Bytes = (*swapbo)(nil)
|
||||
|
||||
func (b *swapbo) Read8(pos int) uint8 {
|
||||
return (*b)[pos]
|
||||
}
|
||||
|
||||
func (b *swapbo) Read16(pos int) uint16 {
|
||||
buf := [2]byte{}
|
||||
buf[0] = (*b)[pos+1]
|
||||
buf[1] = (*b)[pos+0]
|
||||
return *(*uint16)(unsafe.Pointer(&buf[0]))
|
||||
}
|
||||
|
||||
func (b *swapbo) Read32(pos int) uint32 {
|
||||
buf := [4]byte{}
|
||||
buf[0] = (*b)[pos+3]
|
||||
buf[1] = (*b)[pos+2]
|
||||
buf[2] = (*b)[pos+1]
|
||||
buf[3] = (*b)[pos+0]
|
||||
return *(*uint32)(unsafe.Pointer(&buf[0]))
|
||||
}
|
||||
|
||||
func (b *swapbo) Read64(pos int) uint64 {
|
||||
buf := [8]byte{}
|
||||
buf[0] = (*b)[pos+7]
|
||||
buf[1] = (*b)[pos+6]
|
||||
buf[2] = (*b)[pos+5]
|
||||
buf[3] = (*b)[pos+4]
|
||||
buf[4] = (*b)[pos+3]
|
||||
buf[5] = (*b)[pos+2]
|
||||
buf[6] = (*b)[pos+1]
|
||||
buf[7] = (*b)[pos+0]
|
||||
return *(*uint64)(unsafe.Pointer(&buf[0]))
|
||||
}
|
||||
|
||||
func (b *swapbo) Write8(pos int, value uint8) {
|
||||
(*b)[pos] = value
|
||||
}
|
||||
|
||||
func (b *swapbo) Write16(pos int, value uint16) {
|
||||
buf := [2]byte{}
|
||||
*(*uint16)(unsafe.Pointer(&buf[0])) = value
|
||||
(*b)[pos+0] = buf[1]
|
||||
(*b)[pos+1] = buf[0]
|
||||
}
|
||||
|
||||
func (b *swapbo) Write32(pos int, value uint32) {
|
||||
buf := [4]byte{}
|
||||
*(*uint32)(unsafe.Pointer(&buf[0])) = value
|
||||
(*b)[pos+0] = buf[3]
|
||||
(*b)[pos+1] = buf[2]
|
||||
(*b)[pos+2] = buf[1]
|
||||
(*b)[pos+3] = buf[0]
|
||||
}
|
||||
|
||||
func (b *swapbo) Write64(pos int, value uint64) {
|
||||
buf := [8]byte{}
|
||||
*(*uint64)(unsafe.Pointer(&buf[0])) = value
|
||||
(*b)[pos+0] = buf[7]
|
||||
(*b)[pos+1] = buf[6]
|
||||
(*b)[pos+2] = buf[5]
|
||||
(*b)[pos+3] = buf[4]
|
||||
(*b)[pos+4] = buf[3]
|
||||
(*b)[pos+5] = buf[2]
|
||||
(*b)[pos+6] = buf[1]
|
||||
(*b)[pos+7] = buf[0]
|
||||
}
|
||||
|
||||
func (b *swapbo) Slice(offset int, size int) Bytes {
|
||||
nb := (*b)[offset : offset+size]
|
||||
return &nb
|
||||
}
|
||||
|
||||
func (b *swapbo) LittleEndian() Bytes {
|
||||
return NewLittleEndian((*[]byte)(b))
|
||||
}
|
||||
|
||||
func (b *swapbo) BigEndian() Bytes {
|
||||
return NewBigEndian((*[]byte)(b))
|
||||
}
|
||||
|
||||
func (b *swapbo) Raw() *[]byte {
|
||||
return (*[]byte)(b)
|
||||
}
|
||||
|
||||
func (b *swapbo) Len() int {
|
||||
return len(*b)
|
||||
}
|
||||
143
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/config.go
generated
vendored
143
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/config.go
generated
vendored
@@ -1,143 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package nvpci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes"
|
||||
)
|
||||
|
||||
const (
|
||||
// PCICfgSpaceStandardSize represents the size in bytes of the standard config space
|
||||
PCICfgSpaceStandardSize = 256
|
||||
// PCICfgSpaceExtendedSize represents the size in bytes of the extended config space
|
||||
PCICfgSpaceExtendedSize = 4096
|
||||
// PCICapabilityListPointer represents offset for the capability list pointer
|
||||
PCICapabilityListPointer = 0x34
|
||||
// PCIStatusCapabilityList represents the status register bit which indicates capability list support
|
||||
PCIStatusCapabilityList = 0x10
|
||||
// PCIStatusBytePosition represents the position of the status register
|
||||
PCIStatusBytePosition = 0x06
|
||||
)
|
||||
|
||||
// ConfigSpace PCI configuration space (standard extended) file path
|
||||
type ConfigSpace struct {
|
||||
Path string
|
||||
}
|
||||
|
||||
// ConfigSpaceIO Interface for reading and writing raw and preconfigured values
|
||||
type ConfigSpaceIO interface {
|
||||
bytes.Bytes
|
||||
GetVendorID() uint16
|
||||
GetDeviceID() uint16
|
||||
GetPCICapabilities() (*PCICapabilities, error)
|
||||
}
|
||||
|
||||
type configSpaceIO struct {
|
||||
bytes.Bytes
|
||||
}
|
||||
|
||||
// PCIStandardCapability standard PCI config space
|
||||
type PCIStandardCapability struct {
|
||||
bytes.Bytes
|
||||
}
|
||||
|
||||
// PCIExtendedCapability extended PCI config space
|
||||
type PCIExtendedCapability struct {
|
||||
bytes.Bytes
|
||||
Version uint8
|
||||
}
|
||||
|
||||
// PCICapabilities combines the standard and extended config space
|
||||
type PCICapabilities struct {
|
||||
Standard map[uint8]*PCIStandardCapability
|
||||
Extended map[uint16]*PCIExtendedCapability
|
||||
}
|
||||
|
||||
func (cs *ConfigSpace) Read() (ConfigSpaceIO, error) {
|
||||
config, err := ioutil.ReadFile(cs.Path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file: %v", err)
|
||||
}
|
||||
return &configSpaceIO{bytes.New(&config)}, nil
|
||||
}
|
||||
|
||||
func (cs *configSpaceIO) GetVendorID() uint16 {
|
||||
return cs.Read16(0)
|
||||
}
|
||||
|
||||
func (cs *configSpaceIO) GetDeviceID() uint16 {
|
||||
return cs.Read16(2)
|
||||
}
|
||||
|
||||
func (cs *configSpaceIO) GetPCICapabilities() (*PCICapabilities, error) {
|
||||
caps := &PCICapabilities{
|
||||
make(map[uint8]*PCIStandardCapability),
|
||||
make(map[uint16]*PCIExtendedCapability),
|
||||
}
|
||||
|
||||
support := cs.Read8(PCIStatusBytePosition) & PCIStatusCapabilityList
|
||||
if support == 0 {
|
||||
return nil, fmt.Errorf("pci device does not support capability list")
|
||||
}
|
||||
|
||||
soffset := cs.Read8(PCICapabilityListPointer)
|
||||
if int(soffset) >= cs.Len() {
|
||||
return nil, fmt.Errorf("capability list pointer out of bounds")
|
||||
}
|
||||
|
||||
for soffset != 0 {
|
||||
if soffset == 0xff {
|
||||
return nil, fmt.Errorf("config space broken")
|
||||
}
|
||||
if int(soffset) >= PCICfgSpaceStandardSize {
|
||||
return nil, fmt.Errorf("standard capability list pointer out of bounds")
|
||||
}
|
||||
data := cs.Read32(int(soffset))
|
||||
id := uint8(data & 0xff)
|
||||
caps.Standard[id] = &PCIStandardCapability{
|
||||
cs.Slice(int(soffset), cs.Len()-int(soffset)),
|
||||
}
|
||||
soffset = uint8((data >> 8) & 0xff)
|
||||
}
|
||||
|
||||
if cs.Len() <= PCICfgSpaceStandardSize {
|
||||
return caps, nil
|
||||
}
|
||||
|
||||
eoffset := uint16(PCICfgSpaceStandardSize)
|
||||
for eoffset != 0 {
|
||||
if eoffset == 0xffff {
|
||||
return nil, fmt.Errorf("config space broken")
|
||||
}
|
||||
if int(eoffset) >= PCICfgSpaceExtendedSize {
|
||||
return nil, fmt.Errorf("extended capability list pointer out of bounds")
|
||||
}
|
||||
data := cs.Read32(int(eoffset))
|
||||
id := uint16(data & 0xffff)
|
||||
version := uint8((data >> 16) & 0xf)
|
||||
caps.Extended[id] = &PCIExtendedCapability{
|
||||
cs.Slice(int(eoffset), cs.Len()-int(eoffset)),
|
||||
version,
|
||||
}
|
||||
eoffset = uint16((data >> 4) & 0xffc)
|
||||
}
|
||||
|
||||
return caps, nil
|
||||
}
|
||||
127
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mmio/mmio.go
generated
vendored
127
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mmio/mmio.go
generated
vendored
@@ -1,127 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package mmio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes"
|
||||
)
|
||||
|
||||
// Mmio memory map a region
|
||||
type Mmio interface {
|
||||
bytes.Raw
|
||||
bytes.Reader
|
||||
bytes.Writer
|
||||
Sync() error
|
||||
Close() error
|
||||
Slice(offset int, size int) Mmio
|
||||
LittleEndian() Mmio
|
||||
BigEndian() Mmio
|
||||
}
|
||||
|
||||
type mmio struct {
|
||||
bytes.Bytes
|
||||
}
|
||||
|
||||
func open(path string, offset int, size int, flags int) (Mmio, error) {
|
||||
var mmapFlags int
|
||||
switch flags {
|
||||
case os.O_RDONLY:
|
||||
mmapFlags = syscall.PROT_READ
|
||||
case os.O_RDWR:
|
||||
mmapFlags = syscall.PROT_READ | syscall.PROT_WRITE
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid flags: %v", flags)
|
||||
}
|
||||
|
||||
file, err := os.OpenFile(path, flags, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
fi, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get file info: %v", err)
|
||||
}
|
||||
|
||||
if size > int(fi.Size()) {
|
||||
return nil, fmt.Errorf("requested size larger than file size")
|
||||
}
|
||||
|
||||
if size < 0 {
|
||||
size = int(fi.Size())
|
||||
}
|
||||
|
||||
mmap, err := syscall.Mmap(
|
||||
int(file.Fd()),
|
||||
int64(offset),
|
||||
size,
|
||||
mmapFlags,
|
||||
syscall.MAP_SHARED)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to mmap file: %v", err)
|
||||
}
|
||||
|
||||
return &mmio{bytes.New(&mmap)}, nil
|
||||
}
|
||||
|
||||
// OpenRO open region readonly
|
||||
func OpenRO(path string, offset int, size int) (Mmio, error) {
|
||||
return open(path, offset, size, os.O_RDONLY)
|
||||
}
|
||||
|
||||
// OpenRW open region read write
|
||||
func OpenRW(path string, offset int, size int) (Mmio, error) {
|
||||
return open(path, offset, size, os.O_RDWR)
|
||||
}
|
||||
|
||||
func (m *mmio) Slice(offset int, size int) Mmio {
|
||||
return &mmio{m.Bytes.Slice(offset, size)}
|
||||
}
|
||||
|
||||
func (m *mmio) LittleEndian() Mmio {
|
||||
return &mmio{m.Bytes.LittleEndian()}
|
||||
}
|
||||
|
||||
func (m *mmio) BigEndian() Mmio {
|
||||
return &mmio{m.Bytes.BigEndian()}
|
||||
}
|
||||
|
||||
func (m *mmio) Close() error {
|
||||
err := syscall.Munmap(*m.Bytes.Raw())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to munmap file: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mmio) Sync() error {
|
||||
_, _, errno := syscall.Syscall(
|
||||
syscall.SYS_MSYNC,
|
||||
uintptr(unsafe.Pointer(&(*m.Bytes.Raw())[0])),
|
||||
uintptr(m.Len()),
|
||||
uintptr(syscall.MS_SYNC|syscall.MS_INVALIDATE))
|
||||
if errno != 0 {
|
||||
return fmt.Errorf("failed to msync file: %v", errno)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
74
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mmio/mock.go
generated
vendored
74
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mmio/mock.go
generated
vendored
@@ -1,74 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package mmio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes"
|
||||
)
|
||||
|
||||
type mockMmio struct {
|
||||
mmio
|
||||
source *[]byte
|
||||
offset int
|
||||
rw bool
|
||||
}
|
||||
|
||||
func mockOpen(source *[]byte, offset int, size int, rw bool) (Mmio, error) {
|
||||
if size < 0 {
|
||||
size = len(*source) - offset
|
||||
}
|
||||
if (offset + size) > len(*source) {
|
||||
return nil, fmt.Errorf("offset+size out of range")
|
||||
}
|
||||
|
||||
data := append([]byte{}, (*source)[offset:offset+size]...)
|
||||
|
||||
m := &mockMmio{}
|
||||
m.Bytes = bytes.New(&data).LittleEndian()
|
||||
m.source = source
|
||||
m.offset = offset
|
||||
m.rw = rw
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// MockOpenRO open read only
|
||||
func MockOpenRO(source *[]byte, offset int, size int) (Mmio, error) {
|
||||
return mockOpen(source, offset, size, false)
|
||||
}
|
||||
|
||||
// MockOpenRW open read write
|
||||
func MockOpenRW(source *[]byte, offset int, size int) (Mmio, error) {
|
||||
return mockOpen(source, offset, size, true)
|
||||
}
|
||||
|
||||
func (m *mockMmio) Close() error {
|
||||
m = &mockMmio{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockMmio) Sync() error {
|
||||
if !m.rw {
|
||||
return fmt.Errorf("opened read-only")
|
||||
}
|
||||
for i := range *m.Bytes.Raw() {
|
||||
(*m.source)[m.offset+i] = (*m.Bytes.Raw())[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
141
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mock.go
generated
vendored
141
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mock.go
generated
vendored
@@ -1,141 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package nvpci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/bytes"
|
||||
)
|
||||
|
||||
// MockNvpci mock pci device
|
||||
type MockNvpci struct {
|
||||
*nvpci
|
||||
}
|
||||
|
||||
var _ Interface = (*MockNvpci)(nil)
|
||||
|
||||
// NewMockNvpci create new mock PCI and remove old devices
|
||||
func NewMockNvpci() (mock *MockNvpci, rerr error) {
|
||||
rootDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
os.RemoveAll(rootDir)
|
||||
}
|
||||
}()
|
||||
|
||||
mock = &MockNvpci{
|
||||
NewFrom(rootDir).(*nvpci),
|
||||
}
|
||||
|
||||
return mock, nil
|
||||
}
|
||||
|
||||
// Cleanup remove the mocked PCI devices root folder
|
||||
func (m *MockNvpci) Cleanup() {
|
||||
os.RemoveAll(m.pciDevicesRoot)
|
||||
}
|
||||
|
||||
// AddMockA100 Create an A100 like GPU mock device
|
||||
func (m *MockNvpci) AddMockA100(address string, numaNode int) error {
|
||||
deviceDir := filepath.Join(m.pciDevicesRoot, address)
|
||||
err := os.MkdirAll(deviceDir, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vendor, err := os.Create(filepath.Join(deviceDir, "vendor"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = vendor.WriteString(fmt.Sprintf("0x%x", PCINvidiaVendorID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
class, err := os.Create(filepath.Join(deviceDir, "class"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = class.WriteString(fmt.Sprintf("0x%x", PCI3dControllerClass))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
device, err := os.Create(filepath.Join(deviceDir, "device"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = device.WriteString("0x20bf")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numa, err := os.Create(filepath.Join(deviceDir, "numa_node"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = numa.WriteString(fmt.Sprintf("%v", numaNode))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config, err := os.Create(filepath.Join(deviceDir, "config"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_data := make([]byte, PCICfgSpaceStandardSize)
|
||||
data := bytes.New(&_data)
|
||||
data.Write16(0, PCINvidiaVendorID)
|
||||
data.Write16(2, uint16(0x20bf))
|
||||
data.Write8(PCIStatusBytePosition, PCIStatusCapabilityList)
|
||||
_, err = config.Write(*data.Raw())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bar0 := []uint64{0x00000000c2000000, 0x00000000c2ffffff, 0x0000000000040200}
|
||||
resource, err := os.Create(filepath.Join(deviceDir, "resource"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = resource.WriteString(fmt.Sprintf("0x%x 0x%x 0x%x", bar0[0], bar0[1], bar0[2]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pmcID := uint32(0x170000a1)
|
||||
resource0, err := os.Create(filepath.Join(deviceDir, "resource0"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_data = make([]byte, bar0[1]-bar0[0]+1)
|
||||
data = bytes.New(&_data).LittleEndian()
|
||||
data.Write32(0, pmcID)
|
||||
_, err = resource0.Write(*data.Raw())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
316
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/nvpci.go
generated
vendored
316
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/nvpci.go
generated
vendored
@@ -1,316 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package nvpci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// PCIDevicesRoot represents base path for all pci devices under sysfs
|
||||
PCIDevicesRoot = "/sys/bus/pci/devices"
|
||||
// PCINvidiaVendorID represents PCI vendor id for NVIDIA
|
||||
PCINvidiaVendorID uint16 = 0x10de
|
||||
// PCIVgaControllerClass represents the PCI class for VGA Controllers
|
||||
PCIVgaControllerClass uint32 = 0x030000
|
||||
// PCI3dControllerClass represents the PCI class for 3D Graphics accellerators
|
||||
PCI3dControllerClass uint32 = 0x030200
|
||||
// PCINvSwitchClass represents the PCI class for NVSwitches
|
||||
PCINvSwitchClass uint32 = 0x068000
|
||||
)
|
||||
|
||||
// Interface allows us to get a list of all NVIDIA PCI devices
|
||||
type Interface interface {
|
||||
GetAllDevices() ([]*NvidiaPCIDevice, error)
|
||||
Get3DControllers() ([]*NvidiaPCIDevice, error)
|
||||
GetVGAControllers() ([]*NvidiaPCIDevice, error)
|
||||
GetNVSwitches() ([]*NvidiaPCIDevice, error)
|
||||
GetGPUs() ([]*NvidiaPCIDevice, error)
|
||||
}
|
||||
|
||||
// MemoryResources a more human readable handle
|
||||
type MemoryResources map[int]*MemoryResource
|
||||
|
||||
// ResourceInterface exposes some higher level functions of resources
|
||||
type ResourceInterface interface {
|
||||
GetTotalAddressableMemory(bool) (uint64, uint64)
|
||||
}
|
||||
|
||||
type nvpci struct {
|
||||
pciDevicesRoot string
|
||||
}
|
||||
|
||||
var _ Interface = (*nvpci)(nil)
|
||||
var _ ResourceInterface = (*MemoryResources)(nil)
|
||||
|
||||
// NvidiaPCIDevice represents a PCI device for an NVIDIA product
|
||||
type NvidiaPCIDevice struct {
|
||||
Path string
|
||||
Address string
|
||||
Vendor uint16
|
||||
Class uint32
|
||||
Device uint16
|
||||
NumaNode int
|
||||
Config *ConfigSpace
|
||||
Resources MemoryResources
|
||||
}
|
||||
|
||||
// IsVGAController if class == 0x300
|
||||
func (d *NvidiaPCIDevice) IsVGAController() bool {
|
||||
return d.Class == PCIVgaControllerClass
|
||||
}
|
||||
|
||||
// Is3DController if class == 0x302
|
||||
func (d *NvidiaPCIDevice) Is3DController() bool {
|
||||
return d.Class == PCI3dControllerClass
|
||||
}
|
||||
|
||||
// IsNVSwitch if classe == 0x068
|
||||
func (d *NvidiaPCIDevice) IsNVSwitch() bool {
|
||||
return d.Class == PCINvSwitchClass
|
||||
}
|
||||
|
||||
// IsGPU either VGA for older cards or 3D for newer
|
||||
func (d *NvidiaPCIDevice) IsGPU() bool {
|
||||
return d.IsVGAController() || d.Is3DController()
|
||||
}
|
||||
|
||||
// IsResetAvailable some devices can be reset without rebooting,
|
||||
// check if applicable
|
||||
func (d *NvidiaPCIDevice) IsResetAvailable() bool {
|
||||
_, err := os.Stat(path.Join(d.Path, "reset"))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Reset perform a reset to apply a new configuration at HW level
|
||||
func (d *NvidiaPCIDevice) Reset() error {
|
||||
err := ioutil.WriteFile(path.Join(d.Path, "reset"), []byte("1"), 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write to reset file: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// New interface that allows us to get a list of all NVIDIA PCI devices
|
||||
func New() Interface {
|
||||
return &nvpci{PCIDevicesRoot}
|
||||
}
|
||||
|
||||
// NewFrom interface allows us to get a list of all NVIDIA PCI devices at a specific root directory
|
||||
func NewFrom(root string) Interface {
|
||||
return &nvpci{root}
|
||||
}
|
||||
|
||||
// GetAllDevices returns all Nvidia PCI devices on the system
|
||||
func (p *nvpci) GetAllDevices() ([]*NvidiaPCIDevice, error) {
|
||||
deviceDirs, err := ioutil.ReadDir(p.pciDevicesRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read PCI bus devices: %v", err)
|
||||
}
|
||||
|
||||
var nvdevices []*NvidiaPCIDevice
|
||||
for _, deviceDir := range deviceDirs {
|
||||
devicePath := path.Join(p.pciDevicesRoot, deviceDir.Name())
|
||||
nvdevice, err := NewDevice(devicePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error constructing NVIDIA PCI device %s: %v", deviceDir.Name(), err)
|
||||
}
|
||||
if nvdevice == nil {
|
||||
continue
|
||||
}
|
||||
nvdevices = append(nvdevices, nvdevice)
|
||||
}
|
||||
|
||||
addressToID := func(address string) uint64 {
|
||||
address = strings.ReplaceAll(address, ":", "")
|
||||
address = strings.ReplaceAll(address, ".", "")
|
||||
id, _ := strconv.ParseUint(address, 16, 64)
|
||||
return id
|
||||
}
|
||||
|
||||
sort.Slice(nvdevices, func(i, j int) bool {
|
||||
return addressToID(nvdevices[i].Address) < addressToID(nvdevices[j].Address)
|
||||
})
|
||||
|
||||
return nvdevices, nil
|
||||
}
|
||||
|
||||
// NewDevice constructs an NvidiaPCIDevice
|
||||
func NewDevice(devicePath string) (*NvidiaPCIDevice, error) {
|
||||
address := path.Base(devicePath)
|
||||
|
||||
vendor, err := ioutil.ReadFile(path.Join(devicePath, "vendor"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read PCI device vendor id for %s: %v", address, err)
|
||||
}
|
||||
vendorStr := strings.TrimSpace(string(vendor))
|
||||
vendorID, err := strconv.ParseUint(vendorStr, 0, 16)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to convert vendor string to uint16: %v", vendorStr)
|
||||
}
|
||||
|
||||
if uint16(vendorID) != PCINvidiaVendorID {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
class, err := ioutil.ReadFile(path.Join(devicePath, "class"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read PCI device class for %s: %v", address, err)
|
||||
}
|
||||
classStr := strings.TrimSpace(string(class))
|
||||
classID, err := strconv.ParseUint(classStr, 0, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to convert class string to uint32: %v", classStr)
|
||||
}
|
||||
|
||||
device, err := ioutil.ReadFile(path.Join(devicePath, "device"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read PCI device id for %s: %v", address, err)
|
||||
}
|
||||
deviceStr := strings.TrimSpace(string(device))
|
||||
deviceID, err := strconv.ParseUint(deviceStr, 0, 16)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to convert device string to uint16: %v", deviceStr)
|
||||
}
|
||||
|
||||
numa, err := ioutil.ReadFile(path.Join(devicePath, "numa_node"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read PCI NUMA node for %s: %v", address, err)
|
||||
}
|
||||
numaStr := strings.TrimSpace(string(numa))
|
||||
numaNode, err := strconv.ParseInt(numaStr, 0, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to convert NUMA node string to int64: %v", numaNode)
|
||||
}
|
||||
|
||||
config := &ConfigSpace{
|
||||
Path: path.Join(devicePath, "config"),
|
||||
}
|
||||
|
||||
resource, err := ioutil.ReadFile(path.Join(devicePath, "resource"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read PCI resource file for %s: %v", address, err)
|
||||
}
|
||||
|
||||
resources := make(map[int]*MemoryResource)
|
||||
for i, line := range strings.Split(strings.TrimSpace(string(resource)), "\n") {
|
||||
values := strings.Split(line, " ")
|
||||
if len(values) != 3 {
|
||||
return nil, fmt.Errorf("more than 3 entries in line '%d' of resource file", i)
|
||||
}
|
||||
|
||||
start, _ := strconv.ParseUint(values[0], 0, 64)
|
||||
end, _ := strconv.ParseUint(values[1], 0, 64)
|
||||
flags, _ := strconv.ParseUint(values[2], 0, 64)
|
||||
|
||||
if (end - start) != 0 {
|
||||
resources[i] = &MemoryResource{
|
||||
uintptr(start),
|
||||
uintptr(end),
|
||||
flags,
|
||||
fmt.Sprintf("%s/resource%d", devicePath, i),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nvdevice := &NvidiaPCIDevice{
|
||||
Path: devicePath,
|
||||
Address: address,
|
||||
Vendor: uint16(vendorID),
|
||||
Class: uint32(classID),
|
||||
Device: uint16(deviceID),
|
||||
NumaNode: int(numaNode),
|
||||
Config: config,
|
||||
Resources: resources,
|
||||
}
|
||||
|
||||
return nvdevice, nil
|
||||
}
|
||||
|
||||
// Get3DControllers returns all NVIDIA 3D Controller PCI devices on the system
|
||||
func (p *nvpci) Get3DControllers() ([]*NvidiaPCIDevice, error) {
|
||||
devices, err := p.GetAllDevices()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting all NVIDIA devices: %v", err)
|
||||
}
|
||||
|
||||
var filtered []*NvidiaPCIDevice
|
||||
for _, d := range devices {
|
||||
if d.Is3DController() {
|
||||
filtered = append(filtered, d)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// GetVGAControllers returns all NVIDIA VGA Controller PCI devices on the system
|
||||
func (p *nvpci) GetVGAControllers() ([]*NvidiaPCIDevice, error) {
|
||||
devices, err := p.GetAllDevices()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting all NVIDIA devices: %v", err)
|
||||
}
|
||||
|
||||
var filtered []*NvidiaPCIDevice
|
||||
for _, d := range devices {
|
||||
if d.IsVGAController() {
|
||||
filtered = append(filtered, d)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// GetNVSwitches returns all NVIDIA NVSwitch PCI devices on the system
|
||||
func (p *nvpci) GetNVSwitches() ([]*NvidiaPCIDevice, error) {
|
||||
devices, err := p.GetAllDevices()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting all NVIDIA devices: %v", err)
|
||||
}
|
||||
|
||||
var filtered []*NvidiaPCIDevice
|
||||
for _, d := range devices {
|
||||
if d.IsNVSwitch() {
|
||||
filtered = append(filtered, d)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// GetGPUs returns all NVIDIA GPU devices on the system
|
||||
func (p *nvpci) GetGPUs() ([]*NvidiaPCIDevice, error) {
|
||||
devices, err := p.GetAllDevices()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting all NVIDIA devices: %v", err)
|
||||
}
|
||||
|
||||
var filtered []*NvidiaPCIDevice
|
||||
for _, d := range devices {
|
||||
if d.IsGPU() {
|
||||
filtered = append(filtered, d)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
140
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/resources.go
generated
vendored
140
src/runtime/vendor/gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/resources.go
generated
vendored
@@ -1,140 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package nvpci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci/mmio"
|
||||
)
|
||||
|
||||
const (
|
||||
pmcEndianRegister = 0x4
|
||||
pmcLittleEndian = 0x0
|
||||
pmcBigEndian = 0x01000001
|
||||
)
|
||||
|
||||
// MemoryResource represents a mmio region
|
||||
type MemoryResource struct {
|
||||
Start uintptr
|
||||
End uintptr
|
||||
Flags uint64
|
||||
Path string
|
||||
}
|
||||
|
||||
// OpenRW read write mmio region
|
||||
func (mr *MemoryResource) OpenRW() (mmio.Mmio, error) {
|
||||
rw, err := mmio.OpenRW(mr.Path, 0, int(mr.End-mr.Start+1))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file for mmio: %v", err)
|
||||
}
|
||||
switch rw.Read32(pmcEndianRegister) {
|
||||
case pmcBigEndian:
|
||||
return rw.BigEndian(), nil
|
||||
case pmcLittleEndian:
|
||||
return rw.LittleEndian(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown endianness for mmio: %v", err)
|
||||
}
|
||||
|
||||
// OpenRO read only mmio region
|
||||
func (mr *MemoryResource) OpenRO() (mmio.Mmio, error) {
|
||||
ro, err := mmio.OpenRO(mr.Path, 0, int(mr.End-mr.Start+1))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file for mmio: %v", err)
|
||||
}
|
||||
switch ro.Read32(pmcEndianRegister) {
|
||||
case pmcBigEndian:
|
||||
return ro.BigEndian(), nil
|
||||
case pmcLittleEndian:
|
||||
return ro.LittleEndian(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown endianness for mmio: %v", err)
|
||||
}
|
||||
|
||||
// calcNextPowerOf2 rounds n up to the next power of two, returning n itself
// when it already is one (and 0 for input 0). Classic bit-smearing trick from
// "Bit Twiddling Hacks": after decrementing, OR-folding the high bits down
// sets every bit below the highest set bit, so incrementing lands exactly on
// the next power of two.
func calcNextPowerOf2(n uint64) uint64 {
	n--
	for shift := uint(1); shift <= 32; shift <<= 1 {
		n |= n >> shift
	}
	return n + 1
}
|
||||
|
||||
// GetTotalAddressableMemory will accumulate the 32bit and 64bit memory windows
|
||||
// of each BAR and round the value if needed to the next power of 2; first
|
||||
// return value is the accumulated 32bit addresable memory size the second one
|
||||
// is the accumulated 64bit addressable memory size in bytes. These values are
|
||||
// needed to configure virtualized environments.
|
||||
func (mrs MemoryResources) GetTotalAddressableMemory(roundUp bool) (uint64, uint64) {
|
||||
const pciIOVNumBAR = 6
|
||||
const pciBaseAddressMemTypeMask = 0x06
|
||||
const pciBaseAddressMemType32 = 0x00 /* 32 bit address */
|
||||
const pciBaseAddressMemType64 = 0x04 /* 64 bit address */
|
||||
|
||||
// We need to sort the resources so the first 6 entries are the BARs
|
||||
// How a map is represented in memory is not guaranteed, it is not an
|
||||
// array. Keys do not have an order.
|
||||
keys := make([]int, 0, len(mrs))
|
||||
for k := range mrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Ints(keys)
|
||||
|
||||
numBAR := 0
|
||||
memSize32bit := uint64(0)
|
||||
memSize64bit := uint64(0)
|
||||
|
||||
for _, key := range keys {
|
||||
// The PCIe spec only defines 5 BARs per device, we're
|
||||
// discarding everything after the 5th entry of the resources
|
||||
// file, see lspci.c
|
||||
if key >= pciIOVNumBAR || numBAR == pciIOVNumBAR {
|
||||
break
|
||||
}
|
||||
numBAR = numBAR + 1
|
||||
|
||||
region := mrs[key]
|
||||
|
||||
flags := region.Flags & pciBaseAddressMemTypeMask
|
||||
memType32bit := flags == pciBaseAddressMemType32
|
||||
memType64bit := flags == pciBaseAddressMemType64
|
||||
|
||||
memSize := (region.End - region.Start) + 1
|
||||
|
||||
if memType32bit {
|
||||
memSize32bit = memSize32bit + uint64(memSize)
|
||||
}
|
||||
if memType64bit {
|
||||
memSize64bit = memSize64bit + uint64(memSize)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if roundUp {
|
||||
memSize32bit = calcNextPowerOf2(memSize32bit)
|
||||
memSize64bit = calcNextPowerOf2(memSize64bit)
|
||||
}
|
||||
|
||||
return memSize32bit, memSize64bit
|
||||
}
|
||||
2
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
generated
vendored
2
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
generated
vendored
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
|
||||
func unmarshalJSON(dst []byte, src []byte) error {
|
||||
func unmarshalJSON(dst, src []byte) error {
|
||||
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
|
||||
src = src[1 : l-1]
|
||||
}
|
||||
|
||||
2
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
generated
vendored
2
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
generated
vendored
@@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error {
|
||||
// strings or integers.
|
||||
type protoUint64 uint64
|
||||
|
||||
// Int64 returns the protoUint64 as a uint64.
|
||||
// Uint64 returns the protoUint64 as a uint64.
|
||||
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
|
||||
|
||||
// UnmarshalJSON decodes both strings and integers.
|
||||
|
||||
70
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
generated
vendored
70
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
generated
vendored
@@ -10,6 +10,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) {
|
||||
}{
|
||||
Alias: Alias(s),
|
||||
ParentSpanID: parentSpanId,
|
||||
StartTime: uint64(startT),
|
||||
EndTime: uint64(endT),
|
||||
StartTime: uint64(startT), // nolint:gosec // >0 checked above.
|
||||
EndTime: uint64(endT), // nolint:gosec // >0 checked above.
|
||||
})
|
||||
}
|
||||
|
||||
@@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error {
|
||||
case "startTimeUnixNano", "start_time_unix_nano":
|
||||
var val protoUint64
|
||||
err = decoder.Decode(&val)
|
||||
s.StartTime = time.Unix(0, int64(val.Uint64()))
|
||||
v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
|
||||
s.StartTime = time.Unix(0, v)
|
||||
case "endTimeUnixNano", "end_time_unix_nano":
|
||||
var val protoUint64
|
||||
err = decoder.Decode(&val)
|
||||
s.EndTime = time.Unix(0, int64(val.Uint64()))
|
||||
v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
|
||||
s.EndTime = time.Unix(0, v)
|
||||
case "attributes":
|
||||
err = decoder.Decode(&s.Attrs)
|
||||
case "droppedAttributesCount", "dropped_attributes_count":
|
||||
@@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error {
|
||||
type SpanFlags int32
|
||||
|
||||
const (
|
||||
// SpanFlagsTraceFlagsMask is a mask for trace-flags.
|
||||
//
|
||||
// Bits 0-7 are used for trace flags.
|
||||
SpanFlagsTraceFlagsMask SpanFlags = 255
|
||||
// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
|
||||
// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
|
||||
// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
|
||||
// SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
|
||||
//
|
||||
// Bits 8 and 9 are used to indicate that the parent span or link span is
|
||||
// remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
|
||||
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
|
||||
// SpanFlagsContextHasIsRemoteMask indicates the Span is remote.
|
||||
// SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
|
||||
//
|
||||
// Bits 8 and 9 are used to indicate that the parent span or link span is
|
||||
// remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
|
||||
// remote.
|
||||
SpanFlagsContextIsRemoteMask SpanFlags = 512
|
||||
)
|
||||
|
||||
@@ -263,26 +273,30 @@ const (
|
||||
type SpanKind int32
|
||||
|
||||
const (
|
||||
// Indicates that the span represents an internal operation within an application,
|
||||
// as opposed to an operation happening at the boundaries. Default value.
|
||||
// SpanKindInternal indicates that the span represents an internal
|
||||
// operation within an application, as opposed to an operation happening at
|
||||
// the boundaries.
|
||||
SpanKindInternal SpanKind = 1
|
||||
// Indicates that the span covers server-side handling of an RPC or other
|
||||
// remote network request.
|
||||
// SpanKindServer indicates that the span covers server-side handling of an
|
||||
// RPC or other remote network request.
|
||||
SpanKindServer SpanKind = 2
|
||||
// Indicates that the span describes a request to some remote service.
|
||||
// SpanKindClient indicates that the span describes a request to some
|
||||
// remote service.
|
||||
SpanKindClient SpanKind = 3
|
||||
// Indicates that the span describes a producer sending a message to a broker.
|
||||
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
|
||||
// between producer and consumer spans. A PRODUCER span ends when the message was accepted
|
||||
// by the broker while the logical processing of the message might span a much longer time.
|
||||
// SpanKindProducer indicates that the span describes a producer sending a
|
||||
// message to a broker. Unlike SpanKindClient and SpanKindServer, there is
|
||||
// often no direct critical path latency relationship between producer and
|
||||
// consumer spans. A SpanKindProducer span ends when the message was
|
||||
// accepted by the broker while the logical processing of the message might
|
||||
// span a much longer time.
|
||||
SpanKindProducer SpanKind = 4
|
||||
// Indicates that the span describes consumer receiving a message from a broker.
|
||||
// Like the PRODUCER kind, there is often no direct critical path latency relationship
|
||||
// between producer and consumer spans.
|
||||
// SpanKindConsumer indicates that the span describes a consumer receiving
|
||||
// a message from a broker. Like SpanKindProducer, there is often no direct
|
||||
// critical path latency relationship between producer and consumer spans.
|
||||
SpanKindConsumer SpanKind = 5
|
||||
)
|
||||
|
||||
// Event is a time-stamped annotation of the span, consisting of user-supplied
|
||||
// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
|
||||
// text description and key-value pairs.
|
||||
type SpanEvent struct {
|
||||
// time_unix_nano is the time the event occurred.
|
||||
@@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) {
|
||||
Time uint64 `json:"timeUnixNano,omitempty"`
|
||||
}{
|
||||
Alias: Alias(e),
|
||||
Time: uint64(t),
|
||||
Time: uint64(t), //nolint:gosec // >0 checked above
|
||||
})
|
||||
}
|
||||
|
||||
@@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
|
||||
case "timeUnixNano", "time_unix_nano":
|
||||
var val protoUint64
|
||||
err = decoder.Decode(&val)
|
||||
se.Time = time.Unix(0, int64(val.Uint64()))
|
||||
v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
|
||||
se.Time = time.Unix(0, v)
|
||||
case "name":
|
||||
err = decoder.Decode(&se.Name)
|
||||
case "attributes":
|
||||
@@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// A pointer from the current span to another span in the same trace or in a
|
||||
// different trace. For example, this can be used in batching operations,
|
||||
// where a single batch handler processes multiple requests from different
|
||||
// traces or when the handler receives a request from a different project.
|
||||
// SpanLink is a reference from the current span to another span in the same
|
||||
// trace or in a different trace. For example, this can be used in batching
|
||||
// operations, where a single batch handler processes multiple requests from
|
||||
// different traces or when the handler receives a request from a different
|
||||
// project.
|
||||
type SpanLink struct {
|
||||
// A unique identifier of a trace that this linked span is part of. The ID is a
|
||||
// 16-byte array.
|
||||
|
||||
10
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
generated
vendored
10
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
generated
vendored
@@ -3,17 +3,19 @@
|
||||
|
||||
package telemetry
|
||||
|
||||
// StatusCode is the status of a Span.
|
||||
//
|
||||
// For the semantics of status codes see
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
|
||||
type StatusCode int32
|
||||
|
||||
const (
|
||||
// The default status.
|
||||
// StatusCodeUnset is the default status.
|
||||
StatusCodeUnset StatusCode = 0
|
||||
// The Span has been validated by an Application developer or Operator to
|
||||
// have completed successfully.
|
||||
// StatusCodeOK is used when the Span has been validated by an Application
|
||||
// developer or Operator to have completed successfully.
|
||||
StatusCodeOK StatusCode = 1
|
||||
// The Span contains an error.
|
||||
// StatusCodeError is used when the Span contains an error.
|
||||
StatusCodeError StatusCode = 2
|
||||
)
|
||||
|
||||
|
||||
4
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
generated
vendored
4
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
generated
vendored
@@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// A collection of ScopeSpans from a Resource.
|
||||
// ResourceSpans is a collection of ScopeSpans from a Resource.
|
||||
type ResourceSpans struct {
|
||||
// The resource for the spans in this message.
|
||||
// If this field is not set then no resource info is known.
|
||||
@@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// A collection of Spans produced by an InstrumentationScope.
|
||||
// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
|
||||
type ScopeSpans struct {
|
||||
// The instrumentation scope information for the spans in this message.
|
||||
// Semantically when InstrumentationScope isn't set, it is equivalent with
|
||||
|
||||
14
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
generated
vendored
14
src/runtime/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
generated
vendored
@@ -1,8 +1,6 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//go:generate stringer -type=ValueKind -trimprefix=ValueKind
|
||||
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
@@ -23,7 +21,7 @@ import (
|
||||
// A zero value is valid and represents an empty value.
|
||||
type Value struct {
|
||||
// Ensure forward compatibility by explicitly making this not comparable.
|
||||
noCmp [0]func() //nolint: unused // This is indeed used.
|
||||
noCmp [0]func() //nolint:unused // This is indeed used.
|
||||
|
||||
// num holds the value for Int64, Float64, and Bool. It holds the length
|
||||
// for String, Bytes, Slice, Map.
|
||||
@@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) }
|
||||
|
||||
// Int64Value returns a [Value] for an int64.
|
||||
func Int64Value(v int64) Value {
|
||||
return Value{num: uint64(v), any: ValueKindInt64}
|
||||
return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv.
|
||||
}
|
||||
|
||||
// Float64Value returns a [Value] for a float64.
|
||||
@@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 {
|
||||
// this will return garbage.
|
||||
func (v Value) asInt64() int64 {
|
||||
// Assumes v.num was a valid int64 (overflow not checked).
|
||||
return int64(v.num) // nolint: gosec
|
||||
return int64(v.num) //nolint:gosec // Bounded.
|
||||
}
|
||||
|
||||
// AsBool returns the value held by v as a bool.
|
||||
@@ -309,13 +307,13 @@ func (v Value) String() string {
|
||||
return v.asString()
|
||||
case ValueKindInt64:
|
||||
// Assumes v.num was a valid int64 (overflow not checked).
|
||||
return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
|
||||
return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded.
|
||||
case ValueKindFloat64:
|
||||
return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
|
||||
case ValueKindBool:
|
||||
return strconv.FormatBool(v.asBool())
|
||||
case ValueKindBytes:
|
||||
return fmt.Sprint(v.asBytes())
|
||||
return string(v.asBytes())
|
||||
case ValueKindMap:
|
||||
return fmt.Sprint(v.asMap())
|
||||
case ValueKindSlice:
|
||||
@@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) {
|
||||
case ValueKindInt64:
|
||||
return json.Marshal(struct {
|
||||
Value string `json:"intValue"`
|
||||
}{strconv.FormatInt(int64(v.num), 10)})
|
||||
}{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv.
|
||||
case ValueKindFloat64:
|
||||
return json.Marshal(struct {
|
||||
Value float64 `json:"doubleValue"`
|
||||
|
||||
25
src/runtime/vendor/go.opentelemetry.io/auto/sdk/span.go
generated
vendored
25
src/runtime/vendor/go.opentelemetry.io/auto/sdk/span.go
generated
vendored
@@ -6,6 +6,7 @@ package sdk
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -16,7 +17,7 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"go.opentelemetry.io/otel/trace/noop"
|
||||
|
||||
@@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
|
||||
limit := maxSpan.Attrs
|
||||
if limit == 0 {
|
||||
// No attributes allowed.
|
||||
s.span.DroppedAttrs += uint32(len(attrs))
|
||||
n := int64(len(attrs))
|
||||
if n > 0 {
|
||||
s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked.
|
||||
min(n, math.MaxUint32),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
|
||||
// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
|
||||
// number of dropped attributes is also returned.
|
||||
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
|
||||
n := len(attrs)
|
||||
if limit == 0 {
|
||||
return nil, uint32(len(attrs))
|
||||
var out uint32
|
||||
if n > 0 {
|
||||
out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked.
|
||||
}
|
||||
return nil, out
|
||||
}
|
||||
|
||||
if limit < 0 {
|
||||
@@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u
|
||||
return convAttrs(attrs), 0
|
||||
}
|
||||
|
||||
limit = min(len(attrs), limit)
|
||||
return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
|
||||
if n < 0 {
|
||||
n = 0
|
||||
}
|
||||
|
||||
limit = min(n, limit)
|
||||
return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked.
|
||||
}
|
||||
|
||||
func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
|
||||
|
||||
29
src/runtime/vendor/go.opentelemetry.io/auto/sdk/tracer.go
generated
vendored
29
src/runtime/vendor/go.opentelemetry.io/auto/sdk/tracer.go
generated
vendored
@@ -5,6 +5,7 @@ package sdk
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
@@ -21,15 +22,20 @@ type tracer struct {
|
||||
|
||||
var _ trace.Tracer = tracer{}
|
||||
|
||||
func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
|
||||
var psc trace.SpanContext
|
||||
func (t tracer) Start(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
opts ...trace.SpanStartOption,
|
||||
) (context.Context, trace.Span) {
|
||||
var psc, sc trace.SpanContext
|
||||
sampled := true
|
||||
span := new(span)
|
||||
|
||||
// Ask eBPF for sampling decision and span context info.
|
||||
t.start(ctx, span, &psc, &sampled, &span.spanContext)
|
||||
t.start(ctx, span, &psc, &sampled, &sc)
|
||||
|
||||
span.sampled.Store(sampled)
|
||||
span.spanContext = sc
|
||||
|
||||
ctx = trace.ContextWithSpan(ctx, span)
|
||||
|
||||
@@ -58,7 +64,13 @@ func (t *tracer) start(
|
||||
// start is used for testing.
|
||||
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
|
||||
|
||||
func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
|
||||
var intToUint32Bound = min(math.MaxInt, math.MaxUint32)
|
||||
|
||||
func (t tracer) traces(
|
||||
name string,
|
||||
cfg trace.SpanConfig,
|
||||
sc, psc trace.SpanContext,
|
||||
) (*telemetry.Traces, *telemetry.Span) {
|
||||
span := &telemetry.Span{
|
||||
TraceID: telemetry.TraceID(sc.TraceID()),
|
||||
SpanID: telemetry.SpanID(sc.SpanID()),
|
||||
@@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont
|
||||
|
||||
links := cfg.Links()
|
||||
if limit := maxSpan.Links; limit == 0 {
|
||||
span.DroppedLinks = uint32(len(links))
|
||||
n := len(links)
|
||||
if n > 0 {
|
||||
bounded := max(min(n, intToUint32Bound), 0)
|
||||
span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
|
||||
}
|
||||
} else {
|
||||
if limit > 0 {
|
||||
n := max(len(links)-limit, 0)
|
||||
span.DroppedLinks = uint32(n)
|
||||
bounded := min(n, intToUint32Bound)
|
||||
span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
|
||||
links = links[n:]
|
||||
}
|
||||
span.Links = convLinks(links)
|
||||
|
||||
3
src/runtime/vendor/go.opentelemetry.io/otel/.clomonitor.yml
generated
vendored
Normal file
3
src/runtime/vendor/go.opentelemetry.io/otel/.clomonitor.yml
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
exemptions:
|
||||
- check: artifacthub_badge
|
||||
reason: "Artifact Hub doesn't support Go packages"
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user