Mirror of https://github.com/k8snetworkplumbingwg/multus-cni.git, synced 2026-02-22 07:02:05 +00:00
Compare commits
76 Commits
705a59eaf9
c943f9ffa2
56d18efde0
921191dece
e091897b4c
ea389005a1
9c05100972
39d6a8ffd2
73dd0b4c3b
0c108bd0fc
f29a370f8e
1d0c2272db
f42e0bd8fa
173fdf538a
71d42a1baf
fc3053fc6d
2ecd4f4b47
cdf603f4db
7489eea315
369722ba7f
34e6dff08f
8cf05dac81
a234ce68f3
00adf22482
f6b42791b5
6dd955dba5
f18d96b648
18630fde0b
19f9283db4
1655d540cb
4517063b79
4104fea90d
528d4f150c
fa3c7cfee3
96bfb26dac
55ef3b1f0b
41321963b8
ef8f01b299
51752f1a6e
1821311479
ccfd8f5fea
2a91646eaf
47e5153714
21f7282088
641f6a3b63
e156e815ad
5892d705da
431a735eca
99d72d14a3
4a0b5073af
5216844263
7eb9673a1a
a439f91721
6d3d800226
fba1fea81e
f186370654
fc72ddbd24
fb03b0f754
5338017bf6
4ff141c18d
4fc16b3bb8
ddbcd2c4ef
781ecdaecd
808185b10f
e1a0d2a3fd
ecf5854ca9
adfb270991
b171bb702b
f1e887e239
100766d1a4
e074c2a56b
38d03eb816
b554c96160
92ff1b1ee8
31e77aafab
dec0607a94
.github/workflows/build.yml (2 changes)

@@ -4,7 +4,7 @@ jobs:
   build:
     strategy:
       matrix:
-        go-version: [1.20.x, 1.21.x]
+        go-version: [1.24.x]
         goarch: [386, amd64, arm, arm64, ppc64le, s390x]
         os: [ubuntu-latest] #, macos-latest, windows-latest]
     runs-on: ${{ matrix.os }}
.github/workflows/image-build.yml (22 changes)

@@ -13,7 +13,7 @@ jobs:

       # note: disable sbom/provenance for now (gchr.io does not managed well yet)
       - name: Build container image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           push: false

@@ -25,7 +25,7 @@ jobs:

       # note: disable sbom/provenance for now (gchr.io does not managed well yet)
       - name: Build container debug image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           push: false

@@ -46,7 +46,7 @@ jobs:
         uses: docker/setup-buildx-action@v3

       - name: Build container image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           push: false

@@ -56,6 +56,22 @@ jobs:
           sbom: false
           provenance: false

+      - name: Run Trivy vulnerability scanner
+        uses: aquasecurity/trivy-action@0.29.0
+        with:
+          image-ref: ghcr.io/${{ github.repository }}:latest-thick
+          ignore-unfixed: true
+          vuln-type: 'os,library'
+          severity: 'CRITICAL,HIGH'
+          format: 'sarif'
+          output: 'trivy-results.sarif'
+
+      - name: Upload Trivy scan results to GitHub Security tab
+        uses: github/codeql-action/upload-sarif@v3
+        if: always()
+        with:
+          sarif_file: 'trivy-results.sarif'
+
   build-origin:
     name: Image build/origin
     runs-on: ubuntu-latest
.github/workflows/kind-e2e.yml (17 changes)

@@ -85,15 +85,26 @@ jobs:
         working-directory: ./e2e
         run: ./test-default-route1.sh

-      - name: Test DRA integration
+      # - name: Test DRA integration
+      #   working-directory: ./e2e
+      #   run: ./test-dra-integration.sh
+      #
+
+      - name: Test subdirectory CNI chaining
         if: ${{ matrix.multus-manifest == 'multus-daemonset-thick.yml' }}
         working-directory: ./e2e
-        run: ./test-dra-integration.sh
+        run: ./test-subdirectory-chaining.sh
+
+      - name: Test subdirectory CNI chaining with passthru CNI / auxiliaryCNIChainName
+        if: ${{ matrix.multus-manifest == 'multus-daemonset-thick.yml' }}
+        working-directory: ./e2e
+        run: ./test-subdirectory-chaining-passthru.sh

       - name: Export kind logs
         if: always()
         run: |
           mkdir -p /tmp/kind/logs
-          kind export logs --loglevel=debug /tmp/kind/logs
+          kind export logs /tmp/kind/logs -v 2147483647

       - name: Upload kind logs
         if: always()
.github/workflows/release.yml (2 changes)

@@ -15,7 +15,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.19.x
+          go-version: 1.24.x

       - name: Run GoReleaser
         uses: goreleaser/goreleaser-action@v5
.github/workflows/test.yml (2 changes)

@@ -4,7 +4,7 @@ jobs:
   test:
     strategy:
       matrix:
-        go-version: [1.20.x, 1.21.x]
+        go-version: [1.24.x]
        os: [ubuntu-latest]
     runs-on: ${{ matrix.os }}
     steps:
Makefile (new file)

@@ -0,0 +1,14 @@

.PHONY: deps-update
deps-update: ; $(info Updating dependencies...) @ ## Update dependencies
	go mod tidy
	go mod vendor

PHONY: build test

build:
	./hack/build-go.sh

test:
	sudo ./hack/test-go.sh
@@ -24,10 +24,10 @@ Here's an illustration of the network interfaces attached to a pod, as provision

 The quickstart installation method for Multus requires that you have first installed a Kubernetes CNI plugin to serve as your pod-to-pod network, which we refer to as your "default network" (a network interface that every pod will be created with). Each network attachment created by Multus will be in addition to this default network interface. For more detail on installing a default network CNI plugin, refer to our [quick-start guide](docs/quickstart.md).

-Clone this GitHub repository, and apply a daemonset which installs Multus using `kubectl`. From the root directory of the clone, apply the daemonset YAML file:
+To use latest features try command below which applies a daemonset and installs thick Multus using `kubectl`:

 ```
-cat ./deployments/multus-daemonset-thick.yml | kubectl apply -f -
+kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml
 ```

 This will configure your systems to be ready to use Multus CNI, but, to get started with adding additional interfaces to your pods, refer to our complete [quick-start guide](docs/quickstart.md)

@@ -39,7 +39,7 @@ With the multus 4.0 release, we introduce a new client/server-style plugin deployment

 We recommend using the thick plugin in most environments, but if you wish to run the thin plugin, or are in a resource-constrained environment, you may do so with:

 ```
-cat ./deployments/multus-daemonset.yml | kubectl apply -f -
+kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml
 ```

 ## Additional Installation Options
@@ -53,4 +53,15 @@ func main() {
 	}

 	fmt.Printf("multus %s copy succeeded!\n", multusFileName)
+
+	// Copy the passthru CNI
+	passthruPath := "/usr/src/multus-cni/bin/passthru"
+	err = cmdutils.CopyFileAtomic(passthruPath, *destDir, fmt.Sprintf("%s.temp", "passthru"), "passthru")
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to copy file %s: %v\n", multusFileName, err)
+		os.Exit(1)
+	}
+
+	fmt.Printf("passthru cni %s copy succeeded!\n", passthruPath)
 }
@@ -61,6 +61,7 @@ func main() {

 	daemonConf, err := cniServerConfig(*configFilePath)
 	if err != nil {
 		logging.Panicf("startMultusDaemon failed to load the CNI server configuration: %v", err)
+		os.Exit(1)
 	}
@@ -44,15 +44,23 @@ func main() {
 		return
 	}

-	skel.PluginMain(
-		func(args *skel.CmdArgs) error {
-			return api.CmdAdd(args)
-		},
-		func(args *skel.CmdArgs) error {
-			return api.CmdCheck(args)
-		},
-		func(args *skel.CmdArgs) error {
-			return api.CmdDel(args)
+	skel.PluginMainFuncs(
+		skel.CNIFuncs{
+			Add: func(args *skel.CmdArgs) error {
+				return api.CmdAdd(args)
+			},
+			Check: func(args *skel.CmdArgs) error {
+				return api.CmdCheck(args)
+			},
+			Del: func(args *skel.CmdArgs) error {
+				return api.CmdDel(args)
+			},
+			GC: func(args *skel.CmdArgs) error {
+				return api.CmdGC(args)
+			},
+			Status: func(args *skel.CmdArgs) error {
+				return api.CmdStatus(args)
+			},
 		},
 		cniversion.All, "meta-plugin that delegates to other CNI plugins")
 }
@@ -43,17 +43,27 @@ func main() {
 		return
 	}

-	skel.PluginMain(
-		func(args *skel.CmdArgs) error {
-			result, err := multus.CmdAdd(args, nil, nil)
-			if err != nil {
-				return err
-			}
-			return result.Print()
+	skel.PluginMainFuncs(
+		skel.CNIFuncs{
+			Add: func(args *skel.CmdArgs) error {
+				result, err := multus.CmdAdd(args, nil, nil)
+				if err != nil {
+					return err
+				}
+				return result.Print()
+			},
+			Del: func(args *skel.CmdArgs) error {
+				return multus.CmdDel(args, nil, nil)
+			},
+			Check: func(args *skel.CmdArgs) error {
+				return multus.CmdCheck(args, nil, nil)
+			},
+			GC: func(args *skel.CmdArgs) error {
+				return multus.CmdGC(args, nil, nil)
+			},
+			Status: func(args *skel.CmdArgs) error {
+				return multus.CmdStatus(args, nil, nil)
+			},
 		},
-		func(args *skel.CmdArgs) error {
-			return multus.CmdCheck(args, nil, nil)
-		},
-		func(args *skel.CmdArgs) error { return multus.CmdDel(args, nil, nil) },
 		cniversion.All, "meta-plugin that delegates to other CNI plugins")
 }
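Both plugin main() hunks above perform the same migration: the positional skel.PluginMain(add, check, del, ...) call is replaced by skel.PluginMainFuncs, which takes a skel.CNIFuncs struct and lets the plugin also register GC and Status handlers. A minimal sketch of the new registration shape, with illustrative no-op handlers instead of the real multus/api delegations (this is not code from the repository):

```go
package main

import (
	"github.com/containernetworking/cni/pkg/skel"
	cniversion "github.com/containernetworking/cni/pkg/version"
)

func main() {
	// Handlers are named fields on skel.CNIFuncs rather than positional
	// arguments; GC and Status are the commands the migration adds.
	// The handlers below are no-ops for illustration only.
	skel.PluginMainFuncs(
		skel.CNIFuncs{
			Add:    func(args *skel.CmdArgs) error { return nil },
			Del:    func(args *skel.CmdArgs) error { return nil },
			Check:  func(args *skel.CmdArgs) error { return nil },
			GC:     func(args *skel.CmdArgs) error { return nil },
			Status: func(args *skel.CmdArgs) error { return nil },
		},
		cniversion.All,
		"example plugin skeleton",
	)
}
```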
cmd/passthru-cni/main.go (new file)

@@ -0,0 +1,58 @@
// Package: passthru-cni
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containernetworking/cni/pkg/skel"
	cniTypes "github.com/containernetworking/cni/pkg/types"
	current "github.com/containernetworking/cni/pkg/types/100"
	cniVersion "github.com/containernetworking/cni/pkg/version"
)

// NetConf is a CNI configuration structure
type NetConf struct {
	cniTypes.NetConf
}

func main() {
	skel.PluginMain(
		cmdAdd,
		nil,
		cmdDel,
		cniVersion.PluginSupports("0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0"),
		"Passthrough CNI Plugin v1.0",
	)
}

func cmdAdd(args *skel.CmdArgs) error {
	n, err := loadNetConf(args.StdinData)
	if err != nil {
		return fmt.Errorf("passthru cni: error parsing CNI configuration: %s", err)
	}

	// Create an empty but valid CNI result
	result := &current.Result{
		CNIVersion: n.CNIVersion,
		Interfaces: []*current.Interface{},
		IPs:        []*current.IPConfig{},
		Routes:     []*cniTypes.Route{},
		DNS:        cniTypes.DNS{},
	}

	return cniTypes.PrintResult(result, n.CNIVersion)
}

func cmdDel(_ *skel.CmdArgs) error {
	// Nothing to do for DEL command, just return nil
	return nil
}

func loadNetConf(bytes []byte) (*NetConf, error) {
	n := &NetConf{}
	if err := json.Unmarshal(bytes, n); err != nil {
		return nil, fmt.Errorf("passthru cni: failed to load netconf: %s", err)
	}
	return n, nil
}
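The passthru plugin is driven the same way any CNI plugin is: the runtime passes the command through the CNI_* environment variables and the network configuration on stdin, and for ADD the plugin prints an empty-but-valid result. A rough sketch of exercising a locally built binary that way — the ./bin/passthru path, container ID, and netns value are placeholders for illustration, not values taken from this repository:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Minimal CNI config for the plugin, delivered on stdin as a runtime would.
	conf := `{"cniVersion": "1.0.0", "name": "passthru-test", "type": "passthru"}`

	cmd := exec.Command("./bin/passthru") // assumed local build output path
	cmd.Stdin = strings.NewReader(conf)
	cmd.Env = append(os.Environ(),
		"CNI_COMMAND=ADD",
		"CNI_CONTAINERID=example-container", // placeholder
		"CNI_NETNS=/var/run/netns/example",  // placeholder netns path
		"CNI_IFNAME=eth0",
		"CNI_PATH=./bin",
	)

	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Fprintf(os.Stderr, "passthru ADD failed: %v\n%s", err, out)
		os.Exit(1)
	}
	// Expect an empty result: no interfaces, IPs, or routes.
	fmt.Printf("passthru ADD result: %s\n", out)
}
```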
@@ -21,6 +21,7 @@ import (
 	b64 "encoding/base64"
 	"encoding/json"
 	"fmt"
+	"net"
 	"os"
 	"path/filepath"
 	"strings"

@@ -28,6 +29,7 @@ import (
 	"time"

 	"github.com/containernetworking/cni/libcni"
+	cniversion "github.com/containernetworking/cni/pkg/version"
 	"github.com/spf13/pflag"

 	"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/cmdutils"

@@ -225,7 +227,7 @@ func (o *Options) createKubeConfig(prevCAHash, prevSATokenHash []byte) ([]byte,
 		return nil, nil, fmt.Errorf("template parse error: %v", err)
 	}
 	templateData := map[string]string{
-		"KubeConfigHost":          fmt.Sprintf("%s://[%s]:%s", kubeProtocol, kubeHost, kubePort),
+		"KubeConfigHost":          fmt.Sprintf("%s://%s", kubeProtocol, net.JoinHostPort(kubeHost, kubePort)),
 		"KubeServerTLS":           tlsConfig,
 		"KubeServiceAccountToken": string(saTokenByte),
 	}

@@ -496,14 +498,14 @@ func (o *Options) createMultusConfig(prevMasterConfigFileHash []byte) (string, [
 		return "", nil, fmt.Errorf("cannot create multus cni temp file: %v", err)
 	}

-	// use conflist template if cniVersionConfig == "1.0.0"
+	// use conflist template if cniVersionConfig >= "1.0.0"
 	multusConfFilePath := fmt.Sprintf("%s/00-multus.conf", o.CNIConfDir)
 	templateMultusConfig, err := template.New("multusCNIConfig").Parse(multusConfTemplate)
 	if err != nil {
 		return "", nil, fmt.Errorf("template parse error: %v", err)
 	}

-	if o.CNIVersion == "1.0.0" { //Check 1.0.0 or above!
+	if gt, err := cniversion.GreaterThanOrEqualTo(o.CNIVersion, "1.0.0"); err == nil && gt {
 		multusConfFilePath = fmt.Sprintf("%s/00-multus.conflist", o.CNIConfDir)
 		templateMultusConfig, err = template.New("multusCNIConfig").Parse(multusConflistTemplate)
 		if err != nil {
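Two of the hunks above (createKubeConfig and createMultusConfig) are behavior fixes rather than pure refactors: the kubeconfig host is now assembled with net.JoinHostPort, which brackets IPv6 literals only when needed (the old format string bracketed every host), and the conf-vs-conflist decision now uses cniversion.GreaterThanOrEqualTo instead of an exact string match against "1.0.0", so newer versions such as 1.1.0 also get the conflist template. A small standalone sketch of both behaviors, with placeholder host and version values:

```go
package main

import (
	"fmt"
	"net"

	cniversion "github.com/containernetworking/cni/pkg/version"
)

func main() {
	// JoinHostPort brackets only IPv6 literals, so both address families
	// render correctly; the old fmt.Sprintf("%s://[%s]:%s", ...) would have
	// produced e.g. https://[10.0.0.1]:443 for IPv4 hosts as well.
	fmt.Println("https://" + net.JoinHostPort("10.0.0.1", "443")) // https://10.0.0.1:443
	fmt.Println("https://" + net.JoinHostPort("fd00::1", "443"))  // https://[fd00::1]:443

	// GreaterThanOrEqualTo matches 1.0.0 and anything newer (e.g. 1.1.0),
	// which an exact == "1.0.0" comparison would miss.
	for _, v := range []string{"0.4.0", "1.0.0", "1.1.0"} {
		ge, err := cniversion.GreaterThanOrEqualTo(v, "1.0.0")
		fmt.Printf("%s >= 1.0.0: %v (err: %v)\n", v, ge, err)
	}
}
```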
@@ -318,6 +318,56 @@ var _ = Describe("thin entrypoint testing", func() {
 		Expect(os.RemoveAll(tmpDir)).To(Succeed())
 	})

+	It("Run createMultusConfig(), default, conflist for cniVersion 1.1.0", func() {
+		// create directory and files
+		tmpDir, err := os.MkdirTemp("", "multus_thin_entrypoint_tmp")
+		Expect(err).NotTo(HaveOccurred())
+
+		multusAutoConfigDir := fmt.Sprintf("%s/auto_conf", tmpDir)
+		cniConfDir := fmt.Sprintf("%s/cni_conf", tmpDir)
+
+		Expect(os.Mkdir(multusAutoConfigDir, 0755)).To(Succeed())
+		Expect(os.Mkdir(cniConfDir, 0755)).To(Succeed())
+
+		// create master CNI config
+		masterCNIConfig := `
+		{
+			"cniVersion": "1.1.0",
+			"name": "test1",
+			"type": "cnitesttype"
+		}`
+		Expect(os.WriteFile(fmt.Sprintf("%s/10-testcni.conf", multusAutoConfigDir), []byte(masterCNIConfig), 0755)).To(Succeed())
+
+		masterConfigPath, masterConfigHash, err := (&Options{
+			MultusAutoconfigDir:      multusAutoConfigDir,
+			CNIConfDir:               cniConfDir,
+			MultusKubeConfigFileHost: "/etc/foobar_kubeconfig",
+		}).createMultusConfig(nil)
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(masterConfigPath).NotTo(Equal(""))
+		Expect(masterConfigHash).NotTo(Equal(""))
+
+		expectedResult :=
+			`{
+	"cniVersion": "1.1.0",
+	"name": "multus-cni-network",
+	"plugins": [ {
+		"type": "multus",
+		"logToStderr": false,
+		"kubeconfig": "/etc/foobar_kubeconfig",
+		"delegates": [
+			{"cniVersion":"1.1.0","name":"test1","type":"cnitesttype"}
+		]
+	}]
+}
+`
+		conf, err := os.ReadFile(fmt.Sprintf("%s/00-multus.conflist", cniConfDir))
+		Expect(string(conf)).To(Equal(expectedResult))
+
+		Expect(os.RemoveAll(tmpDir)).To(Succeed())
+	})
+
 	It("Run createMultusConfig(), capabilities, conflist", func() {
 		// create directory and files
 		tmpDir, err := os.MkdirTemp("", "multus_thin_entrypoint_tmp")
@@ -192,6 +192,8 @@ spec:
         - name: hostroot
           mountPath: /hostroot
           mountPropagation: HostToContainer
+        - mountPath: /etc/cni/multus/net.d
+          name: multus-conf-dir
         env:
         - name: MULTUS_NODE_NAME
           valueFrom:

@@ -201,9 +203,11 @@ spec:
       - name: install-multus-binary
         image: ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot-thick
         command:
-        - "cp"
-        - "/usr/src/multus-cni/bin/multus-shim"
-        - "/host/opt/cni/bin/multus-shim"
+        - "/usr/src/multus-cni/bin/install_multus"
+        - "-d"
+        - "/host/opt/cni/bin"
+        - "-t"
+        - "thick"
         resources:
           requests:
             cpu: "10m"

@@ -247,3 +251,6 @@ spec:
       - name: host-run-netns
         hostPath:
          path: /run/netns/
+      - name: multus-conf-dir
+        hostPath:
+          path: /etc/cni/multus/net.d
@@ -2,7 +2,7 @@

 ## Introduction

-Aside from setting options for Multus, one of the goals of configuration is to set the configuration for your *default network*. The default network is also sometimes referred as the "primary CNI plugin", the "primary network", or a "default CNI plugin" and is the CNI plugin that is used to implement [the Kubernetes networking model](https://kubernetes.io/docs/concepts/services-networking/#the-kubernetes-network-model) in your cluster. Common examples include Flannel, Weave, Calico, Cillium, and OVN-Kubernetes, among others.
+Aside from setting options for Multus, one of the goals of configuration is to set the configuration for your *default network*. The default network is also sometimes referred as the "primary CNI plugin", the "primary network", or a "default CNI plugin" and is the CNI plugin that is used to implement [the Kubernetes networking model](https://kubernetes.io/docs/concepts/services-networking/#the-kubernetes-network-model) in your cluster. Common examples include Flannel, Weave, Calico, Cilium, and OVN-Kubernetes, among others.

 Here we will refer to this as your default CNI plugin or default network.

@@ -37,6 +37,7 @@ Example configuration using `clusterNetwork` (see also [using delegates](#using-
   "defaultNetworks": ["sidecarCRD", "exampleNetwork"],
   "systemNamespaces": ["kube-system", "admin"],
   "multusNamespace": "kube-system",
+  "auxiliaryCNIChainName": "cni-chain-config",
   allowTryDeleteOnErr: false
 }
 ```

@@ -63,6 +64,7 @@ message to next when some missing error. Defaults to false.
 * `systemNamespaces` ([]string, optional): list of namespaces for Kubernetes system (namespaces listed here will not have `defaultNetworks` added)
 * `multusNamespace` (string, optional): namespace for `clusterNetwork`/`defaultNetworks` (the default value is `kube-system`)
 * `retryDeleteOnError` (bool, optional): Enable or disable delegate DEL
+* [`auxiliaryCNIChainName`](#auxiliaryCNIChainName) (string, optional): Enable loading CNI configurations from disk as chained plugins in an auxiliary CNI chain

 ### Using `clusterNetwork`

@@ -380,3 +382,47 @@ annotations:
   v1.multus-cni.io/default-network: calico-conf
 ...
 ```
+
+### `auxiliaryCNIChainName`
+
+`auxiliaryCNIChainName` (of value string) is used to express the name of an additional auxiliary CNI chain that will execute in order to composably execute chained CNI plugins from configurations on the host's disk in a subdirectory of the CNI configuration directory.
+
+**NOTE**: The path used to determine the base for the subdirectory is the pathname of the `clusterNetwork` value, which must be set to a file in order to use this functionality.
+
+When this string is set, Multus will execute an additional CNI chain, outside of the default network, on its own independent CNI chain (as to not interfere with default network functionality that might be hampered by CNI chaining and to otherwise isolate this execution) and will load CNI configurations from a subdirectory of the same name in the CNI configuration directory.
+
+This feature is based on [improvements made to libcni for "safe subdirectory-based plugin conf loading"](https://github.com/containernetworking/cni/pull/1052).
+
+`auxiliaryCNIChainName` is meant to be set as a CNI configuration name, this name is arbitrary but must match the subdirectory name.
+
+Consider this [daemon configuration](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/deployments/multus-daemonset-thick.yml#L113):
+
+```
+{
+    "cniConfigDir": "/host/etc/cni/net.d",
+    "multusAutoconfigDir": "/host/etc/cni/net.d",
+    "multusConfigFile": "auto",
+    "socketDir": "/host/run/multus/",
+    "auxiliaryCNIChainName": "cni-chain-config"
+}
+```
+
+Here we have set `"auxiliaryCNIChainName": "cni-chain-config"`, and we have expressed that our CNI configurations are on `/etc/cni/net.d/` on the host.
+
+In this case, we would also have a directory named in `/etc/cni/net.d/cni-chain-config`
+
+One could add any number of CNI configurations to be used as part of this chain, consider this example if we added a tuning CNI configuration called `/etc/cni/net.d/cni-chain-config/mytuning.conf` with these contents:
+
+```
+{
+  "name": "mytuning",
+  "type": "tuning",
+  "sysctl": {
+    "net.ipv4.conf.IFNAME.arp_filter": "1"
+  }
+}
+```
+
+With the given configuration, plus this configuration, this would be executed for every pod launched by Multus CNI.
+
+If this is unset, no auxiliary chain will be executed. However, if the default network CNI configuration is loaded from disk and is a conflist format, the libcni functionality for loading from a subdirectory will still apply.
@@ -39,7 +39,7 @@ cd multus-cni
 ./hack/build-go.sh
 ```

-## How do I run CI tests?
+## How do I run the unit tests?

 Multus has go unit tests (based on ginkgo framework).The following commands drive CI tests manually in your environment:

@@ -47,6 +47,10 @@ Multus has go unit tests (based on ginkgo framework).The following commands driv
 sudo ./hack/test-go.sh
 ```

+## How do I run the e2e tests?
+
+Check the `README.md` in the `./e2e/` folder.
+
 ## What are the best practices for logging?

 The following are the best practices for multus logging:
@@ -19,13 +19,13 @@ You may acquire the Multus binary via compilation (see the [developer guide](dev

 *Via Daemonset method*

-As a [quickstart](quickstart.md), you may apply these YAML files (included in the clone of this repository). Run this command (typically you would run this on the master, or wherever you have access to the `kubectl` command to manage your cluster).
+As a [quickstart](quickstart.md), you may apply these YAML files. Run this command (typically you would run this on the master, or wherever you have access to the `kubectl` command to manage your cluster).

-    cat ./deployments/multus-daemonset.yml | kubectl apply -f -        # thin deployment
+    kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml        # thin deployment

 or

-    cat ./deployments/multus-daemonset-thick.yml | kubectl apply -f -  # thick (client/server) deployment
+    kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml  # thick (client/server) deployment

 If you need more comprehensive detail, continue along with this guide, otherwise, you may wish to either [follow the quickstart guide]() or skip to the ['Create network attachment definition'](#create-network-attachment-definition) section.

@@ -72,6 +72,7 @@ is provided.
 - `"logLevel"`: the logging level for the multus daemon logs.
 - `"logToStderr"`: enable this to have the daemon multus logs echoed to stderr
   as well. By default, it is disabled.
+- `"auxiliaryCNIChainName"`: set a value to execute chained cni configurations from disk in an auxiliary CNI chain (see details in [configuration.md](configuration.md))

 In addition, you can add any configuration which is in [configuration reference](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/configuration.md#multus-cni-configuration-reference). Server configuration override multus CNI configuration (e.g. `/etc/cni/net.d/00-multus.conf`)
@@ -5,7 +5,7 @@
 To run the e2e test, you need the following components:

 - curl
-- jinjanator
+- jinjanator (optional)
 - docker

 ### How to test e2e

@@ -14,7 +14,23 @@ To run the e2e test, you need the following components:
 $ git clone https://github.com/k8snetworkplumbingwg/multus-cni.git
 $ cd multus-cni/e2e
 $ ./get_tools.sh
+```
+
+If you have `jinjanator` you can generate the YAML with:
+
+```
 $ ./generate_yamls.sh
+```
+
+Alternatively, if you have trouble with it, use the `sed` script.
+
+```
+$ ./e2e/sed_generate_yaml.sh
+```
+
+Then, setup the cluster
+
+```
 $ ./setup_cluster.sh
 $ ./test-simple-macvlan1.sh
 ```
@@ -5,7 +5,7 @@ if [ ! -d bin ]; then
 	mkdir bin
 fi

-curl -Lo ./bin/kind "https://github.com/kubernetes-sigs/kind/releases/download/v0.22.0/kind-$(uname)-amd64"
+curl -Lo ./bin/kind "https://github.com/kubernetes-sigs/kind/releases/download/v0.27.0/kind-$(uname)-amd64"
 chmod +x ./bin/kind
 curl -Lo ./bin/kubectl https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
 chmod +x ./bin/kubectl
e2e/sed_generate_yaml.sh (new executable file)

@@ -0,0 +1,17 @@
#!/bin/sh

if [ ! -d yamls ]; then
	mkdir yamls
fi

# specify CNI version (default: 0.4.0)
CNI_VERSION=${CNI_VERSION:-0.4.0}

templates_dir="$(dirname $(readlink -f $0))/templates"

# generate yaml files based on templates/*.j2 to yamls directory
for i in `ls ${templates_dir}/*.j2`; do
	echo "Processing $i..."
	# Use sed to replace the placeholder with the CNI_VERSION variable
	sed "s/{{ CNI_VERSION }}/$CNI_VERSION/g" $i > yamls/$(basename ${i%.j2})
done
@@ -27,6 +27,23 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    kubeadmConfigPatches:
      - |
        kind: ClusterConfiguration
        apiServer:
          extraArgs:
            runtime-config: "resource.k8s.io/v1beta1=true"
        scheduler:
          extraArgs:
            v: "1"
        controllerManager:
          extraArgs:
            v: "1"
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            v: "1"
  - role: worker
    kubeadmConfigPatches:
      - |

@@ -34,14 +51,33 @@ nodes:
        nodeRegistration:
          kubeletExtraArgs:
            pod-manifest-path: "/etc/kubernetes/manifests/"
            feature-gates: "DynamicResourceAllocation=true,KubeletPodResourcesDynamicResources=true"
            feature-gates: "DynamicResourceAllocation=true,DRAResourceClaimDeviceStatus=true,KubeletPodResourcesDynamicResources=true"
      - |
        kind: JoinConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            v: "1"
  - role: worker
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            pod-manifest-path: "/etc/kubernetes/manifests/"
            feature-gates: "DynamicResourceAllocation=true,DRAResourceClaimDeviceStatus=true,KubeletPodResourcesDynamicResources=true"
      - |
        kind: JoinConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            v: "1"
# Required by DRA Integration
##
featureGates:
  DynamicResourceAllocation: true
  DRAResourceClaimDeviceStatus: true
  KubeletPodResourcesDynamicResources: true
runtimeConfig:
  "api/alpha": "true"
  "api/beta": "true"
containerdConfigPatches:
  # Enable CDI as described in
  # https://github.com/container-orchestrated-devices/container-device-interface#containerd-configuration

@@ -66,4 +102,4 @@ sleep 1
kubectl -n kube-system wait --for=condition=ready -l name=multus pod --timeout=300s
kubectl create -f yamls/cni-install.yml
sleep 1
kubectl -n kube-system wait --for=condition=ready -l name=cni-plugins pod --timeout=300s
kubectl -n kube-system wait --for=condition=ready -l name=cni-plugins pod --timeout=400s
@@ -1,18 +1,21 @@
 ---
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1beta1
 kind: ResourceClaimTemplate
 metadata:
-  name: gpu.example.com
+  name: single-gpu
 spec:
   spec:
-    resourceClassName: gpu.example.com
+    devices:
+      requests:
+      - name: gpu
+        deviceClassName: gpu.example.com
 ---
 apiVersion: "k8s.cni.cncf.io/v1"
 kind: NetworkAttachmentDefinition
 metadata:
-  name: dra-net
+  name: dra-net
   annotations:
-    k8s.v1.cni.cncf.io/resourceName: gpu.example.com
+    k8s.v1.cni.cncf.io/resourceName: single-gpu
 spec:
   config: '{
     "cniVersion": "{{ CNI_VERSION }}",

@@ -45,5 +48,4 @@ spec:
       - name: gpu
   resourceClaims:
   - name: gpu
-    source:
-      resourceClaimTemplateName: gpu.example.com
+    resourceClaimTemplateName: single-gpu
@@ -170,9 +170,11 @@ spec:
       - name: install-multus-shim
         image: localhost:5000/multus:e2e
         command:
-        - "cp"
-        - "/usr/src/multus-cni/bin/multus-shim"
-        - "/host/opt/cni/bin/multus-shim"
+        - "/usr/src/multus-cni/bin/install_multus"
+        - "-d"
+        - "/host/opt/cni/bin"
+        - "-t"
+        - "thick"
         resources:
           requests:
            cpu: "10m"
@@ -0,0 +1,26 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: multus-daemon-config
  namespace: kube-system
  labels:
    tier: node
    app: multus
data:
  daemon-config.json: |
    {
      "confDir": "/host/etc/cni/net.d",
      "logToStderr": true,
      "logLevel": "debug",
      "logFile": "/tmp/multus.log",
      "binDir": "/host/opt/cni/bin",
      "cniDir": "/var/lib/cni/multus",
      "socketDir": "/host/run/multus",
      "cniVersion": "{{ CNI_VERSION }}",
      "cniConfigDir": "/host/etc/cni/net.d",
      "multusConfigFile": "auto",
      "forceCNIVersion": true,
      "multusAutoconfigDir": "/host/etc/cni/net.d",
      "auxiliaryCNIChainName": "vendor-cni-chain"
    }
e2e/templates/subdirectory-chaining-passthru.yml.j2 (new file)

@@ -0,0 +1,94 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cni-setup-script
  namespace: default
data:
  setup.sh: |
    #!/bin/bash
    set -euxo pipefail

    DEFAULT_NETWORK_CNI_NAME="vendor-cni-chain"

    cleanup() {
      echo "Cleaning up..."
      rm -f /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
      if [ $? -ne 0 ]; then
        echo "Failed to remove sysctltwiddle.conf" >&2
        exit 1
      fi
      echo "Cleanup completed successfully"
    }
    trap cleanup EXIT

    # Create the chained CNI directory if it doesn't exist
    mkdir -p /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}
    if [ $? -ne 0 ]; then
      echo "Failed to create directory /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}" >&2
      exit 1
    fi

    # Write the chained tuning CNI config
    cat <<EOF > /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
    {
      "cniVersion": "{{ CNI_VERSION }}",
      "name": "sysctltwiddle",
      "type": "tuning",
      "sysctl": {
        "net.ipv4.conf.eth0.arp_filter": "1"
      }
    }
    EOF

    if [ $? -ne 0 ]; then
      echo "Failed to create chained CNI config" >&2
      exit 1
    fi

    echo "CNI chained setup completed successfully."
    sleep infinity
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cni-setup-daemonset
  namespace: default
  labels:
    app: cni-setup
spec:
  selector:
    matchLabels:
      app: cni-setup
  template:
    metadata:
      labels:
        app: cni-setup
    spec:
      tolerations:
      - operator: Exists
        effect: NoSchedule
      - operator: Exists
        effect: NoExecute
      containers:
      - name: setup
        image: quay.io/fedora/fedora:40
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni-config
          mountPath: /host/etc/cni/net.d
        - name: script-volume
          mountPath: /scripts
        command: ["/bin/bash", "/scripts/setup.sh"]
      volumes:
      - name: cni-config
        hostPath:
          path: /etc/cni/net.d
          type: Directory
      - name: script-volume
        configMap:
          name: cni-setup-script
          items:
          - key: setup.sh
            path: setup.sh
e2e/templates/subdirectory-chaining-pod.yml.j2 (new file)

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-modified
spec:
  containers:
  - name: sysctl
    image: quay.io/dosmith/fedora-procps
    command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
    securityContext:
      privileged: true
e2e/templates/subdirectory-chaining.yml.j2 (new file)

@@ -0,0 +1,95 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cni-setup-script
  namespace: default
data:
  setup.sh: |
    #!/bin/bash
    set -euxo pipefail

    DEFAULT_NETWORK_CNI_NAME="kindnet"

    cleanup() {
      echo "Cleaning up..."
      rm -f /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
      if [ $? -ne 0 ]; then
        echo "Failed to remove sysctltwiddle.conf" >&2
        exit 1
      fi
      echo "Cleanup completed successfully"
    }
    trap cleanup EXIT

    # Create the chained CNI directory if it doesn't exist
    mkdir -p /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}
    if [ $? -ne 0 ]; then
      echo "Failed to create directory /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}" >&2
      exit 1
    fi

    # Write the chained tuning CNI config
    cat <<EOF > /host/etc/cni/net.d/${DEFAULT_NETWORK_CNI_NAME}/sysctltwiddle.conf
    {
      "cniVersion": "{{ CNI_VERSION }}",
      "name": "sysctltwiddle",
      "type": "tuning",
      "sysctl": {
        "net.ipv4.conf.IFNAME.arp_filter": "1"
      }
    }
    EOF

    if [ $? -ne 0 ]; then
      echo "Failed to create chained CNI config" >&2
      exit 1
    fi

    echo "CNI chained setup completed successfully."
    sleep infinity
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cni-setup-daemonset
  namespace: default
  labels:
    app: cni-setup
spec:
  selector:
    matchLabels:
      app: cni-setup
  template:
    metadata:
      labels:
        app: cni-setup
    spec:
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      - operator: Exists
        effect: NoExecute
      containers:
      - name: setup
        image: quay.io/fedora/fedora:40
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni-config
          mountPath: /host/etc/cni/net.d
        - name: script-volume
          mountPath: /scripts
        command: ["/bin/bash", "/scripts/setup.sh"]
      volumes:
      - name: cni-config
        hostPath:
          path: /etc/cni/net.d
          type: Directory
      - name: script-volume
        configMap:
          name: cni-setup-script
          items:
          - key: setup.sh
            path: setup.sh
@@ -15,9 +15,12 @@ echo "installing dra-example-driver"
 repo_path="repos/dra-example-driver"

 rm -rf $repo_path || true
-git clone --branch classic-dra https://github.com/kubernetes-sigs/dra-example-driver.git ${repo_path}
-${repo_path}/demo/build-driver.sh
-KIND_CLUSTER_NAME=kind ${repo_path}/demo/scripts/load-driver-image-into-kind.sh
+git clone --branch main https://github.com/kubernetes-sigs/dra-example-driver.git ${repo_path}
+MULTUS_DIR=$(pwd)
+cd ${repo_path}
+./demo/build-driver.sh
+KIND_CLUSTER_NAME=kind ./demo/scripts/load-driver-image-into-kind.sh
+cd "$MULTUS_DIR"
 chart_path=${repo_path}/deployments/helm/dra-example-driver/
 overriden_values_path=${chart_path}/overriden_values.yaml

@@ -47,7 +50,7 @@ echo "check dra-integration pod for DRA injected environment variable"
 # We can validate that the resource is correctly injected by checking an environment variable this dra driver is injecting
 # in the Pod.
 # https://github.com/kubernetes-sigs/dra-example-driver/blob/be2b8b1db47b8c757440e955ce5ced88c23bfe86/cmd/dra-example-kubeletplugin/cdi.go#L71C20-L71C44
-env_variable=$(kubectl exec dra-integration -- bash -c "echo \$DRA_RESOURCE_DRIVER_NAME | grep gpu.resource.example.com")
+env_variable=$(kubectl exec dra-integration -- bash -c "echo \$DRA_RESOURCE_DRIVER_NAME | grep gpu.example.com")
 if [ $? -eq 0 ];then
 	echo "dra-integration pod has DRA injected environment variable"
 else
e2e/test-subdirectory-chaining-passthru.sh (new executable file)

@@ -0,0 +1,81 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail

export PATH=${PATH}:./bin

TEST_POD_NAME="sysctl-modified"
EXPECTED_BINARIES="${EXPECTED_BINARIES:-/opt/cni/bin/ptp /opt/cni/bin/portmap /opt/cni/bin/tuning}"
EXPECTED_CNI_DIR="/etc/cni/net.d"

# Reconfigure multus
echo "Applying subdirectory chain passthru config..."
kubectl apply -f yamls/subdirectory-chain-passthru-configupdate.yml

# Restart the multus daemonset to pick up the new config
echo "Restarting Multus DaemonSet..."
kubectl rollout restart daemonset kube-multus-ds-amd64 -n kube-system
kubectl rollout status daemonset/kube-multus-ds-amd64 -n kube-system

# Debug: show CNI configs and binaries inside each Kind node
echo "Checking CNI configs and binaries on nodes..."

for node in $(kubectl get nodes --no-headers | awk '{print $1}'); do
	container_name=$(docker ps --format '{{.Names}}' | grep "^${node}$")

	echo "------"
	echo "Node: ${node} (container: ${container_name})"
	echo "Listing /opt/cni/bin contents..."
	docker exec "${container_name}" ls -l /opt/cni/bin || echo "WARNING: /opt/cni/bin missing!"

	echo "Checking expected binaries..."
	for bin in $EXPECTED_BINARIES; do
		echo "Checking for ${bin}..."
		if docker exec "${container_name}" test -f "${bin}"; then
			echo "SUCCESS: ${bin} found."
		else
			echo "FAIL: ${bin} NOT found!"
		fi
	done

	echo "Listing /etc/cni/net.d configs..."
	docker exec "${container_name}" ls -l ${EXPECTED_CNI_DIR} || echo "WARNING: ${EXPECTED_CNI_DIR} missing!"
done
echo "------"

# Deploy the daemonset that will lay down the chained CNI config
echo "Applying CNI setup DaemonSet..."
kubectl apply -f yamls/subdirectory-chaining-passthru.yml

# Wait for the daemonset pods to be ready (make sure they set up CNI config)
echo "Waiting for CNI setup DaemonSet to be Ready..."
kubectl rollout status daemonset/cni-setup-daemonset --timeout=300s

# Deploy a test pod that will get chained CNI applied
echo "Applying test pod..."
kubectl apply -f yamls/subdirectory-chaining-pod.yml

# Wait for the pod to be Ready
echo "Waiting for test pod to be Ready..."
kubectl wait --for=condition=ready pod/${TEST_POD_NAME} --timeout=300s

# Check that the sysctl got set
echo "Verifying sysctl arp_filter is set to 1 on eth0..."

SYSCTL_VALUE=$(kubectl exec ${TEST_POD_NAME} -- sysctl -n net.ipv4.conf.eth0.arp_filter)

if [ "$SYSCTL_VALUE" != "1" ]; then
	echo "FAIL: net.ipv4.conf.eth0.arp_filter is not set to 1, got ${SYSCTL_VALUE}" >&2
	exit 1
else
	echo "SUCCESS: net.ipv4.conf.eth0.arp_filter is set correctly."
fi

# Cleanup
echo "Cleaning up test resources..."
kubectl delete -f yamls/subdirectory-chaining-pod.yml
kubectl delete -f yamls/subdirectory-chaining-passthru.yml

echo "Test completed successfully."
exit 0
e2e/test-subdirectory-chaining.sh (new executable file)

@@ -0,0 +1,37 @@
#!/bin/sh
set -o errexit

export PATH=${PATH}:./bin

TEST_POD_NAME="sysctl-modified"

# Deploy the daemonset that will lay down the chained CNI config
kubectl apply -f yamls/subdirectory-chaining.yml

# Wait for the daemonset pods to be ready (we need the config to be laid down)
kubectl rollout status daemonset/cni-setup-daemonset

# Deploy a test pod that will get chained CNI applied
kubectl apply -f yamls/subdirectory-chaining-pod.yml

# Wait for the pod to be Ready
kubectl wait --for=condition=ready pod/sysctl-modified --timeout=300s

# Check that the sysctl got set properly inside the pod's eth0 interface
echo "Verifying sysctl arp_filter is set to 1 on eth0"

SYSCTL_VALUE=$(kubectl exec sysctl-modified -- sysctl -n net.ipv4.conf.eth0.arp_filter)

if [ "$SYSCTL_VALUE" != "1" ]; then
	echo "FAIL: net.ipv4.conf.eth0.arp_filter is not set to 1, got ${SYSCTL_VALUE}" >&2
	exit 1
else
	echo "SUCCESS: net.ipv4.conf.eth0.arp_filter is set correctly."
fi

# 6. Clean up
echo "Cleaning up test resources"
kubectl delete -f yamls/subdirectory-chaining-pod.yml
kubectl delete -f yamls/subdirectory-chaining.yml

exit 0
go.mod (149 changes)

@@ -1,112 +1,77 @@
module gopkg.in/k8snetworkplumbingwg/multus-cni.v4

go 1.21
go 1.24.11

require (
	github.com/blang/semver v3.5.1+incompatible
	github.com/containernetworking/cni v1.2.0-rc1
	github.com/containernetworking/plugins v1.1.0
	github.com/fsnotify/fsnotify v1.6.0
	github.com/go-logr/logr v1.3.0 // indirect
	github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1
	github.com/onsi/ginkgo/v2 v2.13.2
	github.com/onsi/gomega v1.30.0
	github.com/pkg/errors v0.9.1 // indirect
	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
	golang.org/x/net v0.23.0
	golang.org/x/sys v0.18.0
	google.golang.org/grpc v1.58.3
	gopkg.in/natefinch/lumberjack.v2 v2.0.0
	k8s.io/api v0.29.0
	k8s.io/apimachinery v0.29.0
	k8s.io/client-go v1.5.2
	github.com/containernetworking/cni v1.3.0
	github.com/containernetworking/plugins v1.9.0
	github.com/fsnotify/fsnotify v1.9.0
	github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6
	github.com/onsi/ginkgo/v2 v2.25.1
	github.com/onsi/gomega v1.38.1
	github.com/prometheus/client_golang v1.22.0
	github.com/spf13/pflag v1.0.6
	github.com/vishvananda/netlink v1.3.1
	golang.org/x/net v0.43.0
	golang.org/x/sys v0.35.0
	google.golang.org/grpc v1.73.0
	gopkg.in/natefinch/lumberjack.v2 v2.2.1
	k8s.io/api v0.34.1
	k8s.io/apimachinery v0.34.1
	k8s.io/client-go v0.34.1
	k8s.io/klog v1.0.0
	k8s.io/klog/v2 v2.110.1
	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
	k8s.io/kubelet v0.27.5
	sigs.k8s.io/yaml v1.3.0 // indirect
	k8s.io/klog/v2 v2.130.1
	k8s.io/kubelet v0.34.1
)

require (
	github.com/prometheus/client_golang v1.16.0
	github.com/spf13/pflag v1.0.5
)

require (
	github.com/Masterminds/semver/v3 v3.2.1 // indirect
	github.com/Masterminds/semver/v3 v3.4.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-openapi/jsonpointer v0.21.0 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.3 // indirect
	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
	github.com/go-openapi/swag v0.23.0 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/gnostic v0.7.0 // indirect
	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/imdario/mergo v0.3.11 // indirect
	github.com/google/gnostic-models v0.7.0 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/prometheus/client_model v0.4.0 // indirect
	github.com/prometheus/common v0.44.0 // indirect
	github.com/prometheus/procfs v0.10.1 // indirect
	github.com/vishvananda/netns v0.0.4 // indirect
	golang.org/x/oauth2 v0.10.0 // indirect
	golang.org/x/term v0.18.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/tools v0.14.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
	google.golang.org/protobuf v1.33.0 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.62.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/vishvananda/netns v0.0.5 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	go.yaml.in/yaml/v2 v2.4.2 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/oauth2 v0.28.0 // indirect
	golang.org/x/term v0.34.0 // indirect
	golang.org/x/text v0.28.0 // indirect
	golang.org/x/time v0.9.0 // indirect
	golang.org/x/tools v0.36.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
	google.golang.org/protobuf v1.36.7 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

replace (
	github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
	k8s.io/api => k8s.io/api v0.29.0
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0
	k8s.io/apimachinery => k8s.io/apimachinery v0.29.0
	k8s.io/apiserver => k8s.io/apiserver v0.29.0
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0
	k8s.io/client-go => k8s.io/client-go v0.29.0
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0
	k8s.io/code-generator => k8s.io/code-generator v0.29.0
	k8s.io/component-base => k8s.io/component-base v0.29.0
	k8s.io/component-helpers => k8s.io/component-helpers v0.29.0
	k8s.io/controller-manager => k8s.io/controller-manager v0.29.0
	k8s.io/cri-api => k8s.io/cri-api v0.29.0
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0
	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0
	k8s.io/kubectl => k8s.io/kubectl v0.29.0
	k8s.io/kubelet => k8s.io/kubelet v0.29.0
	k8s.io/kubernetes => k8s.io/kubernetes v1.29.0
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0
	k8s.io/metrics => k8s.io/metrics v0.29.0
	k8s.io/mount-utils => k8s.io/mount-utils v0.29.0
	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0
	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
	sigs.k8s.io/randfill v1.0.0 // indirect
	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
	sigs.k8s.io/yaml v1.6.0 // indirect
)
@@ -80,3 +80,5 @@ echo "Building kubeconfig_generator"
 go build -o "${DEST_DIR}"/kubeconfig_generator ${BUILD_ARGS} -ldflags "${LDFLAGS}" ./cmd/kubeconfig_generator
 echo "Building cert-approver"
 go build -o "${DEST_DIR}"/cert-approver ${BUILD_ARGS} -ldflags "${LDFLAGS}" ./cmd/cert-approver
+echo "Building passthru CNI"
+go build -o "${DEST_DIR}"/passthru ${BUILD_ARGS} -ldflags "${LDFLAGS}" ./cmd/passthru-cni
@@ -1,5 +1,5 @@
 # This Dockerfile is used to build the image available on DockerHub
-FROM --platform=$BUILDPLATFORM golang:1.21 as build
+FROM --platform=$BUILDPLATFORM golang:1.24 as build

 # Add everything
 ADD . /usr/src/multus-cni

@@ -8,7 +8,7 @@ ARG TARGETPLATFORM
 RUN cd /usr/src/multus-cni && \
     ./hack/build-go.sh

-FROM gcr.io/distroless/base-debian11:latest
+FROM gcr.io/distroless/base-debian12:latest
 LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
 COPY --from=build /usr/src/multus-cni/bin /usr/src/multus-cni/bin
 COPY --from=build /usr/src/multus-cni/LICENSE /usr/src/multus-cni/LICENSE

@@ -18,4 +18,5 @@ COPY --from=build /usr/src/multus-cni/bin/install_multus /
 COPY --from=build /usr/src/multus-cni/bin/thin_entrypoint /
 COPY --from=build /usr/src/multus-cni/bin/kubeconfig_generator /
 COPY --from=build /usr/src/multus-cni/bin/cert-approver /
-CMD ["/thin_entrypoint"]
+
+ENTRYPOINT ["/thin_entrypoint"]
@@ -1,5 +1,5 @@
 # This Dockerfile is used to build the image available on DockerHub
-FROM --platform=$BUILDPLATFORM golang:1.21 as build
+FROM --platform=$BUILDPLATFORM golang:1.24 as build

 # Add everything
 ADD . /usr/src/multus-cni

@@ -8,7 +8,7 @@ ARG TARGETPLATFORM
 RUN cd /usr/src/multus-cni && \
     ./hack/build-go.sh

-FROM gcr.io/distroless/base-debian11:debug
+FROM gcr.io/distroless/base-debian12:debug
 LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
 COPY --from=build /usr/src/multus-cni/bin /usr/src/multus-cni/bin
 COPY --from=build /usr/src/multus-cni/LICENSE /usr/src/multus-cni/LICENSE

@@ -18,4 +18,5 @@ COPY --from=build /usr/src/multus-cni/bin/install_multus /
 COPY --from=build /usr/src/multus-cni/bin/thin_entrypoint /
 COPY --from=build /usr/src/multus-cni/bin/kubeconfig_generator /
 COPY --from=build /usr/src/multus-cni/bin/cert-approver /
-CMD ["/thin_entrypoint"]
+
+ENTRYPOINT ["/thin_entrypoint"]
@@ -1,14 +1,15 @@
 # This Dockerfile is used to build the image available on DockerHub
-FROM golang:1.21 as build
+FROM --platform=$BUILDPLATFORM golang:1.24 as build

 # Add everything
 ADD . /usr/src/multus-cni

 ARG TARGETPLATFORM
 RUN cd /usr/src/multus-cni && \
     ./hack/build-go.sh

 FROM debian:stable-slim
-LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
+LABEL org.opencontainers.image.source=https://github.com/k8snetworkplumbingwg/multus-cni
 COPY --from=build /usr/src/multus-cni/bin /usr/src/multus-cni/bin
 COPY --from=build /usr/src/multus-cni/LICENSE /usr/src/multus-cni/LICENSE
+COPY --from=build /usr/src/multus-cni/bin/cert-approver /
@@ -18,9 +18,11 @@ package k8sclient
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"syscall"
|
||||
@@ -82,6 +84,20 @@ func (c *ClientInfo) GetPod(namespace, name string) (*v1.Pod, error) {
|
||||
return c.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
// GetPodContext gets pod from kubernetes with context
|
||||
func (c *ClientInfo) GetPodContext(ctx context.Context, namespace, name string) (*v1.Pod, error) {
|
||||
if c.PodInformer != nil {
|
||||
logging.Debugf("GetPod for [%s/%s] will use informer cache", namespace, name)
|
||||
return listers.NewPodLister(c.PodInformer.GetIndexer()).Pods(namespace).Get(name)
|
||||
}
|
||||
return c.Client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
// GetPodAPILiveQuery does a live API query for the pod instead of using the informer, for failure cases where relying on the cache could return a stale entry or miss the pod entirely.
|
||||
func (c *ClientInfo) GetPodAPILiveQuery(ctx context.Context, namespace, name string) (*v1.Pod, error) {
|
||||
return c.Client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
}
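// For context, a minimal standalone sketch (not part of this change) of how a
// caller can combine the two lookups added above: try the informer-backed
// GetPodContext first, then fall back to a live API query on a cache miss.
// The podGetter interface is hypothetical and exists only so this compiles on
// its own.
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
)

type podGetter interface {
	GetPodContext(ctx context.Context, namespace, name string) (*v1.Pod, error)
	GetPodAPILiveQuery(ctx context.Context, namespace, name string) (*v1.Pod, error)
}

func getPodWithFallback(client podGetter, namespace, name string) (*v1.Pod, error) {
	// Bound the call so CNI operations do not hang on a slow API server.
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()

	pod, err := client.GetPodContext(ctx, namespace, name)
	if err == nil {
		return pod, nil
	}
	if errors.IsNotFound(err) {
		// The informer cache may simply be lagging; ask the API server directly.
		return client.GetPodAPILiveQuery(ctx, namespace, name)
	}
	return nil, err
}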
|
||||
|
||||
// DeletePod deletes a pod from kubernetes
|
||||
func (c *ClientInfo) DeletePod(namespace, name string) error {
|
||||
return c.Client.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
|
||||
@@ -189,13 +205,13 @@ func parsePodNetworkObjectName(podnetwork string) (string, string, string, error
|
||||
for i := range allItems {
|
||||
matched := expr.MatchString(allItems[i])
|
||||
if !matched && len([]rune(allItems[i])) > 0 {
|
||||
return "", "", "", logging.Errorf(fmt.Sprintf("parsePodNetworkObjectName: Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i]))
|
||||
return "", "", "", logging.Errorf("parsePodNetworkObjectName: Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(netIfName) > 0 {
|
||||
if len(netIfName) > (syscall.IFNAMSIZ-1) || strings.ContainsAny(netIfName, " \t\n\v\f\r/") {
|
||||
return "", "", "", logging.Errorf(fmt.Sprintf("parsePodNetworkObjectName: Failed to parse interface name: must be less than 15 chars and not contain '/' or spaces. interface name '%s'", netIfName))
|
||||
return "", "", "", logging.Errorf("parsePodNetworkObjectName: Failed to parse interface name: must be less than 15 chars and not contain '/' or spaces. interface name '%s'", netIfName)
|
||||
}
|
||||
}
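// Why these call sites changed, shown standalone with fmt.Errorf as a stand-in
// for logging.Errorf: wrapping the message in fmt.Sprintf and then passing the
// result as the format argument treats user data as a format string, so any
// '%' in the input is re-interpreted and go vet reports a non-constant format
// string. Passing the data as an argument keeps the format string constant.
package example

import "fmt"

func parseError(item string) error {
	// Before: fmt.Errorf(fmt.Sprintf("... mismatch @ '%v'", item))
	// After:  keep the format constant and pass the data separately.
	return fmt.Errorf("parsePodNetworkObjectName: Failed to parse: mismatch @ '%v'", item)
}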
|
||||
|
||||
@@ -282,13 +298,13 @@ func getKubernetesDelegate(client *ClientInfo, net *types.NetworkSelectionElemen
|
||||
if client != nil {
|
||||
client.Eventf(pod, v1.EventTypeWarning, "NoNetworkFound", errMsg)
|
||||
}
|
||||
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: " + errMsg)
|
||||
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: %s", errMsg)
|
||||
}
|
||||
|
||||
// Get resourceName annotation from NetworkAttachmentDefinition
|
||||
deviceID := ""
|
||||
resourceName, ok := customResource.GetAnnotations()[resourceNameAnnot]
|
||||
if ok && pod.Name != "" && pod.Namespace != "" {
|
||||
if ok && pod != nil && pod.Name != "" && pod.Namespace != "" {
|
||||
// ResourceName annotation is found; try to get device info from resourceMap
|
||||
logging.Debugf("getKubernetesDelegate: found resourceName annotation : %s", resourceName)
|
||||
|
||||
@@ -524,31 +540,119 @@ func getNetDelegate(client *ClientInfo, pod *v1.Pod, netname, confdir, namespace
|
||||
} else {
|
||||
// option4) if file path (absolute), then load it directly
|
||||
if strings.HasSuffix(netname, ".conflist") {
|
||||
confList, err := libcni.ConfListFromFile(netname)
|
||||
confList, err := LoadChainedPluginsFromFile(netname)
|
||||
if err != nil {
|
||||
return nil, resourceMap, logging.Errorf("error loading CNI conflist file %s: %v", netname, err)
|
||||
}
|
||||
configBytes = confList.Bytes
|
||||
} else {
|
||||
conf, err := libcni.ConfFromFile(netname)
|
||||
|
||||
delegate, err := types.LoadDelegateNetConfFromConfList(confList, nil, "", "")
|
||||
if err != nil {
|
||||
return nil, resourceMap, logging.Errorf("error loading CNI config file %s: %v", netname, err)
|
||||
return nil, resourceMap, err
|
||||
}
|
||||
if conf.Network.Type == "" {
|
||||
return nil, resourceMap, logging.Errorf("error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", netname)
|
||||
}
|
||||
configBytes = conf.Bytes
|
||||
return delegate, resourceMap, nil
|
||||
|
||||
}
|
||||
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "", "")
|
||||
|
||||
// Or it's not a conflist...
|
||||
// after libcni v1.2.3 there is no longer support for this old-school method with non-conflists;
// note that, unlike the conflist path, this method does not check for a zero-length plugins field.
|
||||
conf, err := libcni.ConfFromFile(netname)
|
||||
if err != nil {
|
||||
return nil, resourceMap, logging.Errorf("error loading CNI config file %s: %v", netname, err)
|
||||
}
|
||||
if conf.Network.Type == "" {
|
||||
return nil, resourceMap, logging.Errorf("error loading CNI config file %s: no 'type'; perhaps this is supposed to be a .conflist?", netname)
|
||||
}
|
||||
|
||||
delegate, err := types.LoadDelegateNetConf(conf.Bytes, nil, "", "")
|
||||
if err != nil {
|
||||
return nil, resourceMap, err
|
||||
}
|
||||
return delegate, resourceMap, nil
|
||||
}
|
||||
|
||||
}
|
||||
return nil, resourceMap, logging.Errorf("getNetDelegate: cannot find network: %v", netname)
|
||||
}
|
||||
|
||||
func loadSubdirectoryChain(bytes []byte, cniconfdir string) (*libcni.NetworkConfigList, error) {
|
||||
// Load the network configuration from the byte array
|
||||
conf, err := libcni.NetworkConfFromBytes(bytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading network config from bytes: %v", err)
|
||||
}
|
||||
|
||||
// Check if plugins need to be loaded from files
|
||||
if !conf.LoadOnlyInlinedPlugins && cniconfdir != "" {
|
||||
// Let's validate that conf.Name is well-formed.
// From the CNI spec:
|
||||
// > Must start with an alphanumeric character, optionally followed by any combination of one or more alphanumeric characters,
|
||||
// > underscore, dot (.) or hyphen (-). Must not contain characters disallowed in file paths.
|
||||
if !regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`).MatchString(conf.Name) {
|
||||
return nil, fmt.Errorf("invalid network config name: %s", conf.Name)
|
||||
}
|
||||
|
||||
plugins, err := libcni.NetworkPluginConfsFromFiles(cniconfdir, conf.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading plugin configs: %v", err)
|
||||
}
|
||||
conf.Plugins = append(conf.Plugins, plugins...)
|
||||
}
|
||||
|
||||
if len(conf.Plugins) == 0 {
|
||||
return nil, fmt.Errorf("no plugin configs found")
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
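// Standalone illustration of the name check above: network names must start
// with an alphanumeric character and may only contain alphanumerics,
// underscore, dot, or hyphen, which keeps them safe to use as part of a file
// path. Sample names are illustrative.
package example

import (
	"fmt"
	"regexp"
)

var validNetName = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)

func main() {
	for _, name := range []string{"macvlan-conf-1", "bad/name", ".hidden"} {
		fmt.Printf("%q valid=%v\n", name, validNetName.MatchString(name))
	}
}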
|
||||
|
||||
// LoadChainedDelegatesFromBytes loads a CNI configuration byte array and returns a DelegateNetConf with the chain added.
|
||||
func LoadChainedDelegatesFromBytes(bytes []byte, cniconfdir string) *types.DelegateNetConf {
|
||||
conf, err := loadSubdirectoryChain(bytes, cniconfdir)
|
||||
if err != nil {
|
||||
logging.Errorf("LoadChainedDelegatesFromBytes: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create and return a DelegateNetConf from the configuration list
|
||||
delegate, err := types.LoadDelegateNetConfFromConfList(conf, nil, "", "")
|
||||
if err != nil {
|
||||
logging.Errorf("LoadChainedDelegatesFromBytes: error loading delegate network config: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return delegate
|
||||
}
|
||||
|
||||
// LoadChainedPluginsFromFile loads a CNI configuration file and returns the NetworkConfigList
|
||||
func LoadChainedPluginsFromFile(filename string) (*libcni.NetworkConfigList, error) {
|
||||
cleanPath := filepath.Clean(filename)
|
||||
|
||||
// stat the file to make sure it's a normal file.
|
||||
info, err := os.Stat(cleanPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !info.Mode().IsRegular() {
|
||||
return nil, errors.New("CNI configuration path is not a regular file")
|
||||
}
|
||||
|
||||
bytes, err := os.ReadFile(cleanPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading %s: %w", filename, err)
|
||||
}
|
||||
logging.Debugf("LoadChainedPluginsFromFile: %s", filename)
|
||||
|
||||
conf, err := loadSubdirectoryChain(bytes, filepath.Dir(filename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logging.Debugf("Loaded SubdirectoryChain: %+v", conf)
|
||||
|
||||
return conf, nil
|
||||
}
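// A hypothetical caller of the loader added above (the path and wrapper
// function are illustrative, not part of this change): read a .conflist from
// disk, pull in any chained plugin configs stored alongside it, and wrap the
// result as a Multus delegate, mirroring how getNetDelegate uses it.
package example

import (
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/k8sclient"
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
)

func loadDelegateFromConflist(path string) (*types.DelegateNetConf, error) {
	confList, err := k8sclient.LoadChainedPluginsFromFile(path)
	if err != nil {
		return nil, err
	}
	// The nil and empty-string arguments match the call in the hunk above.
	return types.LoadDelegateNetConfFromConfList(confList, nil, "", "")
}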
|
||||
|
||||
// GetDefaultNetworks parses 'defaultNetwork' config, gets network json and put it into netconf.Delegates.
|
||||
func GetDefaultNetworks(pod *v1.Pod, conf *types.NetConf, kubeClient *ClientInfo, resourceMap map[string]*types.ResourceInfo) (map[string]*types.ResourceInfo, error) {
|
||||
logging.Debugf("GetDefaultNetworks: %v, %v, %v, %v", pod, conf, kubeClient, resourceMap)
|
||||
@@ -575,7 +679,7 @@ func GetDefaultNetworks(pod *v1.Pod, conf *types.NetConf, kubeClient *ClientInfo
|
||||
delegates = append(delegates, delegate)
|
||||
|
||||
// Pod in kube-system namespace does not have default network for now.
|
||||
if !types.CheckSystemNamespaces(pod.ObjectMeta.Namespace, conf.SystemNamespaces) {
|
||||
if pod != nil && !types.CheckSystemNamespaces(pod.ObjectMeta.Namespace, conf.SystemNamespaces) {
|
||||
for _, netname := range conf.DefaultNetworks {
|
||||
delegate, resourceMap, err := getNetDelegate(kubeClient, pod, netname, conf.ConfDir, conf.MultusNamespace, resourceMap)
|
||||
if err != nil {
|
||||
|
||||
@@ -160,7 +160,7 @@ func (rc *kubeletClient) getDRAResources(dynamicResources []*podresourcesapi.Dyn
|
||||
for _, dynamicResource := range dynamicResources {
|
||||
var deviceIDs []string
|
||||
for _, claimResource := range dynamicResource.ClaimResources {
|
||||
for _, cdiDevice := range claimResource.CDIDevices {
|
||||
for _, cdiDevice := range claimResource.CdiDevices {
|
||||
res := strings.Split(cdiDevice.Name, "=")
|
||||
if len(res) == 2 {
|
||||
deviceIDs = append(deviceIDs, res[1])
|
||||
@@ -169,10 +169,10 @@ func (rc *kubeletClient) getDRAResources(dynamicResources []*podresourcesapi.Dyn
|
||||
}
|
||||
}
|
||||
}
|
||||
if rInfo, ok := resourceMap[dynamicResource.ClassName]; ok {
|
||||
if rInfo, ok := resourceMap[dynamicResource.ClaimName]; ok {
|
||||
rInfo.DeviceIDs = append(rInfo.DeviceIDs, deviceIDs...)
|
||||
} else {
|
||||
resourceMap[dynamicResource.ClassName] = &types.ResourceInfo{DeviceIDs: deviceIDs}
|
||||
resourceMap[dynamicResource.ClaimName] = &types.ResourceInfo{DeviceIDs: deviceIDs}
|
||||
}
|
||||
}
|
||||
}
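// Standalone sketch of what the DRA hunk above does with kubelet pod-resources
// data: each CDI device name of the form "<cdi-kind>=<device-id>" is split on
// '=' and the right-hand side is collected, keyed by the resource *claim* name
// (previously the class name). Values here are illustrative.
package example

import (
	"fmt"
	"strings"
)

func main() {
	cdiDeviceNames := []string{"vendor.example.com/gpu=gpu-1"}
	var deviceIDs []string
	for _, name := range cdiDeviceNames {
		if parts := strings.Split(name, "="); len(parts) == 2 {
			deviceIDs = append(deviceIDs, parts[1])
		}
	}
	resourceMap := map[string][]string{"resource-claim": deviceIDs}
	fmt.Println(resourceMap) // map[resource-claim:[gpu-1]]
}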
|
||||
|
||||
@@ -47,6 +47,7 @@ var (
|
||||
|
||||
type fakeResourceServer struct {
|
||||
server *grpc.Server
|
||||
podresourcesapi.UnimplementedPodResourcesListerServer
|
||||
}
|
||||
|
||||
// TODO: This is stub code for test, but we may need to change for the testing we use this API in the future...
|
||||
@@ -72,16 +73,21 @@ func (m *fakeResourceServer) List(_ context.Context, _ *podresourcesapi.ListPodR
|
||||
Name: "cdi-kind=cdi-resource",
|
||||
},
|
||||
}
|
||||
draDriverName := "dra.example.com"
|
||||
poolName := "worker-1-pool"
|
||||
deviceName := "gpu-1"
|
||||
|
||||
claimsResource := []*podresourcesapi.ClaimResource{
|
||||
{
|
||||
CDIDevices: cdiDevices,
|
||||
CdiDevices: cdiDevices,
|
||||
DriverName: draDriverName,
|
||||
PoolName: poolName,
|
||||
DeviceName: deviceName,
|
||||
},
|
||||
}
|
||||
|
||||
dynamicResources := []*podresourcesapi.DynamicResource{
|
||||
{
|
||||
ClassName: "resource-class",
|
||||
ClaimName: "resource-claim",
|
||||
ClaimNamespace: "dynamic-resource-pod-namespace",
|
||||
ClaimResources: claimsResource,
|
||||
@@ -263,7 +269,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
outputRMap := map[string]*mtypes.ResourceInfo{
|
||||
"resource-class": {DeviceIDs: []string{"cdi-resource"}},
|
||||
"resource-claim": {DeviceIDs: []string{"cdi-resource"}},
|
||||
}
|
||||
resourceMap, err := client.GetPodResourceMap(fakePod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
@@ -18,6 +18,7 @@ package multus
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
"github.com/containernetworking/cni/pkg/skel"
|
||||
cnitypes "github.com/containernetworking/cni/pkg/types"
|
||||
cni100 "github.com/containernetworking/cni/pkg/types/100"
|
||||
cniversion "github.com/containernetworking/cni/pkg/version"
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
|
||||
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
|
||||
@@ -52,11 +54,12 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
version = "master@git"
|
||||
commit = "unknown commit"
|
||||
date = "unknown date"
|
||||
gitTreeState = ""
|
||||
releaseStatus = ""
|
||||
version = "master@git"
|
||||
commit = "unknown commit"
|
||||
date = "unknown date"
|
||||
gitTreeState = ""
|
||||
releaseStatus = ""
|
||||
errPodNotFound = fmt.Errorf("pod not found during Multus GetPod")
|
||||
)
|
||||
|
||||
// PrintVersionString ...
|
||||
@@ -131,15 +134,49 @@ func saveDelegates(containerID, dataDir string, delegates []*types.DelegateNetCo
|
||||
return err
|
||||
}
|
||||
|
||||
func deleteDelegates(containerID, dataDir string) error {
|
||||
logging.Debugf("deleteDelegates: %s, %s", containerID, dataDir)
|
||||
|
||||
path := filepath.Join(dataDir, containerID)
|
||||
if err := os.Remove(path); err != nil {
|
||||
return logging.Errorf("deleteDelegates: error in deleting the delegates : %v", err)
|
||||
func getValidAttachmentFromCache(b []byte) (string, string, error) {
|
||||
type simpleCacheV1 struct {
|
||||
Kind string `json:"kind"`
|
||||
ContainerID string `json:"containerId"`
|
||||
IfName string `json:"ifName"`
|
||||
}
|
||||
|
||||
return nil
|
||||
cache := &simpleCacheV1{}
|
||||
if err := json.Unmarshal(b, cache); err != nil {
|
||||
return "", "", fmt.Errorf("getValidAttachmentFromCache: invalid json: %v", err)
|
||||
}
|
||||
|
||||
if cache.ContainerID == "" || cache.IfName == "" {
|
||||
return "", "", fmt.Errorf("invalid cache: containerID:%q, ifName:%q", cache.ContainerID, cache.IfName)
|
||||
}
|
||||
|
||||
return cache.ContainerID, cache.IfName, nil
|
||||
}
|
||||
|
||||
func gatherValidAttachmentsFromCache(cniDir string) ([]cnitypes.GCAttachment, error) {
|
||||
cacheDir := filepath.Join(cniDir, "results")
|
||||
dirEntries, err := os.ReadDir(cacheDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allAttachments := []cnitypes.GCAttachment{}
|
||||
for _, dirEnt := range dirEntries {
|
||||
path := filepath.Join(cacheDir, dirEnt.Name())
|
||||
delegatesBytes, err := os.ReadFile(path)
|
||||
// if a cache file cannot be read, skip it for now (there is nothing we can recover).
|
||||
if err != nil {
|
||||
logging.Errorf("gatherSavedDelegates: cannot read %q, skipped to add", path)
|
||||
continue
|
||||
}
|
||||
containerID, ifName, err := getValidAttachmentFromCache(delegatesBytes)
|
||||
if err != nil {
|
||||
logging.Errorf("gatherSavedDelegates: cannot read cache, skipped to add: %v", err)
|
||||
continue
|
||||
}
|
||||
allAttachments = append(allAttachments, cnitypes.GCAttachment{ContainerID: containerID, IfName: ifName})
|
||||
}
|
||||
return allAttachments, nil
|
||||
}
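// The GC helpers above rely on only two fields of libcni's cached result
// files; this standalone sketch shows that minimal shape with an illustrative
// (fake) cache entry.
package example

import (
	"encoding/json"
	"fmt"
)

type simpleCacheV1 struct {
	Kind        string `json:"kind"`
	ContainerID string `json:"containerId"`
	IfName      string `json:"ifName"`
}

func main() {
	raw := []byte(`{"kind":"cniCacheV1","containerId":"example-container-id","ifName":"eth0"}`)
	cache := &simpleCacheV1{}
	if err := json.Unmarshal(raw, cache); err != nil {
		panic(err)
	}
	fmt.Println(cache.ContainerID, cache.IfName) // example-container-id eth0
}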
|
||||
|
||||
func validateIfName(nsname string, ifname string) error {
|
||||
@@ -223,16 +260,61 @@ func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.Net
|
||||
return err
|
||||
}
|
||||
|
||||
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
|
||||
func confStatus(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
|
||||
logging.Debugf("confStatus: %v, %s", rt, string(rawNetconf))
|
||||
|
||||
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
|
||||
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
|
||||
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
|
||||
|
||||
conf, err := libcni.ConfFromBytes(rawNetconf)
|
||||
if err != nil {
|
||||
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
|
||||
}
|
||||
|
||||
if gt, _ := cniversion.GreaterThanOrEqualTo(conf.Network.CNIVersion, "1.1.0"); !gt {
|
||||
logging.Debugf("confStatus: skipping STATUS for network %q type %q cniVersion %q (< 1.1.0)",
|
||||
conf.Network.Name, conf.Network.Type, conf.Network.CNIVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
confList := &libcni.NetworkConfigList{
|
||||
Name: conf.Network.Name,
|
||||
CNIVersion: conf.Network.CNIVersion,
|
||||
Plugins: []*libcni.PluginConfig{conf},
|
||||
}
|
||||
|
||||
err = cniNet.GetStatusNetworkList(context.Background(), confList)
|
||||
if err != nil {
|
||||
var cniErr *cnitypes.Error
|
||||
if stderrors.As(err, &cniErr) {
|
||||
return err
|
||||
}
|
||||
return logging.Errorf("error in getting result from StatusNetworkList: %v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
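// STATUS only exists from CNI spec 1.1.0 onward, which is why the new
// confStatus/conflistStatus helpers skip older configs. This standalone
// snippet uses the same comparison they rely on; the version list is
// illustrative.
package example

import (
	"fmt"

	cniversion "github.com/containernetworking/cni/pkg/version"
)

func main() {
	for _, v := range []string{"0.4.0", "1.0.0", "1.1.0"} {
		ok, err := cniversion.GreaterThanOrEqualTo(v, "1.1.0")
		fmt.Printf("cniVersion %s -> run STATUS: %v (err: %v)\n", v, ok, err)
	}
}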
|
||||
|
||||
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, cniConfList *libcni.NetworkConfigList, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
|
||||
logging.Debugf("conflistAdd: %v, %s", rt, string(rawnetconflist))
|
||||
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
|
||||
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
|
||||
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
|
||||
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
|
||||
|
||||
confList, err := libcni.ConfListFromBytes(rawnetconflist)
|
||||
if err != nil {
|
||||
return nil, logging.Errorf("conflistAdd: error converting the raw bytes into a conflist: %v", err)
|
||||
var confList *libcni.NetworkConfigList
|
||||
var err error
|
||||
|
||||
// This may wind up being set while parsing the default network config.
// In that case we'll use it as passed; otherwise, we'll recalculate it from the raw bytes.
|
||||
if len(cniConfList.Plugins) > 0 {
|
||||
confList = cniConfList
|
||||
} else {
|
||||
confList, err = libcni.NetworkConfFromBytes(rawnetconflist)
|
||||
if err != nil {
|
||||
return nil, logging.Errorf("conflistAdd: error converting the raw bytes into a conflist: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
result, err := cniNet.AddNetworkList(context.Background(), confList, rt)
|
||||
@@ -283,6 +365,33 @@ func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *t
|
||||
return err
|
||||
}
|
||||
|
||||
func conflistStatus(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
|
||||
logging.Debugf("conflistStatus: %v, %s", rt, string(rawnetconflist))
|
||||
|
||||
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
|
||||
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
|
||||
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
|
||||
|
||||
confList, err := libcni.ConfListFromBytes(rawnetconflist)
|
||||
if err != nil {
|
||||
return logging.Errorf("conflistStatus: error converting the raw bytes into a conflist: %v", err)
|
||||
}
|
||||
if gt, _ := cniversion.GreaterThanOrEqualTo(confList.CNIVersion, "1.1.0"); !gt {
|
||||
logging.Debugf("conflistStatus: skipping STATUS for network list %q cniVersion %q (< 1.1.0)", confList.Name, confList.CNIVersion)
|
||||
}
|
||||
|
||||
err = cniNet.GetStatusNetworkList(context.Background(), confList)
|
||||
if err != nil {
|
||||
var cniErr *cnitypes.Error
|
||||
if stderrors.As(err, &cniErr) {
|
||||
return err
|
||||
}
|
||||
return logging.Errorf("conflistStatus: error in getting result from StatusNetworkList: %v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DelegateAdd ...
|
||||
func DelegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) (cnitypes.Result, error) {
|
||||
logging.Debugf("DelegateAdd: %v, %v, %v", exec, delegate, rt)
|
||||
@@ -326,7 +435,8 @@ func DelegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, dele
|
||||
var result cnitypes.Result
|
||||
var err error
|
||||
if delegate.ConfListPlugin {
|
||||
result, err = conflistAdd(rt, delegate.Bytes, multusNetconf, exec)
|
||||
// TODO: why are we passing bytes here? don't we have a better representation of it?
|
||||
result, err = conflistAdd(rt, delegate.Bytes, &delegate.CNINetworkConfigList, multusNetconf, exec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -411,6 +521,46 @@ func DelegateCheck(exec invoke.Exec, delegateConf *types.DelegateNetConf, rt *li
|
||||
return err
|
||||
}
|
||||
|
||||
// DelegateStatus ...
|
||||
func DelegateStatus(exec invoke.Exec, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
|
||||
logging.Debugf("DelegateStatus: %v, %v, %v", exec, delegateConf, rt)
|
||||
|
||||
isConfList := delegateConf.ConfListPlugin
|
||||
if !isConfList && delegateConf.Conf.Type == "" && delegateConf.ConfList.Name != "" {
|
||||
isConfList = true
|
||||
}
|
||||
|
||||
if logging.GetLoggingLevel() >= logging.VerboseLevel {
|
||||
var cniConfName string
|
||||
if isConfList {
|
||||
cniConfName = delegateConf.ConfList.Name
|
||||
} else {
|
||||
cniConfName = delegateConf.Conf.Name
|
||||
}
|
||||
logging.Verbosef("Status: %s:%s:%s(%s):%s %s", rt.Args[1][1], rt.Args[2][1], delegateConf.Name, cniConfName, rt.IfName, string(delegateConf.Bytes))
|
||||
}
|
||||
|
||||
var err error
|
||||
if isConfList {
|
||||
err = conflistStatus(rt, delegateConf.Bytes, multusNetconf, exec)
|
||||
} else {
|
||||
err = confStatus(rt, delegateConf.Bytes, multusNetconf, exec)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
var cniErr *cnitypes.Error
|
||||
if stderrors.As(err, &cniErr) {
|
||||
return err
|
||||
}
|
||||
if isConfList {
|
||||
return logging.Errorf("DelegateStatus: error invoking ConflistStatus - %q: %v", delegateConf.ConfList.Name, err)
|
||||
}
|
||||
return logging.Errorf("DelegateStatus: error invoking ConfStatus - %q: %v", delegateConf.Conf.Type, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DelegateDel ...
|
||||
func DelegateDel(exec invoke.Exec, pod *v1.Pod, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
|
||||
logging.Debugf("DelegateDel: %v, %v, %v, %v", exec, pod, delegateConf, rt)
|
||||
@@ -471,7 +621,7 @@ func delPlugins(exec invoke.Exec, pod *v1.Pod, args *skel.CmdArgs, k8sArgs *type
|
||||
|
||||
// Check if we had any errors, and send them all back.
|
||||
if len(errorstrings) > 0 {
|
||||
return fmt.Errorf(strings.Join(errorstrings, " / "))
|
||||
return fmt.Errorf("%s", strings.Join(errorstrings, " / "))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -542,21 +692,33 @@ func GetPod(kubeClient *k8s.ClientInfo, k8sArgs *types.K8sArgs, isDel bool) (*v1
|
||||
var pod *v1.Pod
|
||||
if err := wait.PollImmediate(pollDuration, shortPollTimeout, func() (bool, error) {
|
||||
var getErr error
|
||||
pod, getErr = kubeClient.GetPod(podNamespace, podName)
|
||||
// Use context with a short timeout so the call to API server doesn't take too long.
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), pollDuration)
|
||||
defer cancel()
|
||||
pod, getErr = kubeClient.GetPodContext(ctx, podNamespace, podName)
|
||||
if isCriticalRequestRetriable(getErr) || retryOnNotFound(getErr) {
|
||||
return false, nil
|
||||
}
|
||||
return pod != nil, getErr
|
||||
}); err != nil {
|
||||
if isDel && errors.IsNotFound(err) {
|
||||
// On DEL pod may already be gone from apiserver/informer
|
||||
return nil, nil
|
||||
if errors.IsNotFound(err) {
|
||||
// When the pod is not found, that is acceptable: it is a known condition for rapidly deleted pods, so we just warn on it.
|
||||
if !isDel {
|
||||
logging.Verbosef("Warning: GetPod for [%s/%s] resulted in pod not found during CNI ADD (pod may have already been deleted): %v", podNamespace, podName, err)
|
||||
}
|
||||
return nil, errPodNotFound
|
||||
}
|
||||
// Try one more time to get the pod directly from the apiserver;
|
||||
// TODO: figure out why static pods don't show up via the informer
|
||||
// and always hit this case.
|
||||
pod, err = kubeClient.GetPod(podNamespace, podName)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), pollDuration)
|
||||
defer cancel()
|
||||
pod, err = kubeClient.GetPodAPILiveQuery(ctx, podNamespace, podName)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
logging.Verbosef("Warning: On live query retry, [%s/%s] pod not found during CNI ADD (pod may have already been deleted): %v", podNamespace, podName, err)
|
||||
return nil, errPodNotFound
|
||||
}
|
||||
return nil, cmdErr(k8sArgs, "error waiting for pod: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -602,6 +764,11 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
|
||||
|
||||
pod, err := GetPod(kubeClient, k8sArgs, false)
|
||||
if err != nil {
|
||||
if stderrors.Is(err, errPodNotFound) {
|
||||
emptyResult := emptyCNIResult(args, n.CNIVersion)
|
||||
logging.Verbosef("CmdAdd: Warning: pod [%s/%s] not found, exiting with empty CNI result: %v", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME, emptyResult)
|
||||
return emptyResult, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -623,6 +790,36 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
|
||||
return nil, cmdErr(k8sArgs, "error loading k8s delegates k8s args: %v", err)
|
||||
}
|
||||
|
||||
// we add to the auxiliary CNI chain here.
|
||||
if n.AuxiliaryCNIChainName != "" {
|
||||
logging.Debugf("Using AuxiliaryCNIChainName: %v", n.AuxiliaryCNIChainName)
|
||||
|
||||
// create a passthru CNI conflist configuration named after our auxiliary CNI chain.
|
||||
jsonString := fmt.Sprintf(`{"cniVersion":"%s","name":"%s","plugins":[{"type":"passthru","name":"passthru-cni"}]}`, n.CNIVersion, n.AuxiliaryCNIChainName)
|
||||
|
||||
// Convert the JSON string to a byte array
|
||||
byteArray := []byte(jsonString)
|
||||
|
||||
// Let's try to get the cni path from the ClusterNetwork
|
||||
if !strings.Contains(n.ClusterNetwork, "/") {
|
||||
return nil, cmdErr(k8sArgs, "auxiliary chain used but ClusterNetwork must be a path, and it is not a path: %v", n.ClusterNetwork)
|
||||
}
|
||||
|
||||
// Get the directory part of the ClusterNetwork path
|
||||
// TODO: This could probably be improved.
|
||||
cniPath := filepath.Dir(n.ClusterNetwork)
|
||||
|
||||
// Load chained delegates
|
||||
delegate := k8s.LoadChainedDelegatesFromBytes(byteArray, cniPath)
|
||||
if delegate != nil {
|
||||
// Only if additional plugins were listed do we add this aux chain delegate.
|
||||
if len(delegate.ConfList.Plugins) > 1 {
|
||||
// Add the resulting delegate to n.Delegates
|
||||
n.Delegates = append(n.Delegates, delegate)
|
||||
}
|
||||
}
|
||||
}
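// What the inline fmt.Sprintf above generates, shown standalone with
// illustrative values for the CNI version and auxiliary chain name: a
// single-plugin "passthru" conflist that any additional plugin configs
// discovered under the ClusterNetwork directory are appended to.
package example

import "fmt"

func main() {
	jsonString := fmt.Sprintf(`{"cniVersion":"%s","name":"%s","plugins":[{"type":"passthru","name":"passthru-cni"}]}`,
		"1.1.0", "aux-chain")
	fmt.Println(jsonString)
	// {"cniVersion":"1.1.0","name":"aux-chain","plugins":[{"type":"passthru","name":"passthru-cni"}]}
}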
|
||||
|
||||
// cache the multus config
|
||||
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
|
||||
return nil, cmdErr(k8sArgs, "error saving the delegates: %v", err)
|
||||
@@ -757,15 +954,17 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
|
||||
}
|
||||
}
|
||||
|
||||
// set the network status annotation in apiserver, only in case Multus as kubeconfig
|
||||
// set the network status annotation in apiserver, only in case Multus has kubeconfig
|
||||
if kubeClient != nil && kc != nil {
|
||||
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
|
||||
err = k8s.SetNetworkStatus(kubeClient, k8sArgs, netStatus, n)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "failed to query the pod") {
|
||||
return nil, cmdErr(k8sArgs, "error setting the networks status, pod was already deleted: %v", err)
|
||||
if strings.Contains(err.Error(), `pod "`) && strings.Contains(err.Error(), `" not found`) {
|
||||
// Tolerate issues with writing the status due to pod deletion, and log them.
|
||||
logging.Verbosef("warning: tolerated failure writing network status (pod not found): %v", err)
|
||||
} else {
|
||||
return nil, cmdErr(k8sArgs, "error setting the networks status: %v", err)
|
||||
}
|
||||
return nil, cmdErr(k8sArgs, "error setting the networks status: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -925,3 +1124,153 @@ func CmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) er
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
// CmdStatus ...
|
||||
func CmdStatus(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
|
||||
n, err := types.LoadNetConf(args.StdinData)
|
||||
logging.Debugf("CmdStatus: %v, %v, %v", args, exec, kubeClient)
|
||||
if err != nil {
|
||||
return cmdErr(nil, "error loading netconf: %v", err)
|
||||
}
|
||||
|
||||
kubeClient, err = k8s.GetK8sClient(n.Kubeconfig, kubeClient)
|
||||
if err != nil {
|
||||
return cmdErr(nil, "error getting k8s client: %v", err)
|
||||
}
|
||||
|
||||
if n.ReadinessIndicatorFile != "" {
|
||||
if err := types.GetReadinessIndicatorFile(n.ReadinessIndicatorFile); err != nil {
|
||||
return cmdErr(nil, "have you checked that your default network is ready? still waiting for readinessindicatorfile @ %v. pollimmediate error: %v", n.ReadinessIndicatorFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
if n.ClusterNetwork != "" {
|
||||
_, err = k8s.GetDefaultNetworks(nil, n, kubeClient, nil)
|
||||
if err != nil {
|
||||
return cmdErr(nil, "failed to get clusterNetwork: %v", err)
|
||||
}
|
||||
// First delegate is always the master plugin
|
||||
n.Delegates[0].MasterPlugin = true
|
||||
}
|
||||
|
||||
// invoke delegate's STATUS command
|
||||
// we only need to check cluster network status
|
||||
delegate := n.Delegates[0]
|
||||
if !delegate.ConfListPlugin {
|
||||
return confStatus(&libcni.RuntimeConf{}, delegate.Bytes, n, exec)
|
||||
}
|
||||
|
||||
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
|
||||
binDirs = append([]string{n.BinDir}, binDirs...)
|
||||
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, n.CNIDir, exec)
|
||||
|
||||
conf, err := libcni.ConfListFromBytes(delegate.Bytes)
|
||||
if err != nil {
|
||||
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
|
||||
}
|
||||
|
||||
err = cniNet.GetStatusNetworkList(context.Background(), conf)
|
||||
if err != nil {
|
||||
var cniErr *cnitypes.Error
|
||||
if stderrors.As(err, &cniErr) {
|
||||
return err
|
||||
}
|
||||
return logging.Errorf("error in STATUS command: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdGC ...
|
||||
func CmdGC(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
|
||||
n, err := types.LoadNetConf(args.StdinData)
|
||||
logging.Debugf("CmdStatus: %v, %v, %v", args, exec, kubeClient)
|
||||
if err != nil {
|
||||
return cmdErr(nil, "error loading netconf: %v", err)
|
||||
}
|
||||
|
||||
kubeClient, err = k8s.GetK8sClient(n.Kubeconfig, kubeClient)
|
||||
if err != nil {
|
||||
return cmdErr(nil, "error getting k8s client: %v", err)
|
||||
}
|
||||
|
||||
if n.ReadinessIndicatorFile != "" {
|
||||
if err := types.GetReadinessIndicatorFile(n.ReadinessIndicatorFile); err != nil {
|
||||
return cmdErr(nil, "have you checked that your default network is ready? still waiting for readinessindicatorfile @ %v. pollimmediate error: %v", n.ReadinessIndicatorFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
if n.ClusterNetwork != "" {
|
||||
_, err = k8s.GetDefaultNetworks(nil, n, kubeClient, nil)
|
||||
if err != nil {
|
||||
return cmdErr(nil, "failed to get clusterNetwork: %v", err)
|
||||
}
|
||||
// First delegate is always the master plugin
|
||||
n.Delegates[0].MasterPlugin = true
|
||||
}
|
||||
|
||||
// invoke delegate's GC command
|
||||
// we only need to garbage collect the cluster network
|
||||
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
|
||||
binDirs = append([]string{n.BinDir}, binDirs...)
|
||||
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, n.CNIDir, exec)
|
||||
|
||||
delegate := n.Delegates[0]
|
||||
isConfList := delegate.ConfListPlugin
|
||||
if !isConfList && delegate.Conf.Type == "" && delegate.ConfList.Name != "" {
|
||||
isConfList = true
|
||||
}
|
||||
|
||||
var confList *libcni.NetworkConfigList
|
||||
if isConfList {
|
||||
confList, err = libcni.ConfListFromBytes(delegate.Bytes)
|
||||
if err != nil {
|
||||
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
|
||||
}
|
||||
} else {
|
||||
conf, err := libcni.ConfFromBytes(delegate.Bytes)
|
||||
if err != nil {
|
||||
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
|
||||
}
|
||||
confList = &libcni.NetworkConfigList{
|
||||
Name: conf.Network.Name,
|
||||
CNIVersion: conf.Network.CNIVersion,
|
||||
Plugins: []*libcni.PluginConfig{conf},
|
||||
}
|
||||
}
|
||||
|
||||
validAttachments, err := gatherValidAttachmentsFromCache(n.CNIDir)
|
||||
if err != nil {
|
||||
return logging.Errorf("error in gather valid attachments: %v", err)
|
||||
}
|
||||
|
||||
err = cniNet.GCNetworkList(context.TODO(), confList, &libcni.GCArgs{
|
||||
ValidAttachments: validAttachments,
|
||||
})
|
||||
if err != nil {
|
||||
return logging.Errorf("error in GC command: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func emptyCNIResult(args *skel.CmdArgs, cniVersion string) *cni100.Result {
|
||||
return &cni100.Result{
|
||||
CNIVersion: cniVersion,
|
||||
Interfaces: []*cni100.Interface{
|
||||
{
|
||||
Name: args.IfName,
|
||||
Sandbox: args.Netns,
|
||||
},
|
||||
},
|
||||
IPs: []*cni100.IPConfig{
|
||||
{
|
||||
Address: net.IPNet{
|
||||
IP: net.ParseIP("0.0.0.0"),
|
||||
Mask: net.CIDRMask(0, 32),
|
||||
},
|
||||
Gateway: net.ParseIP("0.0.0.0"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,10 +26,8 @@ import (
|
||||
types020 "github.com/containernetworking/cni/pkg/types/020"
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"github.com/containernetworking/plugins/pkg/testutils"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/k8sclient"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"
|
||||
testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/testing"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
@@ -43,20 +41,6 @@ var _ = Describe("multus operations", func() {
|
||||
err := saveScratchNetConf("123456789", "", meme)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("fails to delete delegates with bad filepath", func() {
|
||||
err := deleteDelegates("123456789", "bad!file!~?Path$^")
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("delete delegates given good filepath", func() {
|
||||
os.MkdirAll("/opt/cni/bin", 0755)
|
||||
d1 := []byte("blah")
|
||||
os.WriteFile("/opt/cni/bin/123456789", d1, 0644)
|
||||
|
||||
err := deleteDelegates("123456789", "/opt/cni/bin")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("multus operations cniVersion 0.2.0 config", func() {
|
||||
@@ -769,66 +753,4 @@ var _ = Describe("multus operations cniVersion 0.2.0 config", func() {
|
||||
Expect(fExec.delIndex).To(Equal(len(fExec.plugins)))
|
||||
})
|
||||
|
||||
It("fails to execute confListDel given no 'plugins' key", func() {
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
StdinData: []byte(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"readinessindicatorfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkwaitseconds": 3,
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "weave-net"
|
||||
},{
|
||||
"name": "other1",
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "other-plugin"
|
||||
}]
|
||||
}`),
|
||||
}
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedResult1 := &types020.Result{
|
||||
CNIVersion: "0.2.0",
|
||||
IP4: &types020.IPConfig{
|
||||
IP: *testhelpers.EnsureCIDR("1.1.1.2/24"),
|
||||
},
|
||||
}
|
||||
expectedConf1 := `{
|
||||
"name": "weave1",
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "weave-net"
|
||||
}`
|
||||
fExec.addPlugin020(nil, "eth0", expectedConf1, expectedResult1, nil)
|
||||
|
||||
expectedResult2 := &types020.Result{
|
||||
CNIVersion: "0.2.0",
|
||||
IP4: &types020.IPConfig{
|
||||
IP: *testhelpers.EnsureCIDR("1.1.1.5/24"),
|
||||
},
|
||||
}
|
||||
expectedConf2 := `{
|
||||
"name": "other1",
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "other-plugin"
|
||||
}`
|
||||
fExec.addPlugin020(nil, "net1", expectedConf2, expectedResult2, nil)
|
||||
|
||||
fakeMultusNetConf := types.NetConf{
|
||||
BinDir: "/opt/cni/bin",
|
||||
}
|
||||
// use fExec for the exec param
|
||||
rawnetconflist := []byte(`{"cniVersion":"0.2.0","name":"weave1","type":"weave-net"}`)
|
||||
k8sargs, err := k8sclient.GetK8sArgs(args)
|
||||
n, err := types.LoadNetConf(args.StdinData)
|
||||
rt, _ := types.CreateCNIRuntimeConf(args, k8sargs, args.IfName, n.RuntimeConfig, nil)
|
||||
|
||||
err = conflistDel(rt, rawnetconflist, &fakeMultusNetConf, fExec)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
@@ -26,10 +26,8 @@ import (
|
||||
cni040 "github.com/containernetworking/cni/pkg/types/040"
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"github.com/containernetworking/plugins/pkg/testutils"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/k8sclient"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"
|
||||
testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/testing"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
@@ -1502,67 +1500,4 @@ var _ = Describe("multus operations cniVersion 0.4.0 config", func() {
|
||||
Expect(fExec.delIndex).To(Equal(len(fExec.plugins)))
|
||||
})
|
||||
|
||||
It("fails to execute confListDel given no 'plugins' key", func() {
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
StdinData: []byte(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"readinessindicatorfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkwaitseconds": 3,
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "0.4.0",
|
||||
"type": "weave-net"
|
||||
},{
|
||||
"name": "other1",
|
||||
"cniVersion": "0.4.0",
|
||||
"type": "other-plugin"
|
||||
}]
|
||||
}`),
|
||||
}
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedResult1 := &cni040.Result{
|
||||
CNIVersion: "0.4.0",
|
||||
IPs: []*cni040.IPConfig{{
|
||||
Address: *testhelpers.EnsureCIDR("1.1.1.2/24"),
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedConf1 := `{
|
||||
"name": "weave1",
|
||||
"cniVersion": "0.4.0",
|
||||
"type": "weave-net"
|
||||
}`
|
||||
fExec.addPlugin040(nil, "eth0", expectedConf1, expectedResult1, nil)
|
||||
|
||||
expectedResult2 := &cni040.Result{
|
||||
CNIVersion: "0.4.0",
|
||||
IPs: []*cni040.IPConfig{{
|
||||
Address: *testhelpers.EnsureCIDR("1.1.1.5/24"),
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedConf2 := `{
|
||||
"name": "other1",
|
||||
"cniVersion": "0.4.0",
|
||||
"type": "other-plugin"
|
||||
}`
|
||||
fExec.addPlugin040(nil, "net1", expectedConf2, expectedResult2, nil)
|
||||
|
||||
fakeMultusNetConf := types.NetConf{
|
||||
BinDir: "/opt/cni/bin",
|
||||
}
|
||||
// use fExec for the exec param
|
||||
rawnetconflist := []byte(`{"cniVersion":"0.4.0","name":"weave1","type":"weave-net"}`)
|
||||
k8sargs, err := k8sclient.GetK8sArgs(args)
|
||||
n, err := types.LoadNetConf(args.StdinData)
|
||||
rt, _ := types.CreateCNIRuntimeConf(args, k8sargs, args.IfName, n.RuntimeConfig, nil)
|
||||
|
||||
err = conflistDel(rt, rawnetconflist, &fakeMultusNetConf, fExec)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -18,17 +18,19 @@ package multus
|
||||
//revive:disable:dot-imports
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/skel"
|
||||
cnitypes "github.com/containernetworking/cni/pkg/types"
|
||||
cni100 "github.com/containernetworking/cni/pkg/types/100"
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"github.com/containernetworking/plugins/pkg/testutils"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/k8sclient"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"
|
||||
testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/testing"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
|
||||
@@ -1163,7 +1165,47 @@ var _ = Describe("multus operations cniVersion 1.0.0 config", func() {
|
||||
Expect(fExec.delIndex).To(Equal(len(fExec.plugins)))
|
||||
})
|
||||
|
||||
It("fails to execute confListDel given no 'plugins' key", func() {
|
||||
})
|
||||
|
||||
var _ = Describe("multus operations cniVersion 1.1.0 config", func() {
|
||||
var testNS ns.NetNS
|
||||
var tmpDir string
|
||||
configPath := "/tmp/foo.multus.conf"
|
||||
var cancel context.CancelFunc
|
||||
|
||||
BeforeEach(func() {
|
||||
// Create a new NetNS so we don't modify the host
|
||||
var err error
|
||||
testNS, err = testutils.NewNS()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
os.Setenv("CNI_NETNS", testNS.Path())
|
||||
os.Setenv("CNI_PATH", "/some/path")
|
||||
|
||||
tmpDir, err = os.MkdirTemp("", "multus_tmp")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Touch the default network file.
|
||||
os.OpenFile(configPath, os.O_RDONLY|os.O_CREATE, 0755)
|
||||
_, cancel = context.WithCancel(context.TODO())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
cancel()
|
||||
|
||||
// Cleanup default network file.
|
||||
if _, errStat := os.Stat(configPath); errStat == nil {
|
||||
errRemove := os.Remove(configPath)
|
||||
Expect(errRemove).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(testNS.Close()).To(Succeed())
|
||||
os.Unsetenv("CNI_PATH")
|
||||
os.Unsetenv("CNI_ARGS")
|
||||
err := os.RemoveAll(tmpDir)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("executes delegates with CNI Check", func() {
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
@@ -1171,59 +1213,257 @@ var _ = Describe("multus operations cniVersion 1.0.0 config", func() {
|
||||
StdinData: []byte(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"readinessindicatorfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkwaitseconds": 3,
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.0.0",
|
||||
"type": "weave-net"
|
||||
"cniVersion": "1.1.0",
|
||||
"plugins": [{
|
||||
"type": "weave-net"
|
||||
}]
|
||||
},{
|
||||
"name": "other1",
|
||||
"cniVersion": "1.0.0",
|
||||
"type": "other-plugin"
|
||||
"cniVersion": "1.1.0",
|
||||
"plugins": [{
|
||||
"type": "other-plugin"
|
||||
}]
|
||||
}]
|
||||
}`),
|
||||
}
|
||||
|
||||
logging.SetLogLevel("verbose")
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedConf1 := `{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}`
|
||||
fExec.addPlugin100(nil, "", expectedConf1, nil, nil)
|
||||
|
||||
err := CmdStatus(args, fExec, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// we only execute once for the cluster network, not for the additional ones
|
||||
Expect(fExec.statusIndex).To(Equal(1))
|
||||
})
|
||||
|
||||
It("returns empty add result using top-level cniVersion when pod is not found", func() {
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
Args: "K8S_POD_NAME=missing-pod;K8S_POD_NAMESPACE=default",
|
||||
StdinData: []byte(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
|
||||
"cniVersion": "1.1.0",
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}]
|
||||
}`),
|
||||
}
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedResult1 := &cni100.Result{
|
||||
CNIVersion: "1.0.0",
|
||||
IPs: []*cni100.IPConfig{{
|
||||
Address: *testhelpers.EnsureCIDR("1.1.1.2/24"),
|
||||
},
|
||||
},
|
||||
fKubeClient := NewFakeClientInfo()
|
||||
|
||||
result, err := CmdAdd(args, fExec, fKubeClient)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
r, ok := result.(*cni100.Result)
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(r.CNIVersion).To(Equal("1.1.0"))
|
||||
Expect(fExec.addIndex).To(Equal(0))
|
||||
})
|
||||
|
||||
It("propagates delegate STATUS errors", func() {
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
}
|
||||
expectedConf1 := `{
|
||||
k8sArgs := &types.K8sArgs{
|
||||
K8S_POD_NAMESPACE: cnitypes.UnmarshallableString("default"),
|
||||
K8S_POD_NAME: cnitypes.UnmarshallableString("pod"),
|
||||
K8S_POD_INFRA_CONTAINER_ID: cnitypes.UnmarshallableString("sandbox"),
|
||||
K8S_POD_UID: cnitypes.UnmarshallableString("uid"),
|
||||
}
|
||||
|
||||
delegateConf, err := types.LoadDelegateNetConf([]byte(`{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.0.0",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}`), nil, "", "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
rt, _ := types.CreateCNIRuntimeConf(args, k8sArgs, args.IfName, nil, delegateConf)
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedConf := `{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}`
|
||||
fExec.addPlugin100(nil, "eth0", expectedConf1, expectedResult1, nil)
|
||||
fExec.addPlugin100(nil, "", expectedConf, nil, &cnitypes.Error{Code: 50, Msg: "status failed"})
|
||||
|
||||
expectedResult2 := &cni100.Result{
|
||||
CNIVersion: "1.0.0",
|
||||
IPs: []*cni100.IPConfig{{
|
||||
Address: *testhelpers.EnsureCIDR("1.1.1.5/24"),
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedConf2 := `{
|
||||
"name": "other1",
|
||||
"cniVersion": "1.0.0",
|
||||
"type": "other-plugin"
|
||||
}`
|
||||
fExec.addPlugin100(nil, "net1", expectedConf2, expectedResult2, nil)
|
||||
|
||||
fakeMultusNetConf := types.NetConf{
|
||||
BinDir: "/opt/cni/bin",
|
||||
}
|
||||
// use fExec for the exec param
|
||||
rawnetconflist := []byte(`{"cniVersion":"1.0.0","name":"weave1","type":"weave-net"}`)
|
||||
k8sargs, err := k8sclient.GetK8sArgs(args)
|
||||
n, err := types.LoadNetConf(args.StdinData)
|
||||
rt, _ := types.CreateCNIRuntimeConf(args, k8sargs, args.IfName, n.RuntimeConfig, nil)
|
||||
|
||||
err = conflistDel(rt, rawnetconflist, &fakeMultusNetConf, fExec)
|
||||
err = DelegateStatus(fExec, delegateConf, rt, &types.NetConf{BinDir: "/bin", CNIDir: tmpDir})
|
||||
Expect(err).To(HaveOccurred())
|
||||
var cniErr *cnitypes.Error
|
||||
Expect(errors.As(err, &cniErr)).To(BeTrue())
|
||||
Expect(cniErr.Code).To(Equal(uint(50)))
|
||||
Expect(cniErr.Msg).To(Equal("status failed"))
|
||||
})
|
||||
|
||||
It("propagates CmdStatus errors for single plugin delegates", func() {
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
StdinData: []byte(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"defaultnetworkfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkwaitseconds": 3,
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}]
|
||||
}`),
|
||||
}
|
||||
|
||||
logging.SetLogLevel("verbose")
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedConf := `{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}`
|
||||
fExec.addPlugin100(nil, "", expectedConf, nil, &cnitypes.Error{Code: 50, Msg: "status failed"})
|
||||
|
||||
err := CmdStatus(args, fExec, nil)
|
||||
Expect(err).To(HaveOccurred())
|
||||
var cniErr *cnitypes.Error
|
||||
Expect(errors.As(err, &cniErr)).To(BeTrue())
|
||||
Expect(cniErr.Code).To(Equal(uint(50)))
|
||||
Expect(cniErr.Msg).To(Equal("status failed"))
|
||||
})
|
||||
|
||||
It("executes delegates with CNI GC", func() {
|
||||
tmpCNIDir := tmpDir + "/cniData"
|
||||
err := os.Mkdir(tmpCNIDir, 0777)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cniCacheDir := filepath.Join(tmpCNIDir, "/results")
|
||||
err = os.Mkdir(cniCacheDir, 0777)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
//create fake cniResult file
|
||||
err = os.WriteFile(filepath.Join(cniCacheDir, "cbr0-3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755-eth0"), []byte(`{"kind":"cniCacheV1","containerId":"3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755","config":"eyJjbmlWZXJzaW9uIjoiMC4zLjEiLCJuYW1lIjoiY2JyMCIsInBsdWdpbnMiOlt7ImNhcGFiaWxpdGllcyI6eyJpby5rdWJlcm5ldGVzLmNyaS5wb2QtYW5ub3RhdGlvbnMiOnRydWV9LCJkZWxlZ2F0ZSI6eyJoYWlycGluTW9kZSI6dHJ1ZSwiaXNEZWZhdWx0R2F0ZXdheSI6dHJ1ZX0sInR5cGUiOiJmbGFubmVsIn0seyJjYXBhYmlsaXRpZXMiOnsicG9ydE1hcHBpbmdzIjp0cnVlfSwidHlwZSI6InBvcnRtYXAifV19","ifName":"eth0","networkName":"cbr0","netns":"/var/run/netns/8b8677c8-8929-4746-8206-514069760f6e","cniArgs":[["IgnoreUnknown","true"],["K8S_POD_NAMESPACE","default"],["K8S_POD_NAME","macvlan"],["K8S_POD_INFRA_CONTAINER_ID","3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755"],["K8S_POD_UID","f0bfbd5b-096d-48ef-998c-da26743dd0cb"],["IgnoreUnknown","1"],["K8S_POD_NAMESPACE","default"],["K8S_POD_NAME","macvlan"],["K8S_POD_INFRA_CONTAINER_ID","3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755"],["K8S_POD_UID","f0bfbd5b-096d-48ef-998c-da26743dd0cb"]],"result":{"cniVersion":"0.3.1","dns":{},"interfaces":[{"mac":"ea:19:25:a2:a1:93","name":"cni0"},{"mac":"ba:76:61:2f:8b:ca","name":"vethc42d3d18"},{"mac":"7e:57:6a:9b:6b:b5","name":"eth0","sandbox":"/var/run/netns/8b8677c8-8929-4746-8206-514069760f6e"}],"ips":[{"address":"10.244.1.4/24","gateway":"10.244.1.1","interface":2,"version":"4"}],"routes":[{"dst":"10.244.0.0/16"},{"dst":"0.0.0.0/0","gw":"10.244.1.1"}]}}`), 0666)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = os.WriteFile(filepath.Join(cniCacheDir, "macvlan-conf-1-3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755-net1"), []byte(`{"kind":"cniCacheV1","containerId":"3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755","config":"eyJjbmlWZXJzaW9uIjoiMC4zLjEiLCJpcGFtIjp7ImFkZHJlc3NlcyI6W3siYWRkcmVzcyI6IjEwLjEuMS4xMDEvMjQifV0sInR5cGUiOiJzdGF0aWMifSwibWFzdGVyIjoiZXRoMSIsIm1vZGUiOiJicmlkZ2UiLCJuYW1lIjoibWFjdmxhbi1jb25mLTEiLCJ0eXBlIjoibWFjdmxhbiJ9","ifName":"net1","networkName":"macvlan-conf-1","netns":"/var/run/netns/8b8677c8-8929-4746-8206-514069760f6e","cniArgs":[["IgnoreUnknown","true"],["K8S_POD_NAMESPACE","default"],["K8S_POD_NAME","macvlan"],["K8S_POD_INFRA_CONTAINER_ID","3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755"],["K8S_POD_UID","f0bfbd5b-096d-48ef-998c-da26743dd0cb"],["IgnoreUnknown","1"],["K8S_POD_NAMESPACE","default"],["K8S_POD_NAME","macvlan"],["K8S_POD_INFRA_CONTAINER_ID","3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755"],["K8S_POD_UID","f0bfbd5b-096d-48ef-998c-da26743dd0cb"]],"result":{"cniVersion":"0.3.1","dns":{},"interfaces":[{"mac":"36:b3:c5:29:ad:b8","name":"net1","sandbox":"/var/run/netns/8b8677c8-8929-4746-8206-514069760f6e"}],"ips":[{"address":"10.1.1.101/24","interface":0,"version":"4"}]}}`), 0666)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
StdinData: []byte(fmt.Sprintf(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"defaultnetworkfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkwaitseconds": 3,
|
||||
"cniDir": "%s",
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"plugins": [{
|
||||
"type": "weave-net"
|
||||
}]
|
||||
},{
|
||||
"name": "other1",
|
||||
"cniVersion": "1.1.0",
|
||||
"plugins": [{
|
||||
"type": "other-plugin"
|
||||
}]
|
||||
}]
|
||||
}`, tmpCNIDir)),
|
||||
}
|
||||
|
||||
logging.SetLogLevel("verbose")
|
||||
|
||||
fExec := newFakeExec()
|
||||
expectedConf1 := `{
|
||||
"cni.dev/attachments": [
|
||||
{
|
||||
"containerID": "3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755",
|
||||
"ifname": "eth0"
|
||||
},
|
||||
{
|
||||
"containerID": "3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755",
|
||||
"ifname": "net1"
|
||||
}
|
||||
],
|
||||
"cni.dev/valid-attachments": [
|
||||
{
|
||||
"containerID": "3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755",
|
||||
"ifname": "eth0"
|
||||
},
|
||||
{
|
||||
"containerID": "3f6940ab5ab43bc522569d15b23f8c1bbde1d7678b080398506924fc01d72755",
|
||||
"ifname": "net1"
|
||||
}
|
||||
],
|
||||
"cniVersion": "1.1.0",
|
||||
"name": "weave1",
|
||||
"type": "weave-net"
|
||||
}`
|
||||
fExec.addPlugin100(nil, "", expectedConf1, nil, nil)
|
||||
|
||||
err = CmdGC(args, fExec, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// we only execute once for the cluster network, not for the additional ones
|
||||
Expect(fExec.gcIndex).To(Equal(1))
|
||||
err = os.RemoveAll(tmpCNIDir)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("executes single plugin delegates with CNI GC", func() {
|
||||
tmpCNIDir := tmpDir + "/cniData-single"
|
||||
err := os.Mkdir(tmpCNIDir, 0777)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cniCacheDir := filepath.Join(tmpCNIDir, "/results")
|
||||
err = os.Mkdir(cniCacheDir, 0777)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
args := &skel.CmdArgs{
|
||||
ContainerID: "123456789",
|
||||
Netns: testNS.Path(),
|
||||
IfName: "eth0",
|
||||
StdinData: []byte(fmt.Sprintf(`{
|
||||
"name": "node-cni-network",
|
||||
"type": "multus",
|
||||
"defaultnetworkfile": "/tmp/foo.multus.conf",
|
||||
"defaultnetworkwaitseconds": 3,
|
||||
"cniDir": "%s",
|
||||
"delegates": [{
|
||||
"name": "weave1",
|
||||
"cniVersion": "1.1.0",
|
||||
"type": "weave-net"
|
||||
}]
|
||||
}`, tmpCNIDir)),
|
||||
}
|
||||
|
||||
fExec := newFakeExec()
|
||||
fExec.addPlugin100(nil, "", "", nil, nil)
|
||||
|
||||
err = CmdGC(args, fExec, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(fExec.gcIndex).To(Equal(1))
|
||||
|
||||
err = os.RemoveAll(tmpCNIDir)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -58,6 +58,8 @@ type fakeExec struct {
|
||||
addIndex int
|
||||
delIndex int
|
||||
chkIndex int
|
||||
statusIndex int
|
||||
gcIndex int
|
||||
expectedDelSkip int
|
||||
plugins map[string]*fakePlugin
|
||||
}
|
||||
@@ -168,6 +170,14 @@ func (f *fakeExec) ExecPlugin(_ context.Context, pluginPath string, stdinData []
|
||||
Expect(len(f.plugins)).To(BeNumerically(">", f.delIndex))
|
||||
index = len(f.plugins) - f.expectedDelSkip - f.delIndex - 1
|
||||
f.delIndex++
|
||||
case "GC":
|
||||
Expect(len(f.plugins)).To(BeNumerically(">", f.statusIndex))
|
||||
index = f.gcIndex
|
||||
f.gcIndex++
|
||||
case "STATUS":
|
||||
Expect(len(f.plugins)).To(BeNumerically(">", f.statusIndex))
|
||||
index = f.statusIndex
|
||||
f.statusIndex++
|
||||
default:
|
||||
// Should never be reached
|
||||
Expect(false).To(BeTrue())
|
||||
|
||||
@@ -23,8 +23,10 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containernetworking/cni/libcni"
|
||||
cniversion "github.com/containernetworking/cni/pkg/version"
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"github.com/vishvananda/netlink"
|
||||
"golang.org/x/sys/unix"
|
||||
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"
|
||||
)
|
||||
|
||||
@@ -82,7 +84,12 @@ func SetDefaultGW(netnsPath string, ifName string, gateways []net.IP) error {
|
||||
// Perform the creation of the default route....
|
||||
err = netlink.RouteAdd(&newDefaultRoute)
|
||||
if err != nil {
|
||||
logging.Errorf("SetDefaultGW: Error adding route: %v", err)
|
||||
if os.IsExist(err) || err == unix.EEXIST {
|
||||
logging.Debugf("SetDefaultGW: Route already exists, ignoring: %v", err)
|
||||
err = nil
|
||||
} else {
|
||||
logging.Errorf("SetDefaultGW: Error adding route: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -177,7 +184,7 @@ func deleteDefaultGWResult(result map[string]interface{}, ipv4, ipv6 bool) (map[
return deleteDefaultGWResult020(result, ipv4, ipv6)
}

if cniVersion != "0.3.0" && cniVersion != "0.3.1" && cniVersion != "0.4.0" && cniVersion != "1.0.0" {
if !isSupportedGatewayResultVersion(cniVersion) {
return nil, fmt.Errorf("not supported version: %s", cniVersion)
}

@@ -334,7 +341,7 @@ func addDefaultGWResult(result map[string]interface{}, gw []net.IP) (map[string]
return addDefaultGWResult020(result, gw)
}

if cniVersion != "0.3.0" && cniVersion != "0.3.1" && cniVersion != "0.4.0" && cniVersion != "1.0.0" {
if !isSupportedGatewayResultVersion(cniVersion) {
return nil, fmt.Errorf("not supported version: %s", cniVersion)
}

@@ -362,6 +369,19 @@ func addDefaultGWResult(result map[string]interface{}, gw []net.IP) (map[string]
return result, nil
}

func isSupportedGatewayResultVersion(cniVersion string) bool {
switch cniVersion {
case "0.3.0", "0.3.1", "0.4.0":
return true
}

if gt, _ := cniversion.GreaterThanOrEqualTo(cniVersion, "1.0.0"); gt {
return true
}

return false
}

func addDefaultGWResult020(result map[string]interface{}, gw []net.IP) (map[string]interface{}, error) {
for _, g := range gw {
if g.To4() != nil {

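The new isSupportedGatewayResultVersion helper replaces the hard-coded version list: 0.3.0, 0.3.1 and 0.4.0 are allowed explicitly, and anything 1.0.0 or newer (including 1.1.0) is allowed through the CNI version comparison helper. A hedged sketch of that comparison, assuming cniversion.GreaterThanOrEqualTo from github.com/containernetworking/cni/pkg/version behaves as it is used above:

```go
package main

import (
	"fmt"

	cniversion "github.com/containernetworking/cni/pkg/version"
)

func main() {
	for _, v := range []string{"0.4.0", "0.9.0", "1.0.0", "1.1.0"} {
		ok, err := cniversion.GreaterThanOrEqualTo(v, "1.0.0")
		// Expected: 0.4.0 -> false (but allowed via the explicit switch above),
		// 0.9.0 -> false (rejected), 1.0.0 and 1.1.0 -> true.
		fmt.Println(v, ok, err)
	}
}
```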
@@ -1508,4 +1508,34 @@ var _ = Describe("other function unit testing", func() {
Expect(err).NotTo(HaveOccurred())
Expect(routeJSON).Should(MatchJSON(`[{"dst":"10.1.1.0/24"}]`))
})

It("supports gateway result updates for cniVersion 1.1.0", func() {
deleteInput := map[string]interface{}{
"cniVersion": "1.1.0",
"routes": []interface{}{
map[string]interface{}{"dst": "0.0.0.0/0", "gw": "10.1.1.1"},
},
}
updatedDeleteResult, err := deleteDefaultGWResult(deleteInput, true, false)
Expect(err).NotTo(HaveOccurred())
_, hasRoutes := updatedDeleteResult["routes"]
Expect(hasRoutes).To(BeFalse())

addInput := map[string]interface{}{
"cniVersion": "1.1.0",
}
updatedAddResult, err := addDefaultGWResult(addInput, []net.IP{net.ParseIP("10.1.1.1")})
Expect(err).NotTo(HaveOccurred())
routes, ok := updatedAddResult["routes"].([]interface{})
Expect(ok).To(BeTrue())
Expect(routes).To(HaveLen(1))
})

It("rejects unsupported pre-1.0.0 cniVersion", func() {
addInput := map[string]interface{}{
"cniVersion": "0.9.0",
}
_, err := addDefaultGWResult(addInput, []net.IP{net.ParseIP("10.1.1.1")})
Expect(err).To(MatchError("not supported version: 0.9.0"))
})
})

@@ -24,6 +24,8 @@ import (
"strings"
"time"

cnitypes "github.com/containernetworking/cni/pkg/types"

utilwait "k8s.io/apimachinery/pkg/util/wait"
)

@@ -71,6 +73,10 @@ func DoCNI(url string, req interface{}, socketPath string) ([]byte, error) {
}

if resp.StatusCode != http.StatusOK {
cniErr := &cnitypes.Error{}
if err := json.Unmarshal(body, cniErr); err == nil && cniErr.Msg != "" {
return nil, cniErr
}
return nil, fmt.Errorf("CNI request failed with status %v: '%s'", resp.StatusCode, string(body))
}

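With this change DoCNI returns a typed *cnitypes.Error when the daemon replies with one, instead of only a flat formatted string. A minimal sketch of how a call site can recover the structured error, assuming it runs in the same package; cniRequest and socketDir are placeholders, not names from the original:

```go
// Sketch: recovering the structured CNI error at a DoCNI call site.
body, err := DoCNI("http://dummy/cni", cniRequest, SocketPath(socketDir))
if err != nil {
	var cniErr *cnitypes.Error
	if errors.As(err, &cniErr) {
		// The CNI error code and message survived the HTTP round trip.
		return logging.Errorf("CNI request failed: code=%d msg=%q", cniErr.Code, cniErr.Msg)
	}
	return err
}
_ = body
```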
@@ -16,6 +16,7 @@ package api

import (
"encoding/json"
stderrors "errors"
"fmt"
"os"
"strings"
@@ -74,6 +75,24 @@ func CmdDel(args *skel.CmdArgs) error {
return nil
}

// CmdGC implements the CNI spec GC command handler
func CmdGC(args *skel.CmdArgs) error {
_, _, err := postRequest(args, WaitUntilAPIReady)
if err != nil {
return logging.Errorf("CmdGC (shim): %v", err)
}
return nil
}

// CmdStatus implements the CNI spec STATUS command handler
func CmdStatus(args *skel.CmdArgs) error {
_, _, err := postRequest(args, WaitUntilAPIReady)
if err != nil {
return logging.Errorf("CmdStatus (shim): %v", err)
}
return nil
}

func postRequest(args *skel.CmdArgs, readinessCheck readyCheckFunc) (*Response, string, error) {
multusShimConfig, err := shimConfig(args.StdinData)
if err != nil {
@@ -93,6 +112,10 @@ func postRequest(args *skel.CmdArgs, readinessCheck readyCheckFunc) (*Response,
var body []byte
body, err = DoCNI("http://dummy/cni", cniRequest, SocketPath(multusShimConfig.MultusSocketDir))
if err != nil {
var cniErr *cnitypes.Error
if stderrors.As(err, &cniErr) {
return nil, multusShimConfig.CNIVersion, err
}
return nil, multusShimConfig.CNIVersion, fmt.Errorf("%s: StdinData: %s", err.Error(), string(args.StdinData))
}

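These shim handlers only forward GC and STATUS to the daemon; they still have to be registered with the CNI plugin skeleton so the runtime can invoke them. A sketch of that wiring, assuming a containernetworking/cni release whose skel package exposes CNIFuncs with GC and Status fields, and assuming the shim api package lives at the import path shown and also exports CmdAdd and CmdCheck:

```go
package main

import (
	"github.com/containernetworking/cni/pkg/skel"
	cniversion "github.com/containernetworking/cni/pkg/version"

	// Assumed import path for the shim API package patched above.
	"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/server/api"
)

func main() {
	skel.PluginMainFuncs(skel.CNIFuncs{
		Add:    api.CmdAdd,
		Del:    api.CmdDel,
		Check:  api.CmdCheck,
		GC:     api.CmdGC,     // forwards GC to the daemon, as added above
		Status: api.CmdStatus, // forwards STATUS to the daemon, as added above
	}, cniversion.All, "multus-shim (sketch)")
}
```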
@@ -55,6 +55,7 @@ type MultusConf struct {
Type string `json:"type"`
CniDir string `json:"cniDir,omitempty"`
CniConfigDir string `json:"cniConfigDir,omitempty"`
AuxiliaryCNIChainName string `json:"auxiliaryCNIChainName,omitempty"`
DaemonSocketDir string `json:"daemonSocketDir,omitempty"`
MultusConfigFile string `json:"multusConfigFile,omitempty"`
MultusMasterCni string `json:"multusMasterCNI,omitempty"`

@@ -17,6 +17,7 @@ package server

import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
@@ -95,6 +96,10 @@ func (s *Server) HandleCNIRequest(cmd string, k8sArgs *types.K8sArgs, cniCmdArgs
err = s.cmdDel(cniCmdArgs, k8sArgs)
case "CHECK":
err = s.cmdCheck(cniCmdArgs, k8sArgs)
case "GC":
err = s.cmdGC(cniCmdArgs, k8sArgs)
case "STATUS":
err = s.cmdStatus(cniCmdArgs, k8sArgs)
default:
return []byte(""), fmt.Errorf("unknown cmd type: %s", cmd)
}
@@ -121,6 +126,8 @@ func (s *Server) HandleDelegateRequest(cmd string, k8sArgs *types.K8sArgs, cniCm
err = s.cmdDelegateDel(cniCmdArgs, k8sArgs, multusConfig)
case "CHECK":
err = s.cmdDelegateCheck(cniCmdArgs, k8sArgs, multusConfig)
case "STATUS":
err = s.cmdDelegateStatus(cniCmdArgs, k8sArgs, multusConfig)
default:
return []byte(""), fmt.Errorf("unknown cmd type: %s", cmd)
}
@@ -298,7 +305,7 @@ func newCNIServer(rundir string, kubeClient *k8s.ClientInfo, exec invoke.Exec, s

result, err := s.handleCNIRequest(r)
if err != nil {
http.Error(w, fmt.Sprintf("%v", err), http.StatusBadRequest)
s.writeCNIErrorResponse(w, err)
return
}

@@ -320,7 +327,7 @@ func newCNIServer(rundir string, kubeClient *k8s.ClientInfo, exec invoke.Exec, s

result, err := s.handleDelegateRequest(r)
if err != nil {
http.Error(w, fmt.Sprintf("%v", err), http.StatusBadRequest)
s.writeCNIErrorResponse(w, err)
return
}

@@ -385,6 +392,34 @@ func (s *Server) Start(ctx context.Context, l net.Listener) {
}()
}

func (s *Server) writeCNIErrorResponse(w http.ResponseWriter, err error) {
var cniErr *cnitypes.Error
if errors.As(err, &cniErr) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
errBytes, marshalErr := json.Marshal(cniErr)
if marshalErr != nil {
http.Error(w, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
if _, writeErr := w.Write(errBytes); writeErr != nil {
_ = logging.Errorf("Error writing HTTP response: %v", writeErr)
}
return
}
http.Error(w, fmt.Sprintf("%v", err), http.StatusBadRequest)
}

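writeCNIErrorResponse serializes a *cnitypes.Error as JSON so the shim's DoCNI change earlier can reconstruct it. As a rough illustration of the wire format (error code and strings here are made up; code 7 is the "invalid network config" value among the CNI spec's well-known error codes):

```go
// Illustrative only: the JSON body the daemon now returns for a typed CNI error.
cniErr := &cnitypes.Error{
	Code:    7, // "invalid network config" in the CNI well-known error codes
	Msg:     "delegate add failed",
	Details: "example detail string",
}
b, _ := json.Marshal(cniErr)
// produces roughly: {"code":7,"msg":"delegate add failed","details":"example detail string"}
_ = b
```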
func (s *Server) wrapCNIRequestError(cmdArgs *skel.CmdArgs, err error) error {
var cniErr *cnitypes.Error
if errors.As(err, &cniErr) {
_ = logging.Errorf("%s ERRORED: %v", printCmdArgs(cmdArgs), err)
return err
}
// Prefix error with request information for easier debugging.
return fmt.Errorf("%s ERRORED: %v", printCmdArgs(cmdArgs), err)
}

func (s *Server) handleCNIRequest(r *http.Request) ([]byte, error) {
var cr api.Request
b, err := io.ReadAll(r.Body)
@@ -406,8 +441,7 @@ func (s *Server) handleCNIRequest(r *http.Request) ([]byte, error) {

result, err := s.HandleCNIRequest(cmdType, k8sArgs, cniCmdArgs)
if err != nil {
// Prefix error with request information for easier debugging
return nil, fmt.Errorf("%s ERRORED: %v", printCmdArgs(cniCmdArgs), err)
return nil, s.wrapCNIRequestError(cniCmdArgs, err)
}
return result, nil
}
@@ -433,8 +467,7 @@ func (s *Server) handleDelegateRequest(r *http.Request) ([]byte, error) {

result, err := s.HandleDelegateRequest(cmdType, k8sArgs, cniCmdArgs, cr.InterfaceAttributes)
if err != nil {
// Prefix error with request information for easier debugging
return nil, fmt.Errorf("%s ERRORED: %v", printCmdArgs(cniCmdArgs), err)
return nil, s.wrapCNIRequestError(cniCmdArgs, err)
}
return result, nil
}
@@ -614,6 +647,28 @@ func (s *Server) cmdCheck(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs) error {
return multus.CmdCheck(cmdArgs, s.exec, s.kubeclient)
}

func (s *Server) cmdGC(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs) error {
namespace := string(k8sArgs.K8S_POD_NAMESPACE)
podName := string(k8sArgs.K8S_POD_NAME)
if namespace == "" || podName == "" {
return fmt.Errorf("required CNI variable missing. pod name: %s; pod namespace: %s", podName, namespace)
}

logging.Debugf("CmdGC for [%s/%s]. CNI conf: %+v", namespace, podName, *cmdArgs)
return multus.CmdGC(cmdArgs, s.exec, s.kubeclient)
}

func (s *Server) cmdStatus(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs) error {
namespace := string(k8sArgs.K8S_POD_NAMESPACE)
podName := string(k8sArgs.K8S_POD_NAME)
if namespace == "" || podName == "" {
return fmt.Errorf("required CNI variable missing. pod name: %s; pod namespace: %s", podName, namespace)
}

logging.Debugf("CmdStatus for [%s/%s]. CNI conf: %+v", namespace, podName, *cmdArgs)
return multus.CmdStatus(cmdArgs, s.exec, s.kubeclient)
}

func serializeResult(result cnitypes.Result) ([]byte, error) {
// cni result is converted to latest here and decoded to specific cni version at multus-shim
realResult, err := cni100.NewResultFromResult(result)
@@ -679,6 +734,15 @@ func (s *Server) cmdDelegateCheck(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs,
return multus.DelegateCheck(s.exec, delegateCNIConf, rt, multusConfig)
}

func (s *Server) cmdDelegateStatus(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs, multusConfig *types.NetConf) error {
delegateCNIConf, err := types.LoadDelegateNetConf(cmdArgs.StdinData, nil, "", "")
if err != nil {
return err
}
rt, _ := types.CreateCNIRuntimeConf(cmdArgs, k8sArgs, cmdArgs.IfName, nil, delegateCNIConf)
return multus.DelegateStatus(s.exec, delegateCNIConf, rt, multusConfig)
}

// note: this function may send back error to the client. In cni spec, command DEL should NOT send any error
// because container deletion follows cni DEL command. But in delegateDel case, container is not removed by
// this delegateDel, hence we decide to send error message to the request sender.

@@ -26,6 +26,7 @@ import (

"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
cni100 "github.com/containernetworking/cni/pkg/types/100"
"github.com/containernetworking/cni/pkg/version"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
@@ -62,6 +63,112 @@ func LoadDelegateNetConfList(bytes []byte, delegateConf *DelegateNetConf) error
return nil
}

// ConvertNetworkConfigListToNetConfList converts a libcni.NetworkConfigList to a NetConfList
func ConvertNetworkConfigListToNetConfList(ncList *libcni.NetworkConfigList) (*types.NetConfList, error) {
// Convert Plugins from []*libcni.PluginConfig to []*types.PluginConf
var plugins []*types.PluginConf
for _, plugin := range ncList.Plugins {
plugins = append(plugins, plugin.Network)
}

// Create NetConfList
netConfList := &types.NetConfList{
CNIVersion: ncList.CNIVersion,
Name: ncList.Name,
DisableCheck: ncList.DisableCheck,
DisableGC: ncList.DisableGC,
Plugins: plugins,
}

return netConfList, nil
}

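A short usage sketch for the conversion helper above, assuming the conflist is parsed with libcni.ConfListFromBytes and the snippet runs inside the same package; the JSON is purely illustrative:

```go
raw := []byte(`{
  "cniVersion": "1.1.0",
  "name": "example-net",
  "plugins": [{"type": "bridge"}, {"type": "portmap"}]
}`)

confList, err := libcni.ConfListFromBytes(raw)
if err != nil {
	return err
}
netConfList, err := ConvertNetworkConfigListToNetConfList(confList)
if err != nil {
	return err
}
// netConfList.Plugins now holds the two *types.PluginConf entries, and
// Name/CNIVersion/DisableCheck/DisableGC carry over from the conflist unchanged.
```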
// LoadDelegateNetConfFromConfList converts a libcni.NetworkConfigList into a DelegateNetConf structure
func LoadDelegateNetConfFromConfList(confList *libcni.NetworkConfigList, netElement *NetworkSelectionElement, deviceID string, resourceName string) (*DelegateNetConf, error) {
var err error
logging.Debugf("LoadDelegateNetConfFromConfList: %v, %v, %s", confList, netElement, deviceID)

// Convert libcni.NetworkConfigList to NetConfList
netConfList, err := ConvertNetworkConfigListToNetConfList(confList)
if err != nil {
return nil, err
}

delegateConf := &DelegateNetConf{
Name: netConfList.Name,
ConfList: *netConfList,
CNINetworkConfigList: *confList,
ConfListPlugin: true,
}

// Convert the plugins back to bytes for consistency
pluginsBytes, err := json.Marshal(netConfList)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConfFromConfList: error marshaling netConfList: %v", err)
}
delegateConf.Bytes = pluginsBytes

if deviceID != "" {
pluginsBytes, err = addDeviceIDInConfList(pluginsBytes, deviceID)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConfFromConfList: failed to add deviceID in NetConfList bytes: %v", err)
}
delegateConf.ResourceName = resourceName
delegateConf.DeviceID = deviceID
}

if netElement != nil && netElement.CNIArgs != nil {
pluginsBytes, err = addCNIArgsInConfList(pluginsBytes, netElement.CNIArgs)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConfFromConfList: failed to add cni-args in NetConfList bytes: %v", err)
}
delegateConf.Bytes = pluginsBytes
}

if netElement != nil {
if netElement.Name != "" {
// Overwrite CNI config name with net-attach-def name
delegateConf.Name = fmt.Sprintf("%s/%s", netElement.Namespace, netElement.Name)
}
if netElement.InterfaceRequest != "" {
delegateConf.IfnameRequest = netElement.InterfaceRequest
}
if netElement.MacRequest != "" {
delegateConf.MacRequest = netElement.MacRequest
}
if netElement.IPRequest != nil {
delegateConf.IPRequest = netElement.IPRequest
}
if netElement.BandwidthRequest != nil {
delegateConf.BandwidthRequest = netElement.BandwidthRequest
}
if netElement.PortMappingsRequest != nil {
delegateConf.PortMappingsRequest = netElement.PortMappingsRequest
}
if netElement.GatewayRequest != nil {
var list []net.IP
if delegateConf.GatewayRequest != nil {
list = append(*delegateConf.GatewayRequest, *netElement.GatewayRequest...)
} else {
list = *netElement.GatewayRequest
}
delegateConf.GatewayRequest = &list
}
if netElement.InfinibandGUIDRequest != "" {
delegateConf.InfinibandGUIDRequest = netElement.InfinibandGUIDRequest
}
if netElement.DeviceID != "" {
if deviceID != "" {
logging.Debugf("Warning: Both RuntimeConfig and ResourceMap provide deviceID. Ignoring RuntimeConfig")
} else {
delegateConf.DeviceID = netElement.DeviceID
}
}
}

return delegateConf, nil
}

// LoadDelegateNetConf converts raw CNI JSON into a DelegateNetConf structure
func LoadDelegateNetConf(bytes []byte, netElement *NetworkSelectionElement, deviceID string, resourceName string) (*DelegateNetConf, error) {
var err error

@@ -18,10 +18,10 @@ package types
import (
"net"

"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"

"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types"
cni100 "github.com/containernetworking/cni/pkg/types/100"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"
v1 "k8s.io/api/core/v1"
)

@@ -57,6 +57,7 @@ type NetConf struct {
NamespaceIsolation bool `json:"namespaceIsolation"`
RawNonIsolatedNamespaces string `json:"globalNamespaces"`
NonIsolatedNamespaces []string `json:"-"`
AuxiliaryCNIChainName string `json:"auxiliaryCNIChainName,omitempty"`

// Option to set system namespaces (to avoid to add defaultNetworks)
SystemNamespaces []string `json:"systemNamespaces"`
@@ -99,6 +100,7 @@ type BandwidthEntry struct {
type DelegateNetConf struct {
Conf types.NetConf
ConfList types.NetConfList
CNINetworkConfigList libcni.NetworkConfigList
Name string
IfnameRequest string `json:"ifnameRequest,omitempty"`
MacRequest string `json:"macRequest,omitempty"`

56 vendor/github.com/Masterminds/semver/v3/CHANGELOG.md (generated, vendored)
@@ -1,5 +1,59 @@
|
||||
# Changelog
|
||||
|
||||
## 3.4.0 (2025-06-27)
|
||||
|
||||
### Added
|
||||
|
||||
- #268: Added property to Constraints to include prereleases for Check and Validate
|
||||
|
||||
### Changed
|
||||
|
||||
- #263: Updated Go testing for 1.24, 1.23, and 1.22
|
||||
- #269: Updated the error message handling for message case and wrapping errors
|
||||
- #266: Restore the ability to have leading 0's when parsing with NewVersion.
|
||||
Opt-out of this by setting CoerceNewVersion to false.
|
||||
|
||||
### Fixed
|
||||
|
||||
- #257: Fixed the CodeQL link (thanks @dmitris)
|
||||
- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out
|
||||
of this by setting DetailedNewVersionErrors to false for faster performance.
|
||||
- #267: Handle pre-releases for an "and" group if one constraint includes them
|
||||
|
||||
## 3.3.1 (2024-11-19)
|
||||
|
||||
### Fixed
|
||||
|
||||
- #253: Fix for allowing some version that were invalid
|
||||
|
||||
## 3.3.0 (2024-08-27)
|
||||
|
||||
### Added
|
||||
|
||||
- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
|
||||
- #213: nil version equality checking (thanks @KnutZuidema)
|
||||
|
||||
### Changed
|
||||
|
||||
- #241: Simplify StrictNewVersion parsing (thanks @grosser)
|
||||
- Testing support up through Go 1.23
|
||||
- Minimum version set to 1.21 as this is what's tested now
|
||||
- Fuzz testing now supports caching
|
||||
|
||||
## 3.2.1 (2023-04-10)
|
||||
|
||||
### Changed
|
||||
|
||||
- #198: Improved testing around pre-release names
|
||||
- #200: Improved code scanning with addition of CodeQL
|
||||
- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
|
||||
- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
|
||||
- #203: Docs updated for security details
|
||||
|
||||
### Fixed
|
||||
|
||||
- #199: Fixed issue with range transformations
|
||||
|
||||
## 3.2.0 (2022-11-28)
|
||||
|
||||
### Added
|
||||
@@ -109,7 +163,7 @@ functions. These are described in the added and changed sections below.
|
||||
- #78: Fix unchecked error in example code (thanks @ravron)
|
||||
- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
|
||||
- #97: Fixed copyright file for proper display on GitHub
|
||||
- #107: Fix handling prerelease when sorting alphanum and num
|
||||
- #107: Fix handling prerelease when sorting alphanum and num
|
||||
- #109: Fixed where Validate sometimes returns wrong message on error
|
||||
|
||||
## 1.4.2 (2018-04-10)
|
||||
|
||||
3 vendor/github.com/Masterminds/semver/v3/Makefile (generated, vendored)
@@ -19,6 +19,7 @@ test-cover:
|
||||
.PHONY: fuzz
|
||||
fuzz:
|
||||
@echo "==> Running Fuzz Tests"
|
||||
go env GOCACHE
|
||||
go test -fuzz=FuzzNewVersion -fuzztime=15s .
|
||||
go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
|
||||
go test -fuzz=FuzzNewConstraint -fuzztime=15s .
|
||||
@@ -27,4 +28,4 @@ $(GOLANGCI_LINT):
|
||||
# Install golangci-lint. The configuration for it is in the .golangci.yml
|
||||
# file in the root of the repository
|
||||
echo ${GOPATH}
|
||||
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
|
||||
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
|
||||
|
||||
46 vendor/github.com/Masterminds/semver/v3/README.md (generated, vendored)
@@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
|
||||
[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
|
||||
[](https://goreportcard.com/report/github.com/Masterminds/semver)
|
||||
|
||||
If you are looking for a command line tool for version comparisons please see
|
||||
[vert](https://github.com/Masterminds/vert) which uses this library.
|
||||
|
||||
## Package Versions
|
||||
|
||||
Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
|
||||
Note, import `github.com/Masterminds/semver/v3` to use the latest version.
|
||||
|
||||
There are three major versions fo the `semver` package.
|
||||
|
||||
@@ -53,6 +50,18 @@ other versions, convert the version back into a string, and get the original
|
||||
string. Getting the original string is useful if the semantic version was coerced
|
||||
into a valid form.
|
||||
|
||||
There are package level variables that affect how `NewVersion` handles parsing.

- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
  versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
  part. This enables the use of CalVer in versions even when not compliant with SemVer.
  When set to `false` less coercion work is done.
- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when
  `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
  it can provide some more insight into why a version is invalid. Setting
  `DetailedNewVersionErrors` to `false` is faster but provides less detailed error
  messages if a version fails to parse.

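A small, hedged example of what these switches change in practice (behavior as described above; the exact error text may differ between releases):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// Default: CoerceNewVersion == true, so a leading zero is tolerated.
	v, err := semver.NewVersion("1.02.3")
	fmt.Println(v, err) // e.g. 1.2.3 <nil>

	// Stricter parsing: disable coercion and keep detailed errors.
	semver.CoerceNewVersion = false
	semver.DetailedNewVersionErrors = true
	_, err = semver.NewVersion("1.02.3")
	fmt.Println(err) // a "segment starts with 0"-style error instead of a bare invalid-version error
}
```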
## Sorting Semantic Versions
|
||||
|
||||
A set of versions can be sorted using the `sort` package from the standard library.
|
||||
@@ -80,12 +89,12 @@ There are two methods for comparing versions. One uses comparison methods on
|
||||
differences to notes between these two methods of comparison.
|
||||
|
||||
1. When two versions are compared using functions such as `Compare`, `LessThan`,
|
||||
and others it will follow the specification and always include prereleases
|
||||
and others it will follow the specification and always include pre-releases
|
||||
within the comparison. It will provide an answer that is valid with the
|
||||
comparison section of the spec at https://semver.org/#spec-item-11
|
||||
2. When constraint checking is used for checks or validation it will follow a
|
||||
different set of rules that are common for ranges with tools like npm/js
|
||||
and Rust/Cargo. This includes considering prereleases to be invalid if the
|
||||
and Rust/Cargo. This includes considering pre-releases to be invalid if the
|
||||
ranges does not include one. If you want to have it include pre-releases a
|
||||
simple solution is to include `-0` in your range.
|
||||
3. Constraint ranges can have some complex rules including the shorthand use of
|
||||
@@ -113,7 +122,7 @@ v, err := semver.NewVersion("1.3")
|
||||
if err != nil {
|
||||
// Handle version not being parsable.
|
||||
}
|
||||
// Check if the version meets the constraints. The a variable will be true.
|
||||
// Check if the version meets the constraints. The variable a will be true.
|
||||
a := c.Check(v)
|
||||
```
|
||||
|
||||
@@ -137,20 +146,20 @@ The basic comparisons are:
|
||||
### Working With Prerelease Versions
|
||||
|
||||
Pre-releases, for those not familiar with them, are used for software releases
|
||||
prior to stable or generally available releases. Examples of prereleases include
|
||||
development, alpha, beta, and release candidate releases. A prerelease may be
|
||||
prior to stable or generally available releases. Examples of pre-releases include
|
||||
development, alpha, beta, and release candidate releases. A pre-release may be
|
||||
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
|
||||
order of precedence, prereleases come before their associated releases. In this
|
||||
order of precedence, pre-releases come before their associated releases. In this
|
||||
example `1.2.3-beta.1 < 1.2.3`.
|
||||
|
||||
According to the Semantic Version specification prereleases may not be
|
||||
According to the Semantic Version specification, pre-releases may not be
|
||||
API compliant with their release counterpart. It says,
|
||||
|
||||
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
|
||||
|
||||
SemVer comparisons using constraints without a prerelease comparator will skip
|
||||
prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
|
||||
at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
|
||||
SemVer's comparisons using constraints without a pre-release comparator will skip
|
||||
pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
|
||||
at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
|
||||
|
||||
The reason for the `0` as a pre-release version in the example comparison is
|
||||
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
|
||||
@@ -163,6 +172,10 @@ means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
|
||||
sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
|
||||
the spec specifies.
|
||||
|
||||
The `Constraints` instance returned from `semver.NewConstraint()` has a property
`IncludePrerelease` that, when set to true, will return prerelease versions when calls
to `Check()` and `Validate()` are made.

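A brief sketch of that switch, using the public API described above:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	c, _ := semver.NewConstraint(">= 1.2.3")
	v := semver.MustParse("1.3.0-beta.1")

	fmt.Println(c.Check(v)) // false: pre-releases are excluded by default

	c.IncludePrerelease = true
	fmt.Println(c.Check(v)) // true: pre-releases are now considered
}
```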
### Hyphen Range Comparisons
|
||||
|
||||
There are multiple methods to handle ranges and the first is hyphens ranges.
|
||||
@@ -171,6 +184,9 @@ These look like:
|
||||
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
|
||||
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
|
||||
|
||||
Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
|
||||
parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
|
||||
|
||||
### Wildcards In Comparisons
|
||||
|
||||
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
|
||||
@@ -250,7 +266,7 @@ or [create a pull request](https://github.com/Masterminds/semver/pulls).
|
||||
Security is an important consideration for this project. The project currently
|
||||
uses the following tools to help discover security issues:
|
||||
|
||||
* [CodeQL](https://github.com/Masterminds/semver)
|
||||
* [CodeQL](https://codeql.github.com)
|
||||
* [gosec](https://github.com/securego/gosec)
|
||||
* Daily Fuzz testing
|
||||
|
||||
|
||||
127 vendor/github.com/Masterminds/semver/v3/constraints.go (generated, vendored)
@@ -12,6 +12,13 @@ import (
|
||||
// checked against.
|
||||
type Constraints struct {
|
||||
constraints [][]*constraint
|
||||
containsPre []bool
|
||||
|
||||
// IncludePrerelease specifies if pre-releases should be included in
|
||||
// the results. Note, if a constraint range has a prerelease than
|
||||
// prereleases will be included for that AND group even if this is
|
||||
// set to false.
|
||||
IncludePrerelease bool
|
||||
}
|
||||
|
||||
// NewConstraint returns a Constraints instance that a Version instance can
|
||||
@@ -22,11 +29,10 @@ func NewConstraint(c string) (*Constraints, error) {
|
||||
c = rewriteRange(c)
|
||||
|
||||
ors := strings.Split(c, "||")
|
||||
or := make([][]*constraint, len(ors))
|
||||
lenors := len(ors)
|
||||
or := make([][]*constraint, lenors)
|
||||
hasPre := make([]bool, lenors)
|
||||
for k, v := range ors {
|
||||
|
||||
// TODO: Find a way to validate and fetch all the constraints in a simpler form
|
||||
|
||||
// Validate the segment
|
||||
if !validConstraintRegex.MatchString(v) {
|
||||
return nil, fmt.Errorf("improper constraint: %s", v)
|
||||
@@ -43,12 +49,22 @@ func NewConstraint(c string) (*Constraints, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If one of the constraints has a prerelease record this.
|
||||
// This information is used when checking all in an "and"
|
||||
// group to ensure they all check for prereleases.
|
||||
if pc.con.pre != "" {
|
||||
hasPre[k] = true
|
||||
}
|
||||
|
||||
result[i] = pc
|
||||
}
|
||||
or[k] = result
|
||||
}
|
||||
|
||||
o := &Constraints{constraints: or}
|
||||
o := &Constraints{
|
||||
constraints: or,
|
||||
containsPre: hasPre,
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
@@ -57,10 +73,10 @@ func (cs Constraints) Check(v *Version) bool {
|
||||
// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
|
||||
// functions as the underlying functions make that possible now.
|
||||
// loop over the ORs and check the inner ANDs
|
||||
for _, o := range cs.constraints {
|
||||
for i, o := range cs.constraints {
|
||||
joy := true
|
||||
for _, c := range o {
|
||||
if check, _ := c.check(v); !check {
|
||||
if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
|
||||
joy = false
|
||||
break
|
||||
}
|
||||
@@ -83,12 +99,12 @@ func (cs Constraints) Validate(v *Version) (bool, []error) {
|
||||
// Capture the prerelease message only once. When it happens the first time
|
||||
// this var is marked
|
||||
var prerelesase bool
|
||||
for _, o := range cs.constraints {
|
||||
for i, o := range cs.constraints {
|
||||
joy := true
|
||||
for _, c := range o {
|
||||
// Before running the check handle the case there the version is
|
||||
// a prerelease and the check is not searching for prereleases.
|
||||
if c.con.pre == "" && v.pre != "" {
|
||||
if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
|
||||
if !prerelesase {
|
||||
em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
e = append(e, em)
|
||||
@@ -98,7 +114,7 @@ func (cs Constraints) Validate(v *Version) (bool, []error) {
|
||||
|
||||
} else {
|
||||
|
||||
if _, err := c.check(v); err != nil {
|
||||
if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
|
||||
e = append(e, err)
|
||||
joy = false
|
||||
}
|
||||
@@ -227,8 +243,8 @@ type constraint struct {
|
||||
}
|
||||
|
||||
// Check if a version meets the constraint
|
||||
func (c *constraint) check(v *Version) (bool, error) {
|
||||
return constraintOps[c.origfunc](v, c)
|
||||
func (c *constraint) check(v *Version, includePre bool) (bool, error) {
|
||||
return constraintOps[c.origfunc](v, c, includePre)
|
||||
}
|
||||
|
||||
// String prints an individual constraint into a string
|
||||
@@ -236,7 +252,7 @@ func (c *constraint) string() string {
|
||||
return c.origfunc + c.orig
|
||||
}
|
||||
|
||||
type cfunc func(v *Version, c *constraint) (bool, error)
|
||||
type cfunc func(v *Version, c *constraint, includePre bool) (bool, error)
|
||||
|
||||
func parseConstraint(c string) (*constraint, error) {
|
||||
if len(c) > 0 {
|
||||
@@ -272,7 +288,7 @@ func parseConstraint(c string) (*constraint, error) {
|
||||
|
||||
// The constraintRegex should catch any regex parsing errors. So,
|
||||
// we should never get here.
|
||||
return nil, errors.New("constraint Parser Error")
|
||||
return nil, errors.New("constraint parser error")
|
||||
}
|
||||
|
||||
cs.con = con
|
||||
@@ -290,7 +306,7 @@ func parseConstraint(c string) (*constraint, error) {
|
||||
|
||||
// The constraintRegex should catch any regex parsing errors. So,
|
||||
// we should never get here.
|
||||
return nil, errors.New("constraint Parser Error")
|
||||
return nil, errors.New("constraint parser error")
|
||||
}
|
||||
|
||||
cs := &constraint{
|
||||
@@ -305,16 +321,14 @@ func parseConstraint(c string) (*constraint, error) {
|
||||
}
|
||||
|
||||
// Constraint functions
|
||||
func constraintNotEqual(v *Version, c *constraint) (bool, error) {
|
||||
func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
if c.dirty {
|
||||
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
if c.con.Major() != v.Major() {
|
||||
return true, nil
|
||||
}
|
||||
@@ -345,12 +359,11 @@ func constraintNotEqual(v *Version, c *constraint) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
|
||||
func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
@@ -391,11 +404,10 @@ func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
|
||||
return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
|
||||
}
|
||||
|
||||
func constraintLessThan(v *Version, c *constraint) (bool, error) {
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
@@ -406,12 +418,11 @@ func constraintLessThan(v *Version, c *constraint) (bool, error) {
|
||||
return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
|
||||
}
|
||||
|
||||
func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
|
||||
func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
@@ -422,11 +433,10 @@ func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
|
||||
return false, fmt.Errorf("%s is less than %s", v, c.orig)
|
||||
}
|
||||
|
||||
func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
@@ -455,11 +465,10 @@ func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
|
||||
// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
|
||||
// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
|
||||
// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
|
||||
func constraintTilde(v *Version, c *constraint) (bool, error) {
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
@@ -487,16 +496,15 @@ func constraintTilde(v *Version, c *constraint) (bool, error) {
|
||||
|
||||
// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
|
||||
// it's a straight =
|
||||
func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
if c.dirty {
|
||||
return constraintTilde(v, c)
|
||||
return constraintTilde(v, c, includePre)
|
||||
}
|
||||
|
||||
eq := v.Equal(c.con)
|
||||
@@ -516,11 +524,10 @@ func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
|
||||
// ^0.0.3 --> >=0.0.3 <0.0.4
|
||||
// ^0.0 --> >=0.0.0 <0.1.0
|
||||
// ^0 --> >=0.0.0 <1.0.0
|
||||
func constraintCaret(v *Version, c *constraint) (bool, error) {
|
||||
// If there is a pre-release on the version but the constraint isn't looking
|
||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
||||
// more details.
|
||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
||||
func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
|
||||
// The existence of prereleases is checked at the group level and passed in.
|
||||
// Exit early if the version has a prerelease but those are to be ignored.
|
||||
if v.Prerelease() != "" && !includePre {
|
||||
return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
||||
}
|
||||
|
||||
|
||||
221 vendor/github.com/Masterminds/semver/v3/version.go (generated, vendored)
@@ -14,32 +14,52 @@ import (
|
||||
// The compiled version of the regex created at init() is cached here so it
|
||||
// only needs to be created once.
|
||||
var versionRegex *regexp.Regexp
|
||||
var looseVersionRegex *regexp.Regexp
|
||||
|
||||
// CoerceNewVersion sets if leading 0's are allowd in the version part. Leading 0's are
|
||||
// not allowed in a valid semantic version. When set to true, NewVersion will coerce
|
||||
// leading 0's into a valid version.
|
||||
var CoerceNewVersion = true
|
||||
|
||||
// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
|
||||
// function. This is used when CoerceNewVersion is set to false. If set to false
|
||||
// ErrInvalidSemVer is returned for an invalid version. This does not apply to
|
||||
// StrictNewVersion. Setting this function to false returns errors more quickly.
|
||||
var DetailedNewVersionErrors = true
|
||||
|
||||
var (
|
||||
// ErrInvalidSemVer is returned a version is found to be invalid when
|
||||
// being parsed.
|
||||
ErrInvalidSemVer = errors.New("Invalid Semantic Version")
|
||||
ErrInvalidSemVer = errors.New("invalid semantic version")
|
||||
|
||||
// ErrEmptyString is returned when an empty string is passed in for parsing.
|
||||
ErrEmptyString = errors.New("Version string empty")
|
||||
ErrEmptyString = errors.New("version string empty")
|
||||
|
||||
// ErrInvalidCharacters is returned when invalid characters are found as
|
||||
// part of a version
|
||||
ErrInvalidCharacters = errors.New("Invalid characters in version")
|
||||
ErrInvalidCharacters = errors.New("invalid characters in version")
|
||||
|
||||
// ErrSegmentStartsZero is returned when a version segment starts with 0.
|
||||
// This is invalid in SemVer.
|
||||
ErrSegmentStartsZero = errors.New("Version segment starts with 0")
|
||||
ErrSegmentStartsZero = errors.New("version segment starts with 0")
|
||||
|
||||
// ErrInvalidMetadata is returned when the metadata is an invalid format
|
||||
ErrInvalidMetadata = errors.New("Invalid Metadata string")
|
||||
ErrInvalidMetadata = errors.New("invalid metadata string")
|
||||
|
||||
// ErrInvalidPrerelease is returned when the pre-release is an invalid format
|
||||
ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
|
||||
ErrInvalidPrerelease = errors.New("invalid prerelease string")
|
||||
)
|
||||
|
||||
// semVerRegex is the regular expression used to parse a semantic version.
|
||||
const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
|
||||
// This is not the official regex from the semver spec. It has been modified to allow for loose handling
|
||||
// where versions like 2.1 are detected.
|
||||
const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
|
||||
`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
|
||||
`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
|
||||
|
||||
// looseSemVerRegex is a regular expression that lets invalid semver expressions through
|
||||
// with enough detail that certain errors can be checked for.
|
||||
const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
|
||||
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
|
||||
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
|
||||
|
||||
@@ -53,6 +73,7 @@ type Version struct {
|
||||
|
||||
func init() {
|
||||
versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
|
||||
looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$")
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -83,22 +104,23 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
original: v,
|
||||
}
|
||||
|
||||
// check for prerelease or build metadata
|
||||
var extra []string
|
||||
if strings.ContainsAny(parts[2], "-+") {
|
||||
// Start with the build metadata first as it needs to be on the right
|
||||
extra = strings.SplitN(parts[2], "+", 2)
|
||||
if len(extra) > 1 {
|
||||
// build metadata found
|
||||
sv.metadata = extra[1]
|
||||
parts[2] = extra[0]
|
||||
// Extract build metadata
|
||||
if strings.Contains(parts[2], "+") {
|
||||
extra := strings.SplitN(parts[2], "+", 2)
|
||||
sv.metadata = extra[1]
|
||||
parts[2] = extra[0]
|
||||
if err := validateMetadata(sv.metadata); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
extra = strings.SplitN(parts[2], "-", 2)
|
||||
if len(extra) > 1 {
|
||||
// prerelease found
|
||||
sv.pre = extra[1]
|
||||
parts[2] = extra[0]
|
||||
// Extract build prerelease
|
||||
if strings.Contains(parts[2], "-") {
|
||||
extra := strings.SplitN(parts[2], "-", 2)
|
||||
sv.pre = extra[1]
|
||||
parts[2] = extra[0]
|
||||
if err := validatePrerelease(sv.pre); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,7 +136,7 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the major, minor, and patch elements onto the returned Version
|
||||
// Extract major, minor, and patch
|
||||
var err error
|
||||
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
@@ -131,10 +153,70 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// No prerelease or build metadata found so returning now as a fastpath.
|
||||
if sv.pre == "" && sv.metadata == "" {
|
||||
return sv, nil
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
// NewVersion parses a given version and returns an instance of Version or
|
||||
// an error if unable to parse the version. If the version is SemVer-ish it
|
||||
// attempts to convert it to SemVer. If you want to validate it was a strict
|
||||
// semantic version at parse time see StrictNewVersion().
|
||||
func NewVersion(v string) (*Version, error) {
|
||||
if CoerceNewVersion {
|
||||
return coerceNewVersion(v)
|
||||
}
|
||||
m := versionRegex.FindStringSubmatch(v)
|
||||
if m == nil {
|
||||
|
||||
// Disabling detailed errors is first so that it is in the fast path.
|
||||
if !DetailedNewVersionErrors {
|
||||
return nil, ErrInvalidSemVer
|
||||
}
|
||||
|
||||
// Check for specific errors with the semver string and return a more detailed
|
||||
// error.
|
||||
m = looseVersionRegex.FindStringSubmatch(v)
|
||||
if m == nil {
|
||||
return nil, ErrInvalidSemVer
|
||||
}
|
||||
err := validateVersion(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, ErrInvalidSemVer
|
||||
}
|
||||
|
||||
sv := &Version{
|
||||
metadata: m[5],
|
||||
pre: m[4],
|
||||
original: v,
|
||||
}
|
||||
|
||||
var err error
|
||||
sv.major, err = strconv.ParseUint(m[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
|
||||
if m[2] != "" {
|
||||
sv.minor, err = strconv.ParseUint(m[2], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
} else {
|
||||
sv.minor = 0
|
||||
}
|
||||
|
||||
if m[3] != "" {
|
||||
sv.patch, err = strconv.ParseUint(m[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
} else {
|
||||
sv.patch = 0
|
||||
}
|
||||
|
||||
// Perform some basic due diligence on the extra parts to ensure they are
|
||||
// valid.
|
||||
|
||||
if sv.pre != "" {
|
||||
if err = validatePrerelease(sv.pre); err != nil {
|
||||
@@ -151,12 +233,8 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
// NewVersion parses a given version and returns an instance of Version or
|
||||
// an error if unable to parse the version. If the version is SemVer-ish it
|
||||
// attempts to convert it to SemVer. If you want to validate it was a strict
|
||||
// semantic version at parse time see StrictNewVersion().
|
||||
func NewVersion(v string) (*Version, error) {
|
||||
m := versionRegex.FindStringSubmatch(v)
|
||||
func coerceNewVersion(v string) (*Version, error) {
|
||||
m := looseVersionRegex.FindStringSubmatch(v)
|
||||
if m == nil {
|
||||
return nil, ErrInvalidSemVer
|
||||
}
|
||||
@@ -170,13 +248,13 @@ func NewVersion(v string) (*Version, error) {
|
||||
var err error
|
||||
sv.major, err = strconv.ParseUint(m[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
return nil, fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
|
||||
if m[2] != "" {
|
||||
sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
return nil, fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
} else {
|
||||
sv.minor = 0
|
||||
@@ -185,7 +263,7 @@ func NewVersion(v string) (*Version, error) {
|
||||
if m[3] != "" {
|
||||
sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
return nil, fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
} else {
|
||||
sv.patch = 0
|
||||
@@ -381,15 +459,31 @@ func (v *Version) LessThan(o *Version) bool {
|
||||
return v.Compare(o) < 0
|
||||
}
|
||||
|
||||
// LessThanEqual tests if one version is less or equal than another one.
|
||||
func (v *Version) LessThanEqual(o *Version) bool {
|
||||
return v.Compare(o) <= 0
|
||||
}
|
||||
|
||||
// GreaterThan tests if one version is greater than another one.
|
||||
func (v *Version) GreaterThan(o *Version) bool {
|
||||
return v.Compare(o) > 0
|
||||
}
|
||||
|
||||
// GreaterThanEqual tests if one version is greater or equal than another one.
|
||||
func (v *Version) GreaterThanEqual(o *Version) bool {
|
||||
return v.Compare(o) >= 0
|
||||
}
|
||||
|
||||
// Equal tests if two versions are equal to each other.
|
||||
// Note, versions can be equal with different metadata since metadata
|
||||
// is not considered part of the comparable version.
|
||||
func (v *Version) Equal(o *Version) bool {
|
||||
if v == o {
|
||||
return true
|
||||
}
|
||||
if v == nil || o == nil {
|
||||
return false
|
||||
}
|
||||
return v.Compare(o) == 0
|
||||
}
|
||||
|
||||
@@ -612,7 +706,9 @@ func containsOnly(s string, comp string) bool {
|
||||
func validatePrerelease(p string) error {
|
||||
eparts := strings.Split(p, ".")
|
||||
for _, p := range eparts {
|
||||
if containsOnly(p, num) {
|
||||
if p == "" {
|
||||
return ErrInvalidPrerelease
|
||||
} else if containsOnly(p, num) {
|
||||
if len(p) > 1 && p[0] == '0' {
|
||||
return ErrSegmentStartsZero
|
||||
}
|
||||
@@ -631,9 +727,62 @@ func validatePrerelease(p string) error {
|
||||
func validateMetadata(m string) error {
|
||||
eparts := strings.Split(m, ".")
|
||||
for _, p := range eparts {
|
||||
if !containsOnly(p, allowed) {
|
||||
if p == "" {
|
||||
return ErrInvalidMetadata
|
||||
} else if !containsOnly(p, allowed) {
|
||||
return ErrInvalidMetadata
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateVersion checks for common validation issues but may not catch all errors
|
||||
func validateVersion(m []string) error {
|
||||
var err error
|
||||
var v string
|
||||
if m[1] != "" {
|
||||
if len(m[1]) > 1 && m[1][0] == '0' {
|
||||
return ErrSegmentStartsZero
|
||||
}
|
||||
_, err = strconv.ParseUint(m[1], 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m[2] != "" {
|
||||
v = strings.TrimPrefix(m[2], ".")
|
||||
if len(v) > 1 && v[0] == '0' {
|
||||
return ErrSegmentStartsZero
|
||||
}
|
||||
_, err = strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m[3] != "" {
|
||||
v = strings.TrimPrefix(m[3], ".")
|
||||
if len(v) > 1 && v[0] == '0' {
|
||||
return ErrSegmentStartsZero
|
||||
}
|
||||
_, err = strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing version segment: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m[5] != "" {
|
||||
if err = validatePrerelease(m[5]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if m[8] != "" {
|
||||
if err = validateMetadata(m[8]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
2 vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored)
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
- [Ristretto](https://github.com/dgraph-io/ristretto)
|
||||
- [Badger](https://github.com/dgraph-io/badger)
|
||||
|
||||
29 vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored)
@@ -19,10 +19,13 @@ const (
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
// contiguous array for the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
//
|
||||
// Note that a zero-valued Digest is not ready to receive writes.
|
||||
// Call Reset or create a Digest using New before calling other methods.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
@@ -33,19 +36,31 @@ type Digest struct {
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
// New creates a new Digest with a zero seed.
|
||||
func New() *Digest {
|
||||
return NewWithSeed(0)
|
||||
}
|
||||
|
||||
// NewWithSeed creates a new Digest with the given seed.
|
||||
func NewWithSeed(seed uint64) *Digest {
|
||||
var d Digest
|
||||
d.Reset()
|
||||
d.ResetWithSeed(seed)
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
// It uses a seed value of zero.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -primes[0]
|
||||
d.ResetWithSeed(0)
|
||||
}
|
||||
|
||||
// ResetWithSeed clears the Digest's state so that it can be reused.
|
||||
// It uses the given seed to initialize the state.
|
||||
func (d *Digest) ResetWithSeed(seed uint64) {
|
||||
d.v1 = seed + prime1 + prime2
|
||||
d.v2 = seed + prime2
|
||||
d.v3 = seed
|
||||
d.v4 = seed - prime1
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
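A short usage sketch for the seeded constructors added in the hunk above:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	const seed = 42

	d := xxhash.NewWithSeed(seed)
	d.Write([]byte("hello"))
	fmt.Printf("%x\n", d.Sum64())

	// Reuse the same Digest for another seeded computation.
	d.ResetWithSeed(seed)
	d.Write([]byte("world"))
	fmt.Printf("%x\n", d.Sum64())
}
```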
2 vendor/github.com/cespare/xxhash/v2/xxhash_asm.go (generated, vendored)
@@ -6,7 +6,7 @@
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
2 vendor/github.com/cespare/xxhash/v2/xxhash_other.go (generated, vendored)
@@ -3,7 +3,7 @@
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// d := New()
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@@ -5,7 +5,7 @@
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@@ -33,7 +33,7 @@ import (
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||
|
||||
97 vendor/github.com/containernetworking/cni/libcni/api.go generated vendored
@@ -23,6 +23,7 @@ package libcni
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@@ -66,17 +67,23 @@ type RuntimeConf struct {
CacheDir string
}

type NetworkConfig struct {
Network *types.NetConf
// Use PluginConfig instead of NetworkConfig, the NetworkConfig
// backwards-compat alias will be removed in a future release.
type NetworkConfig = PluginConfig

type PluginConfig struct {
Network *types.PluginConf
Bytes []byte
}

type NetworkConfigList struct {
Name string
CNIVersion string
DisableCheck bool
Plugins []*NetworkConfig
Bytes []byte
Name string
CNIVersion string
DisableCheck bool
DisableGC bool
LoadOnlyInlinedPlugins bool
Plugins []*PluginConfig
Bytes []byte
}

type NetworkAttachment struct {
@@ -100,19 +107,21 @@ type CNI interface {
GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)

AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
AddNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) (types.Result, error)
CheckNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error
DelNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error
GetNetworkCachedResult(net *PluginConfig, rt *RuntimeConf) (types.Result, error)
GetNetworkCachedConfig(net *PluginConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)

ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
ValidateNetwork(ctx context.Context, net *PluginConfig) ([]string, error)

GCNetworkList(ctx context.Context, net *NetworkConfigList, args *GCArgs) error
GetStatusNetworkList(ctx context.Context, net *NetworkConfigList) error

GetCachedAttachments(containerID string) ([]*NetworkAttachment, error)

GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error)
}

type CNIConfig struct {
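Because NetworkConfig is now a type alias for PluginConfig (and types.NetConf an alias for types.PluginConf), callers written against the old names keep compiling unchanged even though the CNI interface methods are declared with *PluginConfig. A small illustrative sketch (the function and variable names are hypothetical):

```go
package example

import (
	"fmt"

	"github.com/containernetworking/cni/libcni"
)

// describe accepts the old name; NetworkConfig is an alias for PluginConfig,
// so a *PluginConfig can be passed here without any conversion.
func describe(conf *libcni.NetworkConfig) string {
	var pc *libcni.PluginConfig = conf // identical types under the alias
	return fmt.Sprintf("plugin %q (CNI %s)", pc.Network.Type, pc.Network.CNIVersion)
}
```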
@@ -143,7 +152,7 @@ func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec)
|
||||
}
|
||||
}
|
||||
|
||||
func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
|
||||
func buildOneConfig(name, cniVersion string, orig *PluginConfig, prevResult types.Result, rt *RuntimeConf) (*PluginConfig, error) {
|
||||
var err error
|
||||
|
||||
inject := map[string]interface{}{
|
||||
@@ -179,7 +188,7 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ
|
||||
// capabilities include "portMappings", and the CapabilityArgs map includes a
|
||||
// "portMappings" key, that key and its value are added to the "runtimeConfig"
|
||||
// dictionary to be passed to the plugin's stdin.
|
||||
func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) {
|
||||
func injectRuntimeConfig(orig *PluginConfig, rt *RuntimeConf) (*PluginConfig, error) {
|
||||
var err error
|
||||
|
||||
rc := make(map[string]interface{})
|
||||
@@ -400,7 +409,7 @@ func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *Runt
|
||||
|
||||
// GetNetworkCachedResult returns the cached Result of the previous
|
||||
// AddNetwork() operation for a network, or an error.
|
||||
func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
|
||||
func (c *CNIConfig) GetNetworkCachedResult(net *PluginConfig, rt *RuntimeConf) (types.Result, error) {
|
||||
return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
|
||||
}
|
||||
|
||||
@@ -412,7 +421,7 @@ func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *Runt
|
||||
|
||||
// GetNetworkCachedConfig copies the input RuntimeConf to output
|
||||
// RuntimeConf with fields updated with info from the cached Config.
|
||||
func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
|
||||
func (c *CNIConfig) GetNetworkCachedConfig(net *PluginConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
|
||||
return c.getCachedConfig(net.Network.Name, rt)
|
||||
}
|
||||
|
||||
@@ -422,6 +431,9 @@ func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachme
|
||||
dirPath := filepath.Join(c.getCacheDir(&RuntimeConf{}), "results")
|
||||
entries, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -475,7 +487,7 @@ func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachme
|
||||
return attachments, nil
|
||||
}
|
||||
|
||||
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
|
||||
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *PluginConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
|
||||
c.ensureExec()
|
||||
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||
if err != nil {
|
||||
@@ -517,7 +529,7 @@ func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList,
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
|
||||
func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *PluginConfig, prevResult types.Result, rt *RuntimeConf) error {
|
||||
c.ensureExec()
|
||||
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||
if err != nil {
|
||||
@@ -559,7 +571,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
|
||||
func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *PluginConfig, prevResult types.Result, rt *RuntimeConf) error {
|
||||
c.ensureExec()
|
||||
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||
if err != nil {
|
||||
@@ -595,14 +607,12 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
|
||||
}
|
||||
}
|
||||
|
||||
if cachedResult != nil {
|
||||
_ = c.cacheDel(list.Name, rt)
|
||||
}
|
||||
_ = c.cacheDel(list.Name, rt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func pluginDescription(net *types.NetConf) string {
|
||||
func pluginDescription(net *types.PluginConf) string {
|
||||
if net == nil {
|
||||
return "<missing>"
|
||||
}
|
||||
@@ -616,7 +626,7 @@ func pluginDescription(net *types.NetConf) string {
|
||||
}
|
||||
|
||||
// AddNetwork executes the plugin with the ADD command
|
||||
func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
|
||||
func (c *CNIConfig) AddNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) (types.Result, error) {
|
||||
result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -630,7 +640,7 @@ func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
|
||||
}
|
||||
|
||||
// CheckNetwork executes the plugin with the CHECK command
|
||||
func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
|
||||
func (c *CNIConfig) CheckNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error {
|
||||
// CHECK was added in CNI spec version 0.4.0 and higher
|
||||
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
|
||||
return err
|
||||
@@ -646,7 +656,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru
|
||||
}
|
||||
|
||||
// DelNetwork executes the plugin with the DEL command
|
||||
func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
|
||||
func (c *CNIConfig) DelNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error {
|
||||
var cachedResult types.Result
|
||||
|
||||
// Cached result on DEL was added in CNI spec version 0.4.0 and higher
|
||||
@@ -706,7 +716,7 @@ func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfig
|
||||
// ValidateNetwork checks that a configuration is reasonably valid.
|
||||
// It uses the same logic as ValidateNetworkList)
|
||||
// Returns a list of capabilities
|
||||
func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
|
||||
func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *PluginConfig) ([]string, error) {
|
||||
caps := []string{}
|
||||
for c, ok := range net.Network.Capabilities {
|
||||
if ok {
|
||||
@@ -758,15 +768,23 @@ func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (vers
|
||||
// - dump the list of cached attachments, and issue deletes as necessary
|
||||
// - issue a GC to the underlying plugins (if the version is high enough)
|
||||
func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, args *GCArgs) error {
|
||||
// If DisableGC is set, then don't bother GCing at all.
|
||||
if list.DisableGC {
|
||||
return nil
|
||||
}
|
||||
|
||||
// First, get the list of cached attachments
|
||||
cachedAttachments, err := c.GetCachedAttachments("")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
validAttachments := make(map[types.GCAttachment]interface{}, len(args.ValidAttachments))
|
||||
for _, a := range args.ValidAttachments {
|
||||
validAttachments[a] = nil
|
||||
var validAttachments map[types.GCAttachment]interface{}
|
||||
if args != nil {
|
||||
validAttachments = make(map[types.GCAttachment]interface{}, len(args.ValidAttachments))
|
||||
for _, a := range args.ValidAttachments {
|
||||
validAttachments[a] = nil
|
||||
}
|
||||
}
|
||||
|
||||
var errs []error
|
||||
@@ -799,10 +817,15 @@ func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList,
|
||||
// now, if the version supports it, issue a GC
|
||||
if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); gt {
|
||||
inject := map[string]interface{}{
|
||||
"name": list.Name,
|
||||
"cniVersion": list.CNIVersion,
|
||||
"cni.dev/valid-attachments": args.ValidAttachments,
|
||||
"name": list.Name,
|
||||
"cniVersion": list.CNIVersion,
|
||||
}
|
||||
if args != nil {
|
||||
inject["cni.dev/valid-attachments"] = args.ValidAttachments
|
||||
// #1101: spec used incorrect variable name
|
||||
inject["cni.dev/attachments"] = args.ValidAttachments
|
||||
}
|
||||
|
||||
for _, plugin := range list.Plugins {
|
||||
// build config here
|
||||
pluginConfig, err := InjectConf(plugin, inject)
|
||||
@@ -815,10 +838,10 @@ func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList,
|
||||
}
|
||||
}
|
||||
|
||||
return joinErrors(errs...)
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
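The GCNetworkList changes above make garbage collection opt-out via DisableGC, tolerate a nil *GCArgs, and inject both cni.dev/valid-attachments and cni.dev/attachments (the latter to compensate for the incorrect variable name in the spec, see #1101) before issuing the GC to each plugin; per-plugin errors are now combined with errors.Join. A hedged usage sketch, where cniConfig, list and the attachment values are placeholders rather than code from this repository:

```go
package example

import (
	"context"
	"log"

	"github.com/containernetworking/cni/libcni"
	"github.com/containernetworking/cni/pkg/types"
)

func gcStaleAttachments(ctx context.Context, cniConfig *libcni.CNIConfig, list *libcni.NetworkConfigList) {
	// Attachments listed here survive the GC; everything else cached for this
	// network list is deleted. Passing nil args treats every cached attachment
	// as stale.
	args := &libcni.GCArgs{
		ValidAttachments: []types.GCAttachment{
			{ContainerID: "abc123", IfName: "eth0"},
		},
	}
	if err := cniConfig.GCNetworkList(ctx, list, args); err != nil {
		// per-plugin failures arrive joined into a single error
		log.Printf("CNI GC failed: %v", err)
	}
}
```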
func (c *CNIConfig) gcNetwork(ctx context.Context, net *NetworkConfig) error {
|
||||
func (c *CNIConfig) gcNetwork(ctx context.Context, net *PluginConfig) error {
|
||||
c.ensureExec()
|
||||
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||
if err != nil {
|
||||
@@ -853,7 +876,7 @@ func (c *CNIConfig) GetStatusNetworkList(ctx context.Context, list *NetworkConfi
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *NetworkConfig) error {
|
||||
func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *PluginConfig) error {
|
||||
c.ensureExec()
|
||||
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
|
||||
if err != nil {
|
||||
|
||||
224 vendor/github.com/containernetworking/cni/libcni/conf.go generated vendored
@@ -20,11 +20,10 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"sort"
"strings"

"github.com/Masterminds/semver/v3"

"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/version"
)
@@ -46,9 +45,16 @@ func (e NoConfigsFoundError) Error() string {
return fmt.Sprintf(`no net configurations found in %s`, e.Dir)
}

func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}}
if err := json.Unmarshal(bytes, conf.Network); err != nil {
// This will not validate that the plugins actually belong to the netconfig by ensuring
// that they are loaded from a directory named after the networkName, relative to the network config.
//
// Since here we are just accepting raw bytes, the caller is responsible for ensuring that the plugin
// config provided here actually "belongs" to the networkconfig in question.
func NetworkPluginConfFromBytes(pluginConfBytes []byte) (*PluginConfig, error) {
// TODO why are we creating a struct that holds both the byte representation and the deserialized
// representation, and returning that, instead of just returning the deserialized representation?
conf := &PluginConfig{Bytes: pluginConfBytes, Network: &types.PluginConf{}}
if err := json.Unmarshal(pluginConfBytes, conf.Network); err != nil {
return nil, fmt.Errorf("error parsing configuration: %w", err)
}
if conf.Network.Type == "" {
@@ -57,17 +63,35 @@ func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
return conf, nil
}

func ConfFromFile(filename string) (*NetworkConfig, error) {
bytes, err := os.ReadFile(filename)
// Given a path to a directory containing a network configuration, and the name of a network,
// loads all plugin definitions found at path `networkConfPath/networkName/*.conf`
func NetworkPluginConfsFromFiles(networkConfPath, networkName string) ([]*PluginConfig, error) {
var pConfs []*PluginConfig

pluginConfPath := filepath.Join(networkConfPath, networkName)

pluginConfFiles, err := ConfFiles(pluginConfPath, []string{".conf"})
if err != nil {
return nil, fmt.Errorf("error reading %s: %w", filename, err)
return nil, fmt.Errorf("failed to read plugin config files in %s: %w", pluginConfPath, err)
}
return ConfFromBytes(bytes)

for _, pluginConfFile := range pluginConfFiles {
pluginConfBytes, err := os.ReadFile(pluginConfFile)
if err != nil {
return nil, fmt.Errorf("error reading %s: %w", pluginConfFile, err)
}
pluginConf, err := NetworkPluginConfFromBytes(pluginConfBytes)
if err != nil {
return nil, err
}
pConfs = append(pConfs, pluginConf)
}
return pConfs, nil
}

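NetworkPluginConfsFromFiles implements the per-network layout described above: plugin definitions live in `<confDir>/<networkName>/*.conf` next to the network's conflist. A small sketch under hypothetical paths and network names:

```go
package example

import (
	"fmt"

	"github.com/containernetworking/cni/libcni"
)

// Loads every plugin conf under /etc/cni/net.d/mynet/*.conf (the directory
// and network name are illustrative only) and prints the plugin types found.
func loadMynetPlugins() error {
	plugins, err := libcni.NetworkPluginConfsFromFiles("/etc/cni/net.d", "mynet")
	if err != nil {
		return err
	}
	for _, p := range plugins {
		fmt.Println(p.Network.Type)
	}
	return nil
}
```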
func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
|
||||
func NetworkConfFromBytes(confBytes []byte) (*NetworkConfigList, error) {
|
||||
rawList := make(map[string]interface{})
|
||||
if err := json.Unmarshal(bytes, &rawList); err != nil {
|
||||
if err := json.Unmarshal(confBytes, &rawList); err != nil {
|
||||
return nil, fmt.Errorf("error parsing configuration list: %w", err)
|
||||
}
|
||||
|
||||
@@ -92,24 +116,20 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
|
||||
rawVersions, ok := rawList["cniVersions"]
|
||||
if ok {
|
||||
// Parse the current package CNI version
|
||||
currentVersion, err := semver.NewVersion(version.Current())
|
||||
if err != nil {
|
||||
panic("CNI version is invalid semver!")
|
||||
}
|
||||
|
||||
rvs, ok := rawVersions.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions: %T", rvs)
|
||||
}
|
||||
vs := make([]*semver.Version, 0, len(rvs))
|
||||
vs := make([]string, 0, len(rvs))
|
||||
for i, rv := range rvs {
|
||||
v, ok := rv.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions index %d: %T", i, rv)
|
||||
}
|
||||
if v, err := semver.NewVersion(v); err != nil {
|
||||
gt, err := version.GreaterThan(v, version.Current())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid cniVersions entry %s at index %d: %w", v, i, err)
|
||||
} else if !v.GreaterThan(currentVersion) {
|
||||
} else if !gt {
|
||||
// Skip versions "greater" than this implementation of the spec
|
||||
vs = append(vs, v)
|
||||
}
|
||||
@@ -117,50 +137,91 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
|
||||
|
||||
// if cniVersion was already set, append it to the list for sorting.
|
||||
if cniVersion != "" {
|
||||
if v, err := semver.NewVersion(cniVersion); err != nil {
|
||||
gt, err := version.GreaterThan(cniVersion, version.Current())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion %s: %w", cniVersion, err)
|
||||
} else if !v.GreaterThan(currentVersion) {
|
||||
} else if !gt {
|
||||
// ignore any versions higher than the current implemented spec version
|
||||
vs = append(vs, v)
|
||||
vs = append(vs, cniVersion)
|
||||
}
|
||||
}
|
||||
sort.Sort(semver.Collection(vs))
|
||||
slices.SortFunc[[]string](vs, func(v1, v2 string) int {
|
||||
if v1 == v2 {
|
||||
return 0
|
||||
}
|
||||
if gt, _ := version.GreaterThan(v1, v2); gt {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
})
|
||||
if len(vs) > 0 {
|
||||
cniVersion = vs[len(vs)-1].String()
|
||||
cniVersion = vs[len(vs)-1]
|
||||
}
|
||||
}
|
||||
|
||||
disableCheck := false
|
||||
if rawDisableCheck, ok := rawList["disableCheck"]; ok {
|
||||
disableCheck, ok = rawDisableCheck.(bool)
|
||||
readBool := func(key string) (bool, error) {
|
||||
rawVal, ok := rawList[key]
|
||||
if !ok {
|
||||
disableCheckStr, ok := rawDisableCheck.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
|
||||
}
|
||||
switch {
|
||||
case strings.ToLower(disableCheckStr) == "false":
|
||||
disableCheck = false
|
||||
case strings.ToLower(disableCheckStr) == "true":
|
||||
disableCheck = true
|
||||
default:
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck value %q", disableCheckStr)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
if b, ok := rawVal.(bool); ok {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
s, ok := rawVal.(string)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("error parsing configuration list: invalid type %T for %s", rawVal, key)
|
||||
}
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "false":
|
||||
return false, nil
|
||||
case "true":
|
||||
return true, nil
|
||||
}
|
||||
return false, fmt.Errorf("error parsing configuration list: invalid value %q for %s", s, key)
|
||||
}
|
||||
|
||||
disableCheck, err := readBool("disableCheck")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
disableGC, err := readBool("disableGC")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
loadOnlyInlinedPlugins, err := readBool("loadOnlyInlinedPlugins")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
list := &NetworkConfigList{
|
||||
Name: name,
|
||||
DisableCheck: disableCheck,
|
||||
CNIVersion: cniVersion,
|
||||
Bytes: bytes,
|
||||
Name: name,
|
||||
DisableCheck: disableCheck,
|
||||
DisableGC: disableGC,
|
||||
LoadOnlyInlinedPlugins: loadOnlyInlinedPlugins,
|
||||
CNIVersion: cniVersion,
|
||||
Bytes: confBytes,
|
||||
}
|
||||
|
||||
var plugins []interface{}
|
||||
plug, ok := rawList["plugins"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key")
|
||||
// We can have a `plugins` list key in the main conf,
|
||||
// We can also have `loadOnlyInlinedPlugins == true`
|
||||
//
|
||||
// If `plugins` is there, then `loadOnlyInlinedPlugins` can be true
|
||||
//
|
||||
// If plugins is NOT there, then `loadOnlyInlinedPlugins` cannot be true
|
||||
//
|
||||
// We have to have at least some plugins.
|
||||
if !ok && loadOnlyInlinedPlugins {
|
||||
return nil, fmt.Errorf("error parsing configuration list: `loadOnlyInlinedPlugins` is true, and no 'plugins' key")
|
||||
} else if !ok && !loadOnlyInlinedPlugins {
|
||||
return list, nil
|
||||
}
|
||||
|
||||
plugins, ok = plug.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug)
|
||||
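The readBool helper above accepts either JSON booleans or the strings "true"/"false" for the disableCheck, disableGC and loadOnlyInlinedPlugins keys, and a missing plugins key is now only an error when loadOnlyInlinedPlugins is set. A hedged sketch of feeding such a conflist to the new NetworkConfFromBytes (the network and plugin names are made up):

```go
package example

import "github.com/containernetworking/cni/libcni"

func parseExampleConflist() (*libcni.NetworkConfigList, error) {
	conflist := []byte(`{
	  "cniVersion": "1.1.0",
	  "name": "example-net",
	  "disableCheck": "true",
	  "disableGC": false,
	  "loadOnlyInlinedPlugins": true,
	  "plugins": [
	    { "type": "loopback" }
	  ]
	}`)
	// disableCheck parses from its string form, disableGC from the bool form;
	// loadOnlyInlinedPlugins keeps NetworkConfFromFile from also reading
	// example-net/*.conf when the list is loaded from disk.
	return libcni.NetworkConfFromBytes(conflist)
}
```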
@@ -180,24 +241,68 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
|
||||
}
|
||||
list.Plugins = append(list.Plugins, netConf)
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func ConfListFromFile(filename string) (*NetworkConfigList, error) {
|
||||
func NetworkConfFromFile(filename string) (*NetworkConfigList, error) {
|
||||
bytes, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading %s: %w", filename, err)
|
||||
}
|
||||
return ConfListFromBytes(bytes)
|
||||
|
||||
conf, err := NetworkConfFromBytes(bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !conf.LoadOnlyInlinedPlugins {
|
||||
plugins, err := NetworkPluginConfsFromFiles(filepath.Dir(filename), conf.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conf.Plugins = append(conf.Plugins, plugins...)
|
||||
}
|
||||
|
||||
if len(conf.Plugins) == 0 {
|
||||
// Having 0 plugins for a given network is not necessarily a problem,
|
||||
// but return as error for caller to decide, since they tried to load
|
||||
return nil, fmt.Errorf("no plugin configs found")
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
// Deprecated: This file format is no longer supported, use NetworkConfXXX and NetworkPluginXXX functions
|
||||
func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
|
||||
return NetworkPluginConfFromBytes(bytes)
|
||||
}
|
||||
|
||||
// Deprecated: This file format is no longer supported, use NetworkConfXXX and NetworkPluginXXX functions
|
||||
func ConfFromFile(filename string) (*NetworkConfig, error) {
|
||||
bytes, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading %s: %w", filename, err)
|
||||
}
|
||||
return ConfFromBytes(bytes)
|
||||
}
|
||||
|
||||
func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
|
||||
return NetworkConfFromBytes(bytes)
|
||||
}
|
||||
|
||||
func ConfListFromFile(filename string) (*NetworkConfigList, error) {
|
||||
return NetworkConfFromFile(filename)
|
||||
}
|
||||
|
||||
// ConfFiles simply returns a slice of all files in the provided directory
|
||||
// with extensions matching the provided set.
|
||||
func ConfFiles(dir string, extensions []string) ([]string, error) {
|
||||
// In part, adapted from rkt/networking/podenv.go#listFiles
|
||||
files, err := os.ReadDir(dir)
|
||||
switch {
|
||||
case err == nil: // break
|
||||
case os.IsNotExist(err):
|
||||
// If folder not there, return no error - only return an
|
||||
// error if we cannot read contents or there are no contents.
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, err
|
||||
@@ -218,6 +323,7 @@ func ConfFiles(dir string, extensions []string) ([]string, error) {
|
||||
return confFiles, nil
|
||||
}
|
||||
|
||||
// Deprecated: This file format is no longer supported, use NetworkConfXXX and NetworkPluginXXX functions
|
||||
func LoadConf(dir, name string) (*NetworkConfig, error) {
|
||||
files, err := ConfFiles(dir, []string{".conf", ".json"})
|
||||
switch {
|
||||
@@ -241,6 +347,15 @@ func LoadConf(dir, name string) (*NetworkConfig, error) {
|
||||
}
|
||||
|
||||
func LoadConfList(dir, name string) (*NetworkConfigList, error) {
|
||||
return LoadNetworkConf(dir, name)
|
||||
}
|
||||
|
||||
// LoadNetworkConf looks at all the network configs in a given dir,
|
||||
// loads and parses them all, and returns the first one with an extension of `.conf`
|
||||
// that matches the provided network name predicate.
|
||||
func LoadNetworkConf(dir, name string) (*NetworkConfigList, error) {
|
||||
// TODO this .conflist/.conf extension thing is confusing and inexact
|
||||
// for implementors. We should pick one extension for everything and stick with it.
|
||||
files, err := ConfFiles(dir, []string{".conflist"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -248,7 +363,7 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) {
|
||||
sort.Strings(files)
|
||||
|
||||
for _, confFile := range files {
|
||||
conf, err := ConfListFromFile(confFile)
|
||||
conf, err := NetworkConfFromFile(confFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -257,7 +372,7 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Try and load a network configuration file (instead of list)
|
||||
// Deprecated: Try and load a network configuration file (instead of list)
|
||||
// from the same name, then upconvert.
|
||||
singleConf, err := LoadConf(dir, name)
|
||||
if err != nil {
|
||||
@@ -273,7 +388,8 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) {
|
||||
return ConfListFromConf(singleConf)
|
||||
}
|
||||
|
||||
func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) {
|
||||
// InjectConf takes a PluginConfig and inserts additional values into it, ensuring the result is serializable.
|
||||
func InjectConf(original *PluginConfig, newValues map[string]interface{}) (*PluginConfig, error) {
|
||||
config := make(map[string]interface{})
|
||||
err := json.Unmarshal(original.Bytes, &config)
|
||||
if err != nil {
|
||||
@@ -297,12 +413,14 @@ func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*Net
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ConfFromBytes(newBytes)
|
||||
return NetworkPluginConfFromBytes(newBytes)
|
||||
}
|
||||
|
||||
// ConfListFromConf "upconverts" a network config in to a NetworkConfigList,
|
||||
// with the single network as the only entry in the list.
|
||||
func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) {
|
||||
//
|
||||
// Deprecated: Non-conflist file formats are unsupported, use NetworkConfXXX and NetworkPluginXXX functions
|
||||
func ConfListFromConf(original *PluginConfig) (*NetworkConfigList, error) {
|
||||
// Re-deserialize the config's json, then make a raw map configlist.
|
||||
// This may seem a bit strange, but it's to make the Bytes fields
|
||||
// actually make sense. Otherwise, the generated json is littered with
|
||||
|
||||
58
vendor/github.com/containernetworking/cni/libcni/multierror.go
generated
vendored
58
vendor/github.com/containernetworking/cni/libcni/multierror.go
generated
vendored
@@ -1,58 +0,0 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Copyright the CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Adapted from errors/join.go from go 1.20
|
||||
// This package can be removed once the toolchain is updated to 1.20
|
||||
|
||||
package libcni
|
||||
|
||||
func joinErrors(errs ...error) error {
|
||||
n := 0
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
n++
|
||||
}
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
e := &multiError{
|
||||
errs: make([]error, 0, n),
|
||||
}
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
type multiError struct {
|
||||
errs []error
|
||||
}
|
||||
|
||||
func (e *multiError) Error() string {
|
||||
var b []byte
|
||||
for i, err := range e.errs {
|
||||
if i > 0 {
|
||||
b = append(b, '\n')
|
||||
}
|
||||
b = append(b, err.Error()...)
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2013 Matt T. Proud
|
||||
// Copyright 2022 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -12,5 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package pbutil provides record length-delimited Protocol Buffer streaming.
|
||||
package pbutil
|
||||
package ns
|
||||
|
||||
import "github.com/containernetworking/cni/pkg/types"
|
||||
|
||||
func CheckNetNS(nsPath string) (bool, *types.Error) {
|
||||
return false, nil
|
||||
}
|
||||
22 vendor/github.com/containernetworking/cni/pkg/types/types.go generated vendored
@@ -56,8 +56,12 @@ func (n *IPNet) UnmarshalJSON(data []byte) error {
return nil
}

// NetConf describes a network.
type NetConf struct {
// Use PluginConf instead of NetConf, the NetConf
// backwards-compat alias will be removed in a future release.
type NetConf = PluginConf

// PluginConf describes a plugin configuration for a specific network.
type PluginConf struct {
CNIVersion string `json:"cniVersion,omitempty"`

Name string `json:"name,omitempty"`
@@ -83,11 +87,8 @@ type GCAttachment struct {

// Note: DNS should be omit if DNS is empty but default Marshal function
// will output empty structure hence need to write a Marshal function
func (n *NetConf) MarshalJSON() ([]byte, error) {
// use type alias to escape recursion for json.Marshal() to MarshalJSON()
type fixObjType = NetConf

bytes, err := json.Marshal(fixObjType(*n)) //nolint:all
func (n *PluginConf) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(*n)
if err != nil {
return nil, err
}
@@ -117,9 +118,10 @@ func (i *IPAM) IsEmpty() bool {
type NetConfList struct {
CNIVersion string `json:"cniVersion,omitempty"`

Name string `json:"name,omitempty"`
DisableCheck bool `json:"disableCheck,omitempty"`
Plugins []*NetConf `json:"plugins,omitempty"`
Name string `json:"name,omitempty"`
DisableCheck bool `json:"disableCheck,omitempty"`
DisableGC bool `json:"disableGC,omitempty"`
Plugins []*PluginConf `json:"plugins,omitempty"`
}

// Result is an interface that provides the result of plugin execution

24 vendor/github.com/containernetworking/cni/pkg/version/plugin.go generated vendored
@@ -142,3 +142,27 @@ func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
}
return false, nil
}

// GreaterThan returns true if the first version is greater than the second
func GreaterThan(version, otherVersion string) (bool, error) {
firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
if err != nil {
return false, err
}

secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion)
if err != nil {
return false, err
}

if firstMajor > secondMajor {
return true, nil
} else if firstMajor == secondMajor {
if firstMinor > secondMinor {
return true, nil
} else if firstMinor == secondMinor && firstMicro > secondMicro {
return true, nil
}
}
return false, nil
}

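GreaterThan complements the existing GreaterThanOrEqualTo with a strict major/minor/micro comparison of two spec version strings; the libcni conf.go changes above use it to pick the highest supported cniVersions entry without depending on Masterminds/semver. A quick sketch:

```go
package example

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func compareSpecVersions() {
	gt, err := version.GreaterThan("1.1.0", "1.0.0")
	fmt.Println(gt, err) // true <nil>

	gt, err = version.GreaterThan("1.0.0", "1.0.0")
	fmt.Println(gt, err) // false <nil> (strictly greater only)
}
```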
2 vendor/github.com/containernetworking/cni/pkg/version/version.go generated vendored
@@ -63,7 +63,7 @@ func NewResult(version string, resultBytes []byte) (types.Result, error) {

// ParsePrevResult parses a prevResult in a NetConf structure and sets
// the NetConf's PrevResult member to the parsed Result object.
func ParsePrevResult(conf *types.NetConf) error {
func ParsePrevResult(conf *types.PluginConf) error {
if conf.RawPrevResult == nil {
return nil
}

6 vendor/github.com/containernetworking/plugins/pkg/ns/README.md generated vendored
@@ -13,10 +13,10 @@ The `ns.Do()` method provides **partial** control over network namespaces for yo

```go
err = targetNs.Do(func(hostNs ns.NetNS) error {
linkAttrs := netlink.NewLinkAttrs()
linkAttrs.Name = "dummy0"
dummy := &netlink.Dummy{
LinkAttrs: netlink.LinkAttrs{
Name: "dummy0",
},
LinkAttrs: linkAttrs,
}
return netlink.LinkAdd(dummy)
})

54 vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go generated vendored
@@ -31,6 +31,10 @@ func GetCurrentNS() (NetNS, error) {
// return an unexpected network namespace.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
return getCurrentNSNoLock()
}

func getCurrentNSNoLock() (NetNS, error) {
return GetNS(getCurrentThreadNetNSPath())
}

@@ -152,6 +156,54 @@ func GetNS(nspath string) (NetNS, error) {
return &netNS{file: fd}, nil
}

// Returns a new empty NetNS.
// Calling Close() let the kernel garbage collect the network namespace.
func TempNetNS() (NetNS, error) {
var tempNS NetNS
var err error
var wg sync.WaitGroup
wg.Add(1)

// Create the new namespace in a new goroutine so that if we later fail
// to switch the namespace back to the original one, we can safely
// leave the thread locked to die without a risk of the current thread
// left lingering with incorrect namespace.
go func() {
defer wg.Done()
runtime.LockOSThread()

var threadNS NetNS
// save a handle to current network namespace
threadNS, err = getCurrentNSNoLock()
if err != nil {
err = fmt.Errorf("failed to open current namespace: %v", err)
return
}
defer threadNS.Close()

// create the temporary network namespace
err = unix.Unshare(unix.CLONE_NEWNET)
if err != nil {
return
}

// get a handle to the temporary network namespace
tempNS, err = getCurrentNSNoLock()

err2 := threadNS.Set()
if err2 == nil {
// Unlock the current thread only when we successfully switched back
// to the original namespace; otherwise leave the thread locked which
// will force the runtime to scrap the current thread, that is maybe
// not as optimal but at least always safe to do.
runtime.UnlockOSThread()
}
}()

wg.Wait()
return tempNS, err
}

func (ns *netNS) Path() string {
return ns.file.Name()
}
@@ -173,7 +225,7 @@ func (ns *netNS) Do(toRun func(NetNS) error) error {
}

containedCall := func(hostNS NetNS) error {
threadNS, err := GetCurrentNS()
threadNS, err := getCurrentNSNoLock()
if err != nil {
return fmt.Errorf("failed to open current netns: %v", err)
}

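TempNetNS, added above, gives callers a throwaway namespace that the kernel reclaims once the handle is closed, with no bind mount involved; combined with ns.Do() it can run code inside that namespace. A hedged sketch of how it might be used (the function name is hypothetical):

```go
package example

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/ns"
)

func withTempNamespace() error {
	tempNS, err := ns.TempNetNS()
	if err != nil {
		return err
	}
	// Closing the handle lets the kernel garbage-collect the namespace.
	defer tempNS.Close()

	return tempNS.Do(func(hostNS ns.NetNS) error {
		// Runs with the temporary namespace as the current network namespace.
		fmt.Println("inside", tempNS.Path(), "host is", hostNS.Path())
		return nil
	})
}
```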
2
vendor/github.com/containernetworking/plugins/pkg/testutils/bad_reader.go
generated
vendored
2
vendor/github.com/containernetworking/plugins/pkg/testutils/bad_reader.go
generated
vendored
@@ -21,7 +21,7 @@ type BadReader struct {
|
||||
Error error
|
||||
}
|
||||
|
||||
func (r *BadReader) Read(buffer []byte) (int, error) {
|
||||
func (r *BadReader) Read(_ []byte) (int, error) {
|
||||
if r.Error != nil {
|
||||
return 0, r.Error
|
||||
}
|
||||
|
||||
21
vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go
generated
vendored
21
vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go
generated
vendored
@@ -15,7 +15,7 @@
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/skel"
|
||||
@@ -29,6 +29,7 @@ func envCleanup() {
|
||||
os.Unsetenv("CNI_NETNS")
|
||||
os.Unsetenv("CNI_IFNAME")
|
||||
os.Unsetenv("CNI_CONTAINERID")
|
||||
os.Unsetenv("CNI_NETNS_OVERRIDE")
|
||||
}
|
||||
|
||||
func CmdAdd(cniNetns, cniContainerID, cniIfname string, conf []byte, f func() error) (types.Result, []byte, error) {
|
||||
@@ -37,6 +38,7 @@ func CmdAdd(cniNetns, cniContainerID, cniIfname string, conf []byte, f func() er
|
||||
os.Setenv("CNI_NETNS", cniNetns)
|
||||
os.Setenv("CNI_IFNAME", cniIfname)
|
||||
os.Setenv("CNI_CONTAINERID", cniContainerID)
|
||||
os.Setenv("CNI_NETNS_OVERRIDE", "1")
|
||||
defer envCleanup()
|
||||
|
||||
// Redirect stdout to capture plugin result
|
||||
@@ -52,7 +54,7 @@ func CmdAdd(cniNetns, cniContainerID, cniIfname string, conf []byte, f func() er
|
||||
|
||||
var out []byte
|
||||
if err == nil {
|
||||
out, err = ioutil.ReadAll(r)
|
||||
out, err = io.ReadAll(r)
|
||||
}
|
||||
os.Stdout = oldStdout
|
||||
|
||||
@@ -81,19 +83,20 @@ func CmdAddWithArgs(args *skel.CmdArgs, f func() error) (types.Result, []byte, e
|
||||
return CmdAdd(args.Netns, args.ContainerID, args.IfName, args.StdinData, f)
|
||||
}
|
||||
|
||||
func CmdCheck(cniNetns, cniContainerID, cniIfname string, conf []byte, f func() error) error {
|
||||
func CmdCheck(cniNetns, cniContainerID, cniIfname string, f func() error) error {
|
||||
os.Setenv("CNI_COMMAND", "CHECK")
|
||||
os.Setenv("CNI_PATH", os.Getenv("PATH"))
|
||||
os.Setenv("CNI_NETNS", cniNetns)
|
||||
os.Setenv("CNI_IFNAME", cniIfname)
|
||||
os.Setenv("CNI_CONTAINERID", cniContainerID)
|
||||
os.Setenv("CNI_NETNS_OVERRIDE", "1")
|
||||
defer envCleanup()
|
||||
|
||||
return f()
|
||||
}
|
||||
|
||||
func CmdCheckWithArgs(args *skel.CmdArgs, f func() error) error {
|
||||
return CmdCheck(args.Netns, args.ContainerID, args.IfName, args.StdinData, f)
|
||||
return CmdCheck(args.Netns, args.ContainerID, args.IfName, f)
|
||||
}
|
||||
|
||||
func CmdDel(cniNetns, cniContainerID, cniIfname string, f func() error) error {
|
||||
@@ -102,6 +105,7 @@ func CmdDel(cniNetns, cniContainerID, cniIfname string, f func() error) error {
|
||||
os.Setenv("CNI_NETNS", cniNetns)
|
||||
os.Setenv("CNI_IFNAME", cniIfname)
|
||||
os.Setenv("CNI_CONTAINERID", cniContainerID)
|
||||
os.Setenv("CNI_NETNS_OVERRIDE", "1")
|
||||
defer envCleanup()
|
||||
|
||||
return f()
|
||||
@@ -110,3 +114,12 @@ func CmdDel(cniNetns, cniContainerID, cniIfname string, f func() error) error {
|
||||
func CmdDelWithArgs(args *skel.CmdArgs, f func() error) error {
|
||||
return CmdDel(args.Netns, args.ContainerID, args.IfName, f)
|
||||
}
|
||||
|
||||
func CmdStatus(f func() error) error {
|
||||
os.Setenv("CNI_COMMAND", "STATUS")
|
||||
os.Setenv("CNI_PATH", os.Getenv("PATH"))
|
||||
os.Setenv("CNI_NETNS_OVERRIDE", "1")
|
||||
defer envCleanup()
|
||||
|
||||
return f()
|
||||
}
|
||||
|
||||
3
vendor/github.com/containernetworking/plugins/pkg/testutils/dns.go
generated
vendored
3
vendor/github.com/containernetworking/plugins/pkg/testutils/dns.go
generated
vendored
@@ -16,7 +16,6 @@ package testutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
@@ -28,7 +27,7 @@ import (
|
||||
// an error if any occurs while creating/writing the file. It is the caller's
|
||||
// responsibility to remove the file.
|
||||
func TmpResolvConf(dnsConf types.DNS) (string, error) {
|
||||
f, err := ioutil.TempFile("", "cni_test_resolv.conf")
|
||||
f, err := os.CreateTemp("", "cni_test_resolv.conf")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get temp file for CNI test resolv.conf: %v", err)
|
||||
}
|
||||
|
||||
6
vendor/github.com/containernetworking/plugins/pkg/testutils/netns_linux.go
generated
vendored
6
vendor/github.com/containernetworking/plugins/pkg/testutils/netns_linux.go
generated
vendored
@@ -24,8 +24,9 @@ import (
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
)
|
||||
|
||||
func getNsRunDir() string {
|
||||
@@ -49,7 +50,6 @@ func getNsRunDir() string {
|
||||
// Creates a new persistent (bind-mounted) network namespace and returns an object
|
||||
// representing that namespace, without switching to it.
|
||||
func NewNS() (ns.NetNS, error) {
|
||||
|
||||
nsRunDir := getNsRunDir()
|
||||
|
||||
b := make([]byte, 16)
|
||||
@@ -61,7 +61,7 @@ func NewNS() (ns.NetNS, error) {
|
||||
// Create the directory for mounting network namespaces
|
||||
// This needs to be a shared mountpoint in case it is mounted in to
|
||||
// other namespaces (containers)
|
||||
err = os.MkdirAll(nsRunDir, 0755)
|
||||
err = os.MkdirAll(nsRunDir, 0o755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
9 vendor/github.com/containernetworking/plugins/pkg/testutils/testing.go generated vendored
@@ -19,7 +19,7 @@ import (
)

// AllSpecVersions contains all CNI spec version numbers
var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0"}
var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0"}

// SpecVersionHasIPVersion returns true if the given CNI specification version
// includes the "version" field in the IP address elements
@@ -39,6 +39,13 @@ func SpecVersionHasCHECK(ver string) bool {
return ok
}

// SpecVersionHasSTATUS returns true if the given CNI specification version
// supports the STATUS command
func SpecVersionHasSTATUS(ver string) bool {
ok, _ := version.GreaterThanOrEqualTo(ver, "1.1.0")
return ok
}

// SpecVersionHasChaining returns true if the given CNI specification version
// supports plugin chaining
func SpecVersionHasChaining(ver string) bool {

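With "1.1.0" added to AllSpecVersions and the new SpecVersionHasSTATUS and CmdStatus helpers (see the testutils/cmd.go hunk above), plugin test suites can gate STATUS coverage by spec version. A hedged sketch, where cmdStatusHandler stands in for the plugin's own STATUS handler:

```go
package example

import "github.com/containernetworking/plugins/pkg/testutils"

func exerciseStatus(cmdStatusHandler func() error) error {
	for _, ver := range testutils.AllSpecVersions {
		if !testutils.SpecVersionHasSTATUS(ver) {
			continue // STATUS only exists for spec >= 1.1.0
		}
		// CmdStatus sets CNI_COMMAND=STATUS and cleans the env up afterwards.
		if err := testutils.CmdStatus(cmdStatusHandler); err != nil {
			return err
		}
	}
	return nil
}
```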
21
vendor/github.com/emicklei/go-restful/v3/CHANGES.md
generated
vendored
21
vendor/github.com/emicklei/go-restful/v3/CHANGES.md
generated
vendored
@@ -1,5 +1,26 @@
|
||||
# Change history of go-restful
|
||||
|
||||
## [v3.12.2] - 2025-02-21
|
||||
|
||||
- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
|
||||
|
||||
## [v3.12.1] - 2024-05-28
|
||||
|
||||
- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen)
|
||||
|
||||
## [v3.12.0] - 2024-03-11
|
||||
|
||||
- add Flush method #529 (#538)
|
||||
- fix: Improper handling of empty POST requests (#543)
|
||||
|
||||
## [v3.11.3] - 2024-01-09
|
||||
|
||||
- better not have 2 tags on one commit
|
||||
|
||||
## [v3.11.1, v3.11.2] - 2024-01-09
|
||||
|
||||
- fix by restoring custom JSON handler functions (Mike Beaumont #540)
|
||||
|
||||
## [v3.11.0] - 2023-08-19
|
||||
|
||||
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
|
||||
|
||||
6
vendor/github.com/emicklei/go-restful/v3/README.md
generated
vendored
6
vendor/github.com/emicklei/go-restful/v3/README.md
generated
vendored
@@ -2,9 +2,8 @@ go-restful
|
||||
==========
|
||||
package for building REST-style Web Services using Google Go
|
||||
|
||||
[](https://travis-ci.org/emicklei/go-restful)
|
||||
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
||||
[](https://pkg.go.dev/github.com/emicklei/go-restful)
|
||||
[](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
|
||||
[](https://codecov.io/gh/emicklei/go-restful)
|
||||
|
||||
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
|
||||
@@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package.
|
||||
- Trace logging
|
||||
- Compression
|
||||
- Encoders for other serializers
|
||||
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
|
||||
- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
|
||||
- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
|
||||
|
||||
## Resources
|
||||
|
||||
|
||||
10
vendor/github.com/emicklei/go-restful/v3/compress.go
generated
vendored
10
vendor/github.com/emicklei/go-restful/v3/compress.go
generated
vendored
@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
|
||||
return c.writer.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
|
||||
func (c *CompressingResponseWriter) Flush() {
|
||||
flusher, ok := c.writer.(http.Flusher)
|
||||
if !ok {
|
||||
// writer doesn't support http.Flusher interface
|
||||
return
|
||||
}
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
// Close the underlying compressor
|
||||
func (c *CompressingResponseWriter) Close() error {
|
||||
if c.isCompressorClosed() {
|
||||
|
||||
48
vendor/github.com/emicklei/go-restful/v3/curly.go
generated
vendored
48
vendor/github.com/emicklei/go-restful/v3/curly.go
generated
vendored
@@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute(
|
||||
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
|
||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
||||
candidates := make(sortableCurlyRoutes, 0, 8)
|
||||
for _, each := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
|
||||
for _, eachRoute := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb)
|
||||
if matches {
|
||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
}
|
||||
}
|
||||
sort.Sort(candidates)
|
||||
@@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin
|
||||
return false, 0, 0
|
||||
}
|
||||
requestToken := requestTokens[i]
|
||||
if routeHasCustomVerb && hasCustomVerb(routeToken){
|
||||
if routeHasCustomVerb && hasCustomVerb(routeToken) {
|
||||
if !isMatchCustomVerb(routeToken, requestToken) {
|
||||
return false, 0, 0
|
||||
}
|
||||
@@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques
|
||||
// detectWebService returns the best matching webService given the list of path tokens.
|
||||
// see also computeWebserviceScore
|
||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
||||
var best *WebService
|
||||
var bestWs *WebService
|
||||
score := -1
|
||||
for _, each := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
||||
for _, eachWS := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens)
|
||||
if matches && (eachScore > score) {
|
||||
best = each
|
||||
bestWs = eachWS
|
||||
score = eachScore
|
||||
}
|
||||
}
|
||||
return best
|
||||
return bestWs
|
||||
}
|
||||
|
||||
// computeWebserviceScore returns whether tokens match and
|
||||
// the weighted score of the longest matching consecutive tokens from the beginning.
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
|
||||
if len(tokens) > len(requestTokens) {
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) {
|
||||
if len(routeTokens) > len(requestTokens) {
|
||||
return false, 0
|
||||
}
|
||||
score := 0
|
||||
for i := 0; i < len(tokens); i++ {
|
||||
each := requestTokens[i]
|
||||
other := tokens[i]
|
||||
if len(each) == 0 && len(other) == 0 {
|
||||
for i := 0; i < len(routeTokens); i++ {
|
||||
eachRequestToken := requestTokens[i]
|
||||
eachRouteToken := routeTokens[i]
|
||||
if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 {
|
||||
score++
|
||||
continue
|
||||
}
|
||||
if len(other) > 0 && strings.HasPrefix(other, "{") {
|
||||
if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") {
|
||||
// no empty match
|
||||
if len(each) == 0 {
|
||||
if len(eachRequestToken) == 0 {
|
||||
return false, score
|
||||
}
|
||||
score += 1
|
||||
score++
|
||||
|
||||
if colon := strings.Index(eachRouteToken, ":"); colon != -1 {
|
||||
// match by regex
|
||||
matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken)
|
||||
if matchesToken {
|
||||
score++ // extra score for regex match
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// not a parameter
|
||||
if each != other {
|
||||
if eachRequestToken != eachRouteToken {
|
||||
return false, score
|
||||
}
|
||||
score += (len(tokens) - i) * 10 //fuzzy
|
||||
score += (len(routeTokens) - i) * 10 //fuzzy
|
||||
}
|
||||
}
|
||||
return true, score
|
||||
|
||||
7
vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
generated
vendored
7
vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
generated
vendored
@@ -5,11 +5,18 @@ package restful
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
|
||||
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
|
||||
type EntityReaderWriter interface {
|
||||
// Read a serialized version of the value from the request.
|
||||
|
||||
11
vendor/github.com/emicklei/go-restful/v3/json.go
generated
vendored
11
vendor/github.com/emicklei/go-restful/v3/json.go
generated
vendored
@@ -1,11 +0,0 @@
|
||||
// +build !jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
var (
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
12
vendor/github.com/emicklei/go-restful/v3/jsoniter.go
generated
vendored
12
vendor/github.com/emicklei/go-restful/v3/jsoniter.go
generated
vendored
@@ -1,12 +0,0 @@
|
||||
// +build jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "github.com/json-iterator/go"
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
19
vendor/github.com/emicklei/go-restful/v3/jsr311.go
generated
vendored
19
vendor/github.com/emicklei/go-restful/v3/jsr311.go
generated
vendored
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
|
||||
return params
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
|
||||
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
|
||||
candidates := make([]*Route, 0, 8)
|
||||
for i, each := range routes {
|
||||
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
|
||||
}
|
||||
if httpRequest.ContentLength > 0 {
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
|
||||
// accept
|
||||
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
|
||||
for _, candidate := range previous {
|
||||
available = append(available, candidate.Produces...)
|
||||
}
|
||||
// if POST,PUT,PATCH without body
|
||||
method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
|
||||
if (method == http.MethodPost ||
|
||||
method == http.MethodPut ||
|
||||
method == http.MethodPatch) && length == "" {
|
||||
return nil, NewError(
|
||||
http.StatusUnsupportedMediaType,
|
||||
fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
|
||||
)
|
||||
}
|
||||
return nil, NewError(
|
||||
http.StatusNotAcceptable,
|
||||
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
|
||||
)
|
||||
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
|
||||
}
|
||||
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
|
||||
return candidates[0], nil
|
||||
|
||||
2
vendor/github.com/emicklei/go-restful/v3/route.go
generated
vendored
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
}

// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
// If the route does not specify Consumes then return true (*/*).
// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
func (r Route) matchesContentType(mimeTypes string) bool {

	if len(r.Consumes) == 0 {
14
vendor/github.com/fsnotify/fsnotify/.cirrus.yml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
freebsd_task:
  name: 'FreeBSD'
  freebsd_instance:
    image_family: freebsd-14-2
  install_script:
    - pkg update -f
    - pkg install -y go
  test_script:
    # run tests as user "cirrus" instead of root
    - pw useradd cirrus -m
    - chown -R cirrus:cirrus .
    - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
1
vendor/github.com/fsnotify/fsnotify/.gitattributes
generated
vendored
@@ -1 +0,0 @@
go.sum linguist-generated
4
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
@@ -4,3 +4,7 @@

# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe

/test/kqueue
/test/a.out

144
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
@@ -1,16 +1,148 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
1.9.0 2024-04-04
|
||||
----------------
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
### Changes and fixes
|
||||
|
||||
## [Unreleased]
|
||||
- all: make BufferedWatcher buffered again ([#657])
|
||||
|
||||
Nothing yet.
|
||||
- inotify: fix race when adding/removing watches while a watched path is being
|
||||
deleted ([#678], [#686])
|
||||
|
||||
## [1.6.0] - 2022-10-13
|
||||
- inotify: don't send empty event if a watched path is unmounted ([#655])
|
||||
|
||||
- inotify: don't register duplicate watches when watching both a symlink and its
|
||||
target; previously that would get "half-added" and removing the second would
|
||||
panic ([#679])
|
||||
|
||||
- kqueue: fix watching relative symlinks ([#681])
|
||||
|
||||
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
|
||||
kqueue ([#682])
|
||||
|
||||
- illumos: don't send error if changed file is deleted while processing the
|
||||
event ([#678])
|
||||
|
||||
|
||||
[#657]: https://github.com/fsnotify/fsnotify/pull/657
|
||||
[#678]: https://github.com/fsnotify/fsnotify/pull/678
|
||||
[#686]: https://github.com/fsnotify/fsnotify/pull/686
|
||||
[#655]: https://github.com/fsnotify/fsnotify/pull/655
|
||||
[#681]: https://github.com/fsnotify/fsnotify/pull/681
|
||||
[#679]: https://github.com/fsnotify/fsnotify/pull/679
|
||||
[#682]: https://github.com/fsnotify/fsnotify/pull/682
|
||||
|
||||
1.8.0 2024-10-31
|
||||
----------------
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
|
||||
|
||||
- kqueue: ignore events with Ident=0 ([#590])
|
||||
|
||||
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
|
||||
|
||||
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
|
||||
|
||||
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
|
||||
|
||||
- inotify: fix panic when calling Remove() in a goroutine ([#650])
|
||||
|
||||
- fen: allow watching subdirectories of watched directories ([#621])
|
||||
|
||||
[#590]: https://github.com/fsnotify/fsnotify/pull/590
|
||||
[#610]: https://github.com/fsnotify/fsnotify/pull/610
|
||||
[#617]: https://github.com/fsnotify/fsnotify/pull/617
|
||||
[#619]: https://github.com/fsnotify/fsnotify/pull/619
|
||||
[#620]: https://github.com/fsnotify/fsnotify/pull/620
|
||||
[#621]: https://github.com/fsnotify/fsnotify/pull/621
|
||||
[#625]: https://github.com/fsnotify/fsnotify/pull/625
|
||||
[#650]: https://github.com/fsnotify/fsnotify/pull/650
|
||||
|
||||
1.7.0 - 2023-10-22
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.17.
|
||||
|
||||
### Additions
|
||||
|
||||
- illumos: add FEN backend to support illumos and Solaris. ([#371])
|
||||
|
||||
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
|
||||
in cases where you can't control the kernel buffer and receive a large number
|
||||
of events in bursts. ([#550], [#572])
|
||||
|
||||
- all: add `AddWith()`, which is identical to `Add()` but allows passing
|
||||
options. ([#521])
|
||||
|
||||
- windows: allow setting the ReadDirectoryChangesW() buffer size with
|
||||
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
|
||||
works on all platforms and is enough for most purposes, but in some cases a
|
||||
highest buffer is needed. ([#521])
|
||||
|
||||
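As a quick, hedged sketch (not taken from the changelog itself) of how the additions listed above fit together; the watched path and the buffer sizes are arbitrary placeholders:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewBufferedWatcher buffers the Events channel (here: 50 events), which
	// helps when events arrive in bursts and the kernel buffer can't be tuned.
	w, err := fsnotify.NewBufferedWatcher(50)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; WithBufferSize only has an effect on the
	// Windows (ReadDirectoryChangesW) backend.
	if err := w.AddWith("/tmp", fsnotify.WithBufferSize(128*1024)); err != nil {
		log.Fatal(err)
	}

	log.Println(<-w.Events) // then read w.Events / w.Errors as usual
}
```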
### Changes and fixes
|
||||
|
||||
- inotify: remove watcher if a watched path is renamed ([#518])
|
||||
|
||||
After a rename the reported name wasn't updated, or even an empty string.
|
||||
Inotify doesn't provide any good facilities to update it, so just remove the
|
||||
watcher. This is already how it worked on kqueue and FEN.
|
||||
|
||||
On Windows this does work, and remains working.
|
||||
|
||||
- windows: don't listen for file attribute changes ([#520])
|
||||
|
||||
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
|
||||
with no way to see if they're a file write or attribute change, so would show
|
||||
up as a fsnotify.Write event. This is never useful, and could result in many
|
||||
spurious Write events.
|
||||
|
||||
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
|
||||
|
||||
Before it would merely return "short read", making it hard to detect this
|
||||
error.
|
||||
|
||||
- kqueue: make sure events for all files are delivered properly when removing a
|
||||
watched directory ([#526])
|
||||
|
||||
Previously they would get sent with `""` (empty string) or `"."` as the path
|
||||
name.
|
||||
|
||||
- kqueue: don't emit spurious Create events for symbolic links ([#524])
|
||||
|
||||
The link would get resolved but kqueue would "forget" it already saw the link
|
||||
itself, resulting on a Create for every Write event for the directory.
|
||||
|
||||
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
|
||||
|
||||
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
|
||||
`backend_other.go`, making it easier to use on unsupported platforms such as
|
||||
WASM, AIX, etc. ([#528])
|
||||
|
||||
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
|
||||
Google AppEngine forbids usage of the unsafe package so the inotify backend
|
||||
won't compile there.
|
||||
|
||||
[#371]: https://github.com/fsnotify/fsnotify/pull/371
|
||||
[#516]: https://github.com/fsnotify/fsnotify/pull/516
|
||||
[#518]: https://github.com/fsnotify/fsnotify/pull/518
|
||||
[#520]: https://github.com/fsnotify/fsnotify/pull/520
|
||||
[#521]: https://github.com/fsnotify/fsnotify/pull/521
|
||||
[#524]: https://github.com/fsnotify/fsnotify/pull/524
|
||||
[#525]: https://github.com/fsnotify/fsnotify/pull/525
|
||||
[#526]: https://github.com/fsnotify/fsnotify/pull/526
|
||||
[#528]: https://github.com/fsnotify/fsnotify/pull/528
|
||||
[#537]: https://github.com/fsnotify/fsnotify/pull/537
|
||||
[#550]: https://github.com/fsnotify/fsnotify/pull/550
|
||||
[#572]: https://github.com/fsnotify/fsnotify/pull/572
|
||||
|
||||
1.6.0 - 2022-10-13
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||
|
||||
|
||||
121
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
@@ -1,7 +1,7 @@
|
||||
Thank you for your interest in contributing to fsnotify! We try to review and
|
||||
merge PRs in a reasonable timeframe, but please be aware that:
|
||||
|
||||
- To avoid "wasted" work, please discus changes on the issue tracker first. You
|
||||
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
|
||||
can just send PRs, but they may end up being rejected for one reason or the
|
||||
other.
|
||||
|
||||
@@ -20,6 +20,125 @@ platforms. Testing different platforms locally can be done with something like
|
||||
|
||||
Use the `-short` flag to make the "stress test" run faster.
|
||||
|
||||
Writing new tests
|
||||
-----------------
|
||||
Scripts in the testdata directory allow creating test cases in a "shell-like"
|
||||
syntax. The basic format is:
|
||||
|
||||
script
|
||||
|
||||
Output:
|
||||
desired output
|
||||
|
||||
For example:
|
||||
|
||||
# Create a new empty file with some data.
|
||||
watch /
|
||||
echo data >/file
|
||||
|
||||
Output:
|
||||
create /file
|
||||
write /file
|
||||
|
||||
Just create a new file to add a new test; select which tests to run with
|
||||
`-run TestScript/[path]`.
|
||||
|
||||
script
|
||||
------
|
||||
The script is a "shell-like" script:
|
||||
|
||||
cmd arg arg
|
||||
|
||||
Comments are supported with `#`:
|
||||
|
||||
# Comment
|
||||
cmd arg arg # Comment
|
||||
|
||||
All operations are done in a temp directory; a path like "/foo" is rewritten to
|
||||
"/tmp/TestFoo/foo".
|
||||
|
||||
Arguments can be quoted with `"` or `'`; there are no escapes and they're
|
||||
functionally identical right now, but this may change in the future, so best to
|
||||
assume shell-like rules.
|
||||
|
||||
touch "/file with spaces"
|
||||
|
||||
End-of-line escapes with `\` are not supported.
|
||||
|
||||
### Supported commands
|
||||
|
||||
watch path [ops] # Watch the path, reporting events for it. Nothing is
|
||||
# watched by default. Optionally a list of ops can be
|
||||
# given, as with AddWith(path, WithOps(...)).
|
||||
unwatch path # Stop watching the path.
|
||||
watchlist n # Assert watchlist length.
|
||||
|
||||
stop # Stop running the script; for debugging.
|
||||
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
|
||||
parallel by default, so -parallel=1 is probably a good
|
||||
idea).
|
||||
print [any strings] # Print text to stdout; for debugging.
|
||||
|
||||
touch path
|
||||
mkdir [-p] dir
|
||||
ln -s target link # Only ln -s supported.
|
||||
mkfifo path
|
||||
mknod dev path
|
||||
mv src dst
|
||||
rm [-r] path
|
||||
chmod mode path # Octal only
|
||||
sleep time-in-ms
|
||||
|
||||
cat path # Read path (does nothing with the data; just reads it).
|
||||
echo str >>path # Append "str" to "path".
|
||||
echo str >path # Truncate "path" and write "str".
|
||||
|
||||
require reason # Skip the test if "reason" is true; "skip" and
|
||||
skip reason # "require" behave identical; it supports both for
|
||||
# readability. Possible reasons are:
|
||||
#
|
||||
# always Always skip this test.
|
||||
# symlink Symlinks are supported (requires admin
|
||||
# permissions on Windows).
|
||||
# mkfifo Platform doesn't support FIFO named sockets.
|
||||
# mknod Platform doesn't support device nodes.
|
||||
|
||||
|
||||
output
|
||||
------
|
||||
After `Output:` the desired output is given; this is indented by convention, but
|
||||
that's not required.
|
||||
|
||||
The format of that is:
|
||||
|
||||
# Comment
|
||||
event path # Comment
|
||||
|
||||
system:
|
||||
event path
|
||||
system2:
|
||||
event path
|
||||
|
||||
Every event is one line, and any whitespace between the event and path are
|
||||
ignored. The path can optionally be surrounded in ". Anything after a "#" is
|
||||
ignored.
|
||||
|
||||
Platform-specific tests can be added after GOOS; for example:
|
||||
|
||||
watch /
|
||||
touch /file
|
||||
|
||||
Output:
|
||||
# Tested if nothing else matches
|
||||
create /file
|
||||
|
||||
# Windows-specific test.
|
||||
windows:
|
||||
write /file
|
||||
|
||||
You can specify multiple platforms with a comma (e.g. "windows, linux:").
|
||||
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
|
||||
|
||||
|
||||
[goon]: https://github.com/arp242/goon
|
||||
[Vagrant]: https://www.vagrantup.com/
|
||||
|
||||
77
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
@@ -1,29 +1,29 @@
|
||||
fsnotify is a Go library to provide cross-platform filesystem notifications on
|
||||
Windows, Linux, macOS, and BSD systems.
|
||||
Windows, Linux, macOS, BSD, and illumos.
|
||||
|
||||
Go 1.16 or newer is required; the full documentation is at
|
||||
Go 1.17 or newer is required; the full documentation is at
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
|
||||
released version, whereas this README is for the last development version which
|
||||
may include additions/changes.**
|
||||
|
||||
---
|
||||
|
||||
Platform support:
|
||||
|
||||
| Adapter | OS | Status |
|
||||
| --------------------- | ---------------| -------------------------------------------------------------|
|
||||
| inotify | Linux 2.6.32+ | Supported |
|
||||
| kqueue | BSD, macOS | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
|
||||
| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
| Backend | OS | Status |
|
||||
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
|
||||
| inotify | Linux | Supported |
|
||||
| kqueue | BSD, macOS | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FEN | illumos | Supported |
|
||||
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
|
||||
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
|
||||
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
Linux and macOS should include Android and iOS, but these are currently untested.
|
||||
Linux and illumos should include Android and Solaris, but these are currently
|
||||
untested.
|
||||
|
||||
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
|
||||
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
|
||||
|
||||
Usage
|
||||
-----
|
||||
@@ -83,20 +83,23 @@ run with:
|
||||
|
||||
% go run ./cmd/fsnotify
|
||||
|
||||
Further detailed documentation can be found in godoc:
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.

### Are subdirectories watched too?
### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).

[#18]: https://github.com/fsnotify/fsnotify/issues/18

### Do I have to watch the Error and Event channels in a goroutine?
As of now, yes (you can read both channels in the same goroutine using `select`,
you don't need a separate goroutine for both channels; see the example).
Yes. You can read both channels in the same goroutine using `select` (you don't
need a separate goroutine for both channels; see the example).
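A minimal sketch of what that single-goroutine loop can look like (not the README's own example; the watched path `/tmp` is just a placeholder):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("/tmp"); err != nil { // placeholder path
		log.Fatal(err)
	}

	// One goroutine (here the main one) services both channels.
	for {
		select {
		case ev, ok := <-watcher.Events:
			if !ok { // Events is closed when the watcher is closed
				return
			}
			log.Println("event:", ev)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```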
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from underlying OS to work. The current NFS and SMB
@@ -107,6 +110,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.

[#9]: https://github.com/fsnotify/fsnotify/issues/9

### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.

Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).

[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15

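As a rough illustration of that advice (a hedged sketch, not part of the README; the package name `watchutil` is made up), events whose only op is Chmod can simply be skipped:

```go
package watchutil

import "github.com/fsnotify/fsnotify"

// interesting reports whether an event is worth acting on; events that are
// *only* a Chmod (attribute-only change) are dropped, per the advice above.
func interesting(ev fsnotify.Event) bool {
	return ev.Op != fsnotify.Chmod
}
```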
### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: it will write to
a temporary file which is then moved to to destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
no longer exists.

The upshot of this is that a power failure or crash won't leave a half-written
file.

Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
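The real example lives in `cmd/fsnotify/file.go`; the sketch below only illustrates the idea (the helper name `watchFile`, the package name, and the error handling are not from that file):

```go
package watchutil

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchFile watches the *parent directory* of path and keeps only the events
// whose Event.Name matches path, so atomic-save (write temp file, then rename)
// cycles don't silently drop the watch.
func watchFile(w *fsnotify.Watcher, path string) error {
	if err := w.Add(filepath.Dir(path)); err != nil {
		return err
	}
	for ev := range w.Events {
		if filepath.Clean(ev.Name) != filepath.Clean(path) {
			continue // some other file in the same directory
		}
		log.Println("event:", ev)
	}
	return nil
}
```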
Platform-specific notes
-----------------------
### Linux
@@ -151,11 +180,3 @@ these platforms.

The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.

### macOS
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
have a native FSEvents implementation (see [#11]).

[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15

585
vendor/github.com/fsnotify/fsnotify/backend_fen.go
generated
vendored
@@ -1,162 +1,467 @@
|
||||
//go:build solaris
|
||||
// +build solaris
|
||||
|
||||
// FEN backend for illumos (supported) and Solaris (untested, but should work).
|
||||
//
|
||||
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
type fen struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
|
||||
mu sync.Mutex
|
||||
port *unix.EventPort
|
||||
dirs map[string]Op // Explicitly watched directories
|
||||
watches map[string]Op // Explicitly watched non-directories
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
w := &fen{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
dirs: make(map[string]Op),
|
||||
watches: make(map[string]Op),
|
||||
}
|
||||
|
||||
var err error
|
||||
w.port, err = unix.NewEventPort()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
func (w *fen) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
return w.port.Close()
|
||||
}
|
||||
|
||||
func (w *fen) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *fen) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
// Currently we resolve symlinks that were explicitly requested to be
|
||||
// watched. Otherwise we would use LStat here.
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Associate all files in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, true, w.associateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.dirs[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.associateFile(name, stat, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
func (w *fen) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if !w.port.PathIsWatched(name) {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
// The user has expressed an intent. Immediately remove this name from
|
||||
// whichever watch list it might be in. If it's not in there the delete
|
||||
// doesn't cause harm.
|
||||
w.mu.Lock()
|
||||
delete(w.watches, name)
|
||||
delete(w.dirs, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove associations for every file in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, false, w.dissociateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.port.DissociatePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||
func (w *fen) readEvents() {
|
||||
// If this function returns, the watcher has been closed and we can close
|
||||
// these channels
|
||||
defer func() {
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
pevents := make([]unix.PortEvent, 8)
|
||||
for {
|
||||
count, err := w.port.Get(pevents, 1, nil)
|
||||
if err != nil && err != unix.ETIME {
|
||||
// Interrupted system call (count should be 0) ignore and continue
|
||||
if errors.Is(err, unix.EINTR) && count == 0 {
|
||||
continue
|
||||
}
|
||||
// Get failed because we called w.Close()
|
||||
if errors.Is(err, unix.EBADF) && w.isClosed() {
|
||||
return
|
||||
}
|
||||
// There was an error not caused by calling w.Close()
|
||||
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p := pevents[:count]
|
||||
for _, pevent := range p {
|
||||
if pevent.Source != unix.PORT_SOURCE_FILE {
|
||||
// Event from unexpected source received; should never happen.
|
||||
if !w.sendError(errors.New("Event from unexpected source received")) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(pevent.Path, pevent.Events)
|
||||
}
|
||||
|
||||
err = w.handleEvent(&pevent)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle all children of the directory.
|
||||
for _, entry := range files {
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// And finally handle the directory itself.
|
||||
return handler(path, stat, follow)
|
||||
}
|
||||
|
||||
// handleEvent might need to emit more than one fsnotify event if the events
|
||||
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||
// had the attributes changed between when the association was created and the
|
||||
// when event was returned)
|
||||
func (w *fen) handleEvent(event *unix.PortEvent) error {
|
||||
var (
|
||||
events = event.Events
|
||||
path = event.Path
|
||||
fmode = event.Cookie.(os.FileMode)
|
||||
reRegister = true
|
||||
)
|
||||
|
||||
w.mu.Lock()
|
||||
_, watchedDir := w.dirs[path]
|
||||
_, watchedPath := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
isWatched := watchedDir || watchedPath
|
||||
|
||||
if events&unix.FILE_DELETE != 0 {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_FROM != 0 {
|
||||
if !w.sendEvent(Event{Name: path, Op: Rename}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the new file name
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_TO != 0 {
|
||||
// We don't report a Rename event for this case, because Rename events
|
||||
// are interpreted as referring to the _old_ name of the file, and in
|
||||
// this case the event would refer to the new name of the file. This
|
||||
// type of rename event is not supported by fsnotify.
|
||||
|
||||
// inotify reports a Remove event in this case, so we simulate this
|
||||
// here.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the file that was removed
|
||||
reRegister = false
|
||||
}
|
||||
|
||||
// The file is gone, nothing left to do.
|
||||
if !reRegister {
|
||||
if watchedDir {
|
||||
w.mu.Lock()
|
||||
delete(w.dirs, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
if watchedPath {
|
||||
w.mu.Lock()
|
||||
delete(w.watches, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we didn't get a deletion the file still exists and we're going to have
|
||||
// to watch it again. Let's Stat it now so that we can compare permissions
|
||||
// and have what we need to continue watching the file
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
// This is unexpected, but we should still emit an event. This happens
|
||||
// most often on "rm -r" of a subdirectory inside a watched directory We
|
||||
// get a modify event of something happening inside, but by the time we
|
||||
// get here, the sudirectory is already gone. Clearly we were watching
|
||||
// this path but now it is gone. Let's tell the user that it was
|
||||
// removed.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Suppress extra write events on removed directories; they are not
|
||||
// informative and can be confusing.
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolve symlinks that were explicitly watched as we would have at Add()
|
||||
// time. this helps suppress spurious Chmod events on watched symlinks
|
||||
if isWatched {
|
||||
stat, err = os.Stat(path)
|
||||
if err != nil {
|
||||
// The symlink still exists, but the target is gone. Report the
|
||||
// Remove similar to above.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't return the error
|
||||
}
|
||||
}
|
||||
|
||||
if events&unix.FILE_MODIFIED != 0 {
|
||||
if fmode.IsDir() && watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(Event{Name: path, Op: Write}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||
// Only send Chmod if perms changed
|
||||
if stat.Mode().Perm() != fmode.Perm() {
|
||||
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stat != nil {
|
||||
// If we get here, it means we've hit an event above that requires us to
|
||||
// continue watching the file or directory
|
||||
err := w.associateFile(path, stat, isWatched)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// Path may have been removed since the stat.
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen, as
|
||||
// everything else should still be watched.
|
||||
func (w *fen) updateDirectory(path string) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
// Directory no longer exists: probably just deleted since we got the
|
||||
// event.
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range files {
|
||||
path := filepath.Join(path, entry.Name())
|
||||
if w.port.PathIsWatched(path) {
|
||||
continue
|
||||
}
|
||||
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.associateFile(path, finfo, false)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// File may have disappeared between getting the dir listing and
|
||||
// adding the port: that's okay to ignore.
|
||||
continue
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
// This is primarily protecting the call to AssociatePath but it is
|
||||
// important and intentional that the call to PathIsWatched is also
|
||||
// protected by this mutex. Without this mutex, AssociatePath has been seen
|
||||
// to error out that the path is already associated.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.port.PathIsWatched(path) {
|
||||
// Remove the old association in favor of this one If we get ENOENT,
|
||||
// then while the x/sys/unix wrapper still thought that this path was
|
||||
// associated, the underlying event port did not. This call will have
|
||||
// cleared up that discrepancy. The most likely cause is that the event
|
||||
// has fired but we haven't processed it yet.
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil && !errors.Is(err, unix.ENOENT) {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
var events int
|
||||
if !follow {
|
||||
// Watch symlinks themselves rather than their targets unless this entry
|
||||
// is explicitly watched.
|
||||
events |= unix.FILE_NOFOLLOW
|
||||
}
|
||||
if true { // TODO: implement withOps()
|
||||
events |= unix.FILE_MODIFIED
|
||||
}
|
||||
if true {
|
||||
events |= unix.FILE_ATTRIB
|
||||
}
|
||||
err := w.port.AssociatePath(path, stat, events, stat.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
if !w.port.PathIsWatched(path) {
|
||||
return nil
|
||||
}
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches)+len(w.dirs))
|
||||
for pathname := range w.dirs {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
func (w *fen) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
768
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
//go:build linux && !appengine
|
||||
|
||||
package fsnotify
|
||||
|
||||
@@ -7,143 +6,149 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
type inotify struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||
fd int
|
||||
mu sync.Mutex // Map access
|
||||
inotifyFile *os.File
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
watches *watches
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
|
||||
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||
// between the two MOVED_* events (including other MOVED_* ones).
|
||||
//
|
||||
// A second issue is that moving a file outside the watched directory will
|
||||
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||
//
|
||||
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||
// Ten items should be more than enough for our purpose, and a loop over
|
||||
// such a short array is faster than a map access anyway (not that it hugely
|
||||
// matters since we're talking about hundreds of ns at the most, but still).
|
||||
cookies [10]koekje
|
||||
cookieIndex uint8
|
||||
cookiesMu sync.Mutex
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
// Need to set the FD to nonblocking mode in order for SetDeadline methods to work
|
||||
// Otherwise, blocking i/o operations won't terminate on close
|
||||
type (
|
||||
watches struct {
|
||||
wd map[uint32]*watch // wd → watch
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
recurse bool // Recursion with ./...?
|
||||
}
|
||||
koekje struct {
|
||||
cookie uint32
|
||||
path string
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[uint32]*watch),
|
||||
path: make(map[string]uint32),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
|
||||
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
|
||||
func (w *watches) len() int { return len(w.wd) }
|
||||
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
|
||||
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
|
||||
|
||||
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||
path, recurse := recursivePath(path)
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||
}
|
||||
|
||||
watch := w.wd[wd]
|
||||
if recurse && !watch.recurse {
|
||||
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
if !watch.recurse {
|
||||
return []uint32{wd}, nil
|
||||
}
|
||||
|
||||
wds := make([]uint32, 0, 8)
|
||||
wds = append(wds, wd)
|
||||
for p, rwd := range w.path {
|
||||
if strings.HasPrefix(p, path) {
|
||||
delete(w.path, p)
|
||||
delete(w.wd, rwd)
|
||||
wds = append(wds, rwd)
|
||||
}
|
||||
}
|
||||
return wds, nil
|
||||
}
|
||||
|
||||
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||
var existing *watch
|
||||
wd, ok := w.path[path]
|
||||
if ok {
|
||||
existing = w.wd[wd]
|
||||
}
|
||||
|
||||
upd, err := f(existing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if upd != nil {
|
||||
w.wd[upd.wd] = upd
|
||||
w.path[upd.path] = upd.wd
|
||||
|
||||
if upd.wd != wd {
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
w := &inotify{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
watches: newWatches(),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
@@ -151,47 +156,11 @@ func NewWatcher() (*Watcher, error) {
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendEvent(e Event) bool {
|
||||
select {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed() {
|
||||
w.mu.Unlock()
|
||||
func (w *inotify) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
err := w.inotifyFile.Close()
|
||||
@@ -199,165 +168,197 @@ func (w *Watcher) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
<-w.doneResp // Wait for readEvents() to finish.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), path)
|
||||
}
|
||||
|
||||
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
add := func(path string, with withOpts, recurse bool) error {
|
||||
var flags uint32
|
||||
if with.noFollow {
|
||||
flags |= unix.IN_DONT_FOLLOW
|
||||
}
|
||||
if with.op.Has(Create) {
|
||||
flags |= unix.IN_CREATE
|
||||
}
|
||||
if with.op.Has(Write) {
|
||||
flags |= unix.IN_MODIFY
|
||||
}
|
||||
if with.op.Has(Remove) {
|
||||
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
}
|
||||
if with.op.Has(Rename) {
|
||||
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||
}
|
||||
if with.op.Has(Chmod) {
|
||||
flags |= unix.IN_ATTRIB
|
||||
}
|
||||
if with.op.Has(xUnportableOpen) {
|
||||
flags |= unix.IN_OPEN
|
||||
}
|
||||
if with.op.Has(xUnportableRead) {
|
||||
flags |= unix.IN_ACCESS
|
||||
}
|
||||
if with.op.Has(xUnportableCloseWrite) {
|
||||
flags |= unix.IN_CLOSE_WRITE
|
||||
}
|
||||
if with.op.Has(xUnportableCloseRead) {
|
||||
flags |= unix.IN_CLOSE_NOWRITE
|
||||
}
|
||||
return w.register(path, flags, recurse)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
path, recurse := recursivePath(path)
|
||||
if recurse {
|
||||
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
if root == path {
|
||||
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a Create event when adding new directory from a recursive
|
||||
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||
// directories will be created before we can set up watchers on the
|
||||
// subdirectories, so only "one" would be sent as a Create event and
|
||||
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||
// problem).
|
||||
if with.sendCreate && root != path {
|
||||
w.sendEvent(Event{Name: root, Op: Create})
|
||||
}
|
||||
|
||||
return add(root, with, true)
|
||||
})
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
return add(path, with, false)
|
||||
}
|
||||
|
||||
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if e, ok := w.watches.wd[uint32(wd)]; ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: path,
|
||||
flags: flags,
|
||||
recurse: recurse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
existing.wd = uint32(wd)
|
||||
existing.flags = flags
|
||||
return existing, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (w *inotify) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.remove(filepath.Clean(name))
|
||||
}
|
||||
|
||||
func (w *inotify) remove(name string) error {
|
||||
wds, err := w.watches.removePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, wd := range wds {
|
||||
_, err := unix.InotifyRmWatch(w.fd, wd)
|
||||
if err != nil {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every
|
||||
// case; the only two possible errors are:
|
||||
//
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of
|
||||
// any kind.
|
||||
//
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is
|
||||
// not a valid watch descriptor. Watch descriptors are invalidated
|
||||
// when they are removed explicitly or implicitly; explicitly by
|
||||
// inotify_rm_watch, implicitly when the file they are watching is
|
||||
// deleted.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
func (w *inotify) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error, we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not received IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case;
|
||||
// The only two possible errors are:
|
||||
//
|
||||
// - EBADF, which happens when w.fd is not a valid file descriptor
|
||||
// of any kind.
|
||||
// - EINVAL, which is when fd is not an inotify descriptor or wd
|
||||
// is not a valid watch descriptor. Watch descriptors are
|
||||
// invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they
|
||||
// are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
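Since Remove is non-recursive (per the Remove documentation above), watches added for both a directory and its subdirectory have to be removed one by one. A minimal sketch, assuming an existing *fsnotify.Watcher and hypothetical /tmp paths:

```go
package example

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// removeNested removes two separately-added watches. Remove is non-recursive,
// so removing /tmp/dir does not also remove /tmp/dir/subdir (paths are hypothetical).
func removeNested(w *fsnotify.Watcher) {
	for _, p := range []string{"/tmp/dir/subdir", "/tmp/dir"} {
		if err := w.Remove(p); err != nil {
			log.Printf("remove %s: %v", p, err)
		}
	}
}
```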
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries := make([]string, 0, w.watches.len())
|
||||
for pathname := range w.watches.path {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
func (w *inotify) readEvents() {
|
||||
defer func() {
|
||||
close(w.doneResp)
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
errno error // Syscall errno
|
||||
)
|
||||
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := w.inotifyFile.Read(buf[:])
|
||||
switch {
|
||||
case errors.Unwrap(err) == os.ErrClosed:
|
||||
return
|
||||
case err != nil:
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
@@ -365,16 +366,9 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
err := errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
if n == 0 {
|
||||
// EOF was received, which should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
err = io.EOF // EOF was received, which should really never happen.
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
@@ -382,63 +376,146 @@ func (w *Watcher) readEvents() {
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't know how many events we just read into the buffer. While the
|
||||
// offset points to at least one whole event.
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
var (
|
||||
// Point "raw" to the event in the buffer
|
||||
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
mask = uint32(raw.Mask)
|
||||
nameLen = uint32(raw.Len)
|
||||
)
|
||||
// Point to the event in the buffer.
|
||||
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if !w.sendError(ErrEventOverflow) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
ev, ok := w.handleEvent(inEvent, &buf, offset)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := w.newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if mask&unix.IN_IGNORED == 0 {
|
||||
if !w.sendEvent(event) {
|
||||
return
|
||||
}
|
||||
if !w.sendEvent(ev) {
|
||||
return
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
offset += unix.SizeofInotifyEvent + inEvent.Len
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
/// If the event happened to the watched directory or the watched file, the
|
||||
/// kernel doesn't append the filename to the event, but we would like to
|
||||
/// always fill the "Name" field with a valid filename. We retrieve the
|
||||
/// path of the watch from the "paths" map.
|
||||
///
|
||||
/// Can be nil if Remove() was called in another goroutine for this path
|
||||
/// in between reading the events from the kernel and reading the internal
|
||||
/// state. Not much we can do about it, so just skip. See #616.
|
||||
watch := w.watches.byWd(uint32(inEvent.Wd))
|
||||
if watch == nil {
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
var (
|
||||
name = watch.path
|
||||
nameLen = uint32(inEvent.Len)
|
||||
)
|
||||
if nameLen > 0 {
|
||||
/// Point "bytes" at the first byte of the filename
|
||||
bb := *buf
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
|
||||
}
|
||||
|
||||
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
|
||||
w.watches.remove(watch)
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch)
|
||||
}
|
||||
|
||||
// We can't really update the state when a watched path is moved; only
|
||||
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
|
||||
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
if watch.recurse { // Do nothing
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip if we're watching both this path and the parent; the parent will
|
||||
/// already send a delete so no need to do it twice.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
|
||||
_, ok := w.watches.path[filepath.Dir(watch.path)]
|
||||
if ok {
|
||||
return Event{}, true
|
||||
}
|
||||
}
|
||||
|
||||
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
|
||||
// Need to update watch path for recurse.
|
||||
if watch.recurse {
|
||||
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
|
||||
/// New directory created: set up watch on it.
|
||||
if isDir && ev.Has(Create) {
|
||||
err := w.register(ev.Name, watch.flags, true)
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
|
||||
// This was a directory rename, so we need to update all the
|
||||
// children.
|
||||
//
|
||||
// TODO: this is of course pretty slow; we should use a better data
|
||||
// structure for storing all of this, e.g. store children in the
|
||||
// watch. I have some code for this in my kqueue refactor we can use
|
||||
// in the future. For now I'm okay with this as it's not publicly
|
||||
// available. Correctness first, performance second.
|
||||
if ev.renamedFrom != "" {
|
||||
for k, ww := range w.watches.wd {
|
||||
if k == watch.wd || ww.path == ev.Name {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(ww.path, ev.renamedFrom) {
|
||||
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
|
||||
w.watches.wd[k] = ww
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ev, true
|
||||
}
|
||||
|
||||
func (w *inotify) isRecursive(path string) bool {
|
||||
ww := w.watches.byPath(path)
|
||||
if ww == nil { // path could be a file, so also check the Dir.
|
||||
ww = w.watches.byPath(filepath.Dir(path))
|
||||
}
|
||||
return ww != nil && ww.recurse
|
||||
}
|
||||
|
||||
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
@@ -449,11 +526,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_OPEN == unix.IN_OPEN {
|
||||
e.Op |= xUnportableOpen
|
||||
}
|
||||
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
|
||||
e.Op |= xUnportableRead
|
||||
}
|
||||
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
|
||||
e.Op |= xUnportableCloseWrite
|
||||
}
|
||||
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
|
||||
e.Op |= xUnportableCloseRead
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
|
||||
if cookie != 0 {
|
||||
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
w.cookiesMu.Lock()
|
||||
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
|
||||
w.cookieIndex++
|
||||
if w.cookieIndex > 9 {
|
||||
w.cookieIndex = 0
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
w.cookiesMu.Lock()
|
||||
var prev string
|
||||
for _, c := range w.cookies {
|
||||
if c.cookie == cookie {
|
||||
prev = c.path
|
||||
break
|
||||
}
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
e.renamedFrom = prev
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (w *inotify) xSupports(op Op) bool {
|
||||
return true // Supports everything.
|
||||
}
|
||||
|
||||
func (w *inotify) state() {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
for wd, ww := range w.watches.wd {
|
||||
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
|
||||
}
|
||||
}
|
||||
|
||||
848 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go (generated, vendored): file diff suppressed because it is too large
72 vendor/github.com/fsnotify/fsnotify/backend_other.go (generated, vendored)
@@ -1,66 +1,22 @@
|
||||
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
|
||||
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
||||
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
import "errors"
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct{}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
|
||||
type other struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
var defaultBufferSize = 0
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to the destination, removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return nil, errors.New("fsnotify not supported on the current platform")
|
||||
}
|
||||
func (w *other) Close() error { return nil }
|
||||
func (w *other) WatchList() []string { return nil }
|
||||
func (w *other) Add(name string) error { return nil }
|
||||
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
|
||||
func (w *other) Remove(name string) error { return nil }
|
||||
func (w *other) xSupports(op Op) bool { return false }
|
||||
|
||||
388 vendor/github.com/fsnotify/fsnotify/backend_windows.go (generated, vendored)
@@ -1,5 +1,8 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
// Windows backend based on ReadDirectoryChangesW()
|
||||
//
|
||||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
|
||||
package fsnotify
|
||||
|
||||
@@ -12,223 +15,122 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
type readDirChangesW struct {
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
done chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, isClosed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Protects access to watches, closed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
closed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
var defaultBufferSize = 50
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
w := &Watcher{
|
||||
w := &readDirChangesW{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
done: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
func (w *readDirChangesW) isClosed() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.closed
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
event := w.newEvent(name, uint32(mask))
|
||||
event.renamedFrom = renamedFrom
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case ch := <-w.done:
|
||||
w.done <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
func (w *readDirChangesW) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.quit:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
func (w *readDirChangesW) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
w.mu.Lock()
|
||||
w.closed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
// Send "done" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
w.done <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to the destination, removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return errors.New("watcher already closed")
|
||||
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
if with.bufsize < 4096 {
|
||||
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
bufsize: with.bufsize,
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
@@ -237,13 +139,15 @@ func (w *Watcher) Add(name string) error {
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
func (w *readDirChangesW) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
@@ -256,15 +160,24 @@ func (w *Watcher) Remove(name string) error {
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
func (w *Watcher) WatchList() []string {
|
||||
func (w *readDirChangesW) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
entries = append(entries, watchEntry.path)
|
||||
for name := range watchEntry.names {
|
||||
entries = append(entries, filepath.Join(watchEntry.path, name))
|
||||
}
|
||||
// the directory itself is being watched
|
||||
if watchEntry.mask != 0 {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -279,7 +192,6 @@ func (w *Watcher) WatchList() []string {
|
||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||
const (
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
@@ -291,7 +203,7 @@ const (
|
||||
sysFSIGNORED = 0x8000
|
||||
)
|
||||
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
@@ -305,9 +217,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
@@ -321,10 +230,11 @@ const (
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
bufsize int
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
@@ -334,13 +244,14 @@ type inode struct {
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [65536]byte // 64K buffer
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
recurse bool // Recursive watch?
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf []byte // buffer, allocated later
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -348,7 +259,7 @@ type (
|
||||
watchMap map[uint32]indexMap
|
||||
)
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
func (w *readDirChangesW) wakeupReader() error {
|
||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||
@@ -356,7 +267,7 @@ func (w *Watcher) wakeupReader() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) getDir(pathname string) (dir string, err error) {
|
||||
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
|
||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||
if err != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||
@@ -370,7 +281,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (w *Watcher) getIno(path string) (ino *inode, err error) {
|
||||
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
|
||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
||||
windows.FILE_LIST_DIRECTORY,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
||||
@@ -413,7 +324,9 @@ func (m watchMap) set(ino *inode, watch *watch) {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -433,9 +346,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
recurse: recurse,
|
||||
buf: make([]byte, bufsize),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
@@ -464,7 +379,9 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
func (w *readDirChangesW) remWatch(pathname string) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -478,6 +395,10 @@ func (w *Watcher) remWatch(pathname string) error {
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
|
||||
if recurse && !watch.recurse {
|
||||
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||
}
|
||||
|
||||
err = windows.CloseHandle(ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
@@ -486,11 +407,11 @@ func (w *Watcher) remWatch(pathname string) error {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
@@ -498,23 +419,23 @@ func (w *Watcher) remWatch(pathname string) error {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
func (w *readDirChangesW) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
func (w *readDirChangesW) startRead(watch *watch) error {
|
||||
err := windows.CancelIo(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CancelIo", err))
|
||||
@@ -535,13 +456,16 @@ func (w *Watcher) startRead(watch *watch) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
// We need to pass the array, rather than the slice.
|
||||
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
|
||||
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
|
||||
watch.recurse, mask, nil, &watch.ov, 0)
|
||||
if rdErr != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
@@ -554,7 +478,7 @@ func (w *Watcher) startRead(watch *watch) error {
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
func (w *readDirChangesW) readEvents() {
|
||||
var (
|
||||
n uint32
|
||||
key uintptr
|
||||
@@ -563,14 +487,13 @@ func (w *Watcher) readEvents() {
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
// This error is handled after the watch == nil check below.
|
||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||
// This error is handled after the watch == nil check below. NOTE: this
|
||||
// seems odd, not sure if it's correct.
|
||||
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
case ch := <-w.done:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
@@ -595,7 +518,7 @@ func (w *Watcher) readEvents() {
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
@@ -605,6 +528,8 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
|
||||
switch qErr {
|
||||
case nil:
|
||||
// No error
|
||||
case windows.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||
@@ -616,7 +541,7 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
case windows.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
@@ -626,13 +551,12 @@ func (w *Watcher) readEvents() {
|
||||
default:
|
||||
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.sendError(errors.New("short read in readEvents()"))
|
||||
w.sendError(ErrEventOverflow)
|
||||
break
|
||||
}
|
||||
|
||||
@@ -650,6 +574,10 @@ func (w *Watcher) readEvents() {
|
||||
name := windows.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
if debug {
|
||||
internal.Debug(fullname, raw.Action)
|
||||
}
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
@@ -678,21 +606,22 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
w.sendEvent(fullname, watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
w.sendEvent(fullname, "", watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action == windows.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
} else {
|
||||
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
}
|
||||
|
||||
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
@@ -703,8 +632,8 @@ func (w *Watcher) readEvents() {
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.sendError(errors.New(
|
||||
"Windows system assumed buffer larger than it is, events have likely been missed."))
|
||||
//lint:ignore ST1005 Windows should be capitalized
|
||||
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -715,21 +644,18 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
||||
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
|
||||
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case windows.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
@@ -744,3 +670,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
441 vendor/github.com/fsnotify/fsnotify/fsnotify.go (generated, vendored)
@@ -1,16 +1,148 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a cross-platform interface for file system
|
||||
// notifications.
|
||||
//
|
||||
// Currently supported systems:
|
||||
//
|
||||
// - Linux via inotify
|
||||
// - BSD, macOS via kqueue
|
||||
// - Windows via ReadDirectoryChangesW
|
||||
// - illumos via FEN
|
||||
//
|
||||
// # FSNOTIFY_DEBUG
|
||||
//
|
||||
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
|
||||
// stderr. This can be useful to track down some problems, especially in cases
|
||||
// where fsnotify is used as an indirect dependency.
|
||||
//
|
||||
// Every event will be printed as soon as there's something useful to print,
|
||||
// with as little processing from fsnotify.
|
||||
//
|
||||
// Example output:
|
||||
//
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
b backend
|
||||
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
}
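The Write note above suggests waiting until a burst of Write events has settled before acting. A minimal debounce sketch, assuming a watched directory at a hypothetical /tmp/dir and a 100ms quiet period:

```go
package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

// Debounce bursts of Write events: act only once a path has been quiet for
// 100ms. Error handling is kept minimal; /tmp/dir is a hypothetical path.
func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/dir"); err != nil {
		log.Fatal(err)
	}

	timers := map[string]*time.Timer{} // per-path debounce timers
	for {
		select {
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		case e, ok := <-w.Events:
			if !ok {
				return
			}
			if !e.Has(fsnotify.Write) {
				continue
			}
			name := e.Name
			if t, ok := timers[name]; ok {
				t.Reset(100 * time.Millisecond)
				continue
			}
			timers[name] = time.AfterFunc(100*time.Millisecond, func() {
				log.Println("settled write:", name)
			})
		}
	}
}
```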
|
||||
|
||||
// Event represents a file system notification.
|
||||
type Event struct {
|
||||
// Path to the file or directory.
|
||||
@@ -25,6 +157,16 @@ type Event struct {
|
||||
// This is a bitmask and some systems may send multiple operations at once.
|
||||
// Use the Event.Has() method instead of comparing with ==.
|
||||
Op Op
|
||||
|
||||
// Create events will have this set to the old path if it's a rename. This
|
||||
// only works when both the source and destination are watched. It's not
|
||||
// reliable when watching individual files, only directories.
|
||||
//
|
||||
// For example "mv /tmp/file /tmp/rename" will emit:
|
||||
//
|
||||
// Event{Op: Rename, Name: "/tmp/file"}
|
||||
// Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
|
||||
renamedFrom string
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
@@ -33,34 +175,206 @@ type Op uint32
|
||||
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||
// full description, and check them with [Event.Has].
|
||||
const (
|
||||
// A new pathname was created.
|
||||
Create Op = 1 << iota
|
||||
|
||||
// The pathname was written to; this does *not* mean the write has finished,
|
||||
// and a write can be followed by more writes.
|
||||
Write
|
||||
|
||||
// The path was removed; any watches on it will be removed. Some "remove"
|
||||
// operations may trigger a Rename if the file is actually moved (for
|
||||
// example "remove to trash" is often a rename).
|
||||
Remove
|
||||
|
||||
// The path was renamed to something else; any watches on it will be
|
||||
// removed.
|
||||
Rename
|
||||
|
||||
// File attributes were changed.
|
||||
//
|
||||
// It's generally not recommended to take action on this event, as it may
|
||||
// get triggered very frequently by some software. For example, Spotlight
|
||||
// indexing on macOS, anti-virus software, backup software, etc.
|
||||
Chmod
|
||||
|
||||
// File descriptor was opened.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableOpen
|
||||
|
||||
// File was read from.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableRead
|
||||
|
||||
// File opened for writing was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
//
|
||||
// The advantage of using this over Write is that it's more reliable than
|
||||
// waiting for Write events to stop. It's also faster (if you're not
|
||||
// listening to Write events): copying a file of a few GB can easily
|
||||
// generate tens of thousands of Write events in a short span of time.
|
||||
xUnportableCloseWrite
|
||||
|
||||
// File opened for reading was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableCloseRead
|
||||
)
|
||||
|
||||
// Common errors that can be reported by a watcher
|
||||
var (
|
||||
ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
|
||||
ErrEventOverflow = errors.New("fsnotify queue overflow")
|
||||
// ErrNonExistentWatch is used when Remove() is called on a path that's not
|
||||
// added.
|
||||
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
|
||||
|
||||
// ErrClosed is used when trying to operate on a closed Watcher.
|
||||
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||
|
||||
// ErrEventOverflow is reported from the Errors channel when there are too
|
||||
// many events:
|
||||
//
|
||||
// - inotify: inotify returns IN_Q_OVERFLOW – because there are too
|
||||
// many queued events (the fs.inotify.max_queued_events
|
||||
// sysctl can be used to increase this).
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||
|
||||
// ErrUnsupported is returned by AddWith() when WithOps() specified an
|
||||
// Unportable event that's not supported on this platform.
|
||||
//lint:ignore ST1012 not relevant
|
||||
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
|
||||
)
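One way to react to ErrEventOverflow, since events were dropped, is to fall back to a full rescan. A sketch, where rescan() is a hypothetical application callback:

```go
package example

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

// drainErrors treats queue/buffer overflow as a signal that events were lost
// and triggers a rescan; other errors are only logged. rescan is hypothetical.
func drainErrors(w *fsnotify.Watcher, rescan func()) {
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			log.Println("event overflow; some events were lost, rescanning")
			rescan()
			continue
		}
		log.Println("watch error:", err)
	}
}
```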
|
||||
|
||||
func (op Op) String() string {
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
    ev, errs := make(chan Event, defaultBufferSize), make(chan error)
    b, err := newBackend(ev, errs)
    if err != nil {
        return nil, err
    }
    return &Watcher{b: b, Events: ev, Errors: errs}, nil
}
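Putting NewWatcher together with the Events and Errors channels, a typical consumer looks roughly like this. This is a sketch against the public github.com/fsnotify/fsnotify API; the watched path "/tmp" and the logging are placeholders:

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    // Drain both channels; they are closed when the watcher is closed.
    go func() {
        for {
            select {
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                log.Println("event:", ev)
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }()

    if err := w.Add("/tmp"); err != nil { // placeholder path
        log.Fatal(err)
    }
    <-make(chan struct{}) // block; a real program would have a shutdown path
}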
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
    ev, errs := make(chan Event, sz), make(chan error)
    b, err := newBackend(ev, errs)
    if err != nil {
        return nil, err
    }
    return &Watcher{b: b, Events: ev, Errors: errs}, nil
}

// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
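Following the advice above (watch the parent directory, filter on Event.Name), here is a minimal sketch for reacting to changes to a single file. The helper name watchOneFile and the comparison strategy are assumptions for illustration, not upstream's cmd/fsnotify/file.go; error handling on the Errors channel is shown in the earlier example:

import (
    "log"
    "path/filepath"

    "github.com/fsnotify/fsnotify"
)

// watchOneFile watches the directory containing target and reports only
// events whose base name matches target's base name.
func watchOneFile(w *fsnotify.Watcher, target string) error {
    if err := w.Add(filepath.Dir(target)); err != nil {
        return err
    }
    for ev := range w.Events {
        if filepath.Base(ev.Name) != filepath.Base(target) {
            continue // some other file in the same directory
        }
        if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
            log.Println("changed:", ev.Name)
        }
    }
    return nil
}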
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
//  - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
//    other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
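For the Windows backend the only documented knob here is the buffer size; on other platforms the option is accepted and ignored. A hedged usage sketch, assuming w is the *fsnotify.Watcher created earlier; the path and the 1 MiB value are illustrative, and the helper name is made up:

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

// addBusyDir asks for a larger ReadDirectoryChangesW buffer for an
// event-heavy directory on Windows; a harmless no-op elsewhere.
func addBusyDir(w *fsnotify.Watcher) {
    if err := w.AddWith(`C:\build\output`, fsnotify.WithBufferSize(1<<20)); err != nil {
        log.Fatal(err)
    }
}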
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }

// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return w.b.Close() }

// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// The order is undefined, and may differ per call. Returns nil if
// [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return w.b.WatchList() }

// Supports reports if all the listed operations are supported by this platform.
//
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
// return false for an Op starting with Unportable.
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
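Because Remove is non-recursive and WatchList returns whatever is still being watched, tearing down all watches without closing the watcher can be done as below. The helper name is hypothetical, and ignoring ErrNonExistentWatch is a judgment call for paths that disappeared on their own, not something fsnotify requires:

import (
    "errors"

    "github.com/fsnotify/fsnotify"
)

// removeAll drops every watch that was added, one path at a time.
func removeAll(w *fsnotify.Watcher) error {
    for _, p := range w.WatchList() {
        if err := w.Remove(p); err != nil && !errors.Is(err, fsnotify.ErrNonExistentWatch) {
            return err
        }
    }
    return nil
}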
func (o Op) String() string {
    var b strings.Builder
    if op.Has(Create) {
    if o.Has(Create) {
        b.WriteString("|CREATE")
    }
    if op.Has(Remove) {
    if o.Has(Remove) {
        b.WriteString("|REMOVE")
    }
    if op.Has(Write) {
    if o.Has(Write) {
        b.WriteString("|WRITE")
    }
    if op.Has(Rename) {
    if o.Has(xUnportableOpen) {
        b.WriteString("|OPEN")
    }
    if o.Has(xUnportableRead) {
        b.WriteString("|READ")
    }
    if o.Has(xUnportableCloseWrite) {
        b.WriteString("|CLOSE_WRITE")
    }
    if o.Has(xUnportableCloseRead) {
        b.WriteString("|CLOSE_READ")
    }
    if o.Has(Rename) {
        b.WriteString("|RENAME")
    }
    if op.Has(Chmod) {
    if o.Has(Chmod) {
        b.WriteString("|CHMOD")
    }
    if b.Len() == 0 {
@@ -70,12 +384,113 @@ func (op Op) String() string {
    }

// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h == h }
func (o Op) Has(h Op) bool { return o&h != 0 }

// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
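Note the semantic change this hunk carries: Op.Has now tests for any overlapping bit (o&h != 0) rather than requiring every bit of the argument (o&h == h). A small sketch of the difference using only exported operations; the wrapper function is just to make the fragment compile:

import (
    "fmt"

    "github.com/fsnotify/fsnotify"
)

func hasExample() {
    op := fsnotify.Create | fsnotify.Write

    fmt.Println(op.Has(fsnotify.Write | fsnotify.Remove)) // true: Write overlaps
    fmt.Println(op.Has(fsnotify.Remove))                  // false: no overlap
    // Under the previous o&h == h definition the first call would have
    // required both Write and Remove to be set, and would have printed false.
}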
// String returns a string representation of the event with their path.
func (e Event) String() string {
    if e.renamedFrom != "" {
        return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
    }
    return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}

type (
    backend interface {
        Add(string) error
        AddWith(string, ...addOpt) error
        Remove(string) error
        WatchList() []string
        Close() error
        xSupports(Op) bool
    }
    addOpt   func(opt *withOpts)
    withOpts struct {
        bufsize    int
        op         Op
        noFollow   bool
        sendCreate bool
    }
)

var debug = func() bool {
    // Check for exactly "1" (rather than mere existence) so we can add
    // options/flags in the future. I don't know if we ever want that, but it's
    // nice to leave the option open.
    return os.Getenv("FSNOTIFY_DEBUG") == "1"
}()

var defaultOpts = withOpts{
    bufsize: 65536, // 64K
    op:      Create | Write | Remove | Rename | Chmod,
}

func getOptions(opts ...addOpt) withOpts {
    with := defaultOpts
    for _, o := range opts {
        if o != nil {
            o(&with)
        }
    }
    return with
}
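getOptions is the usual functional-options merge: start from defaultOpts, then let each addOpt mutate the struct in order. Because getOptions is unexported, the effect can only be observed from inside package fsnotify; a test-style sketch (name and values are illustrative):

import "testing"

func TestGetOptionsSketch(t *testing.T) {
    got := getOptions(WithBufferSize(1 << 20))
    if got.bufsize != 1<<20 {
        t.Fatalf("bufsize not overridden: %d", got.bufsize)
    }
    // Fields not touched by an option keep their defaults.
    if got.op != (Create | Write | Remove | Rename | Chmod) {
        t.Fatalf("op changed unexpectedly: %v", got.op)
    }
}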
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
//
// This only has effect on Windows systems, and is a no-op for other backends.
//
// The default value is 64K (65536 bytes) which is the highest value that works
// on all filesystems and should be enough for most applications, but if you
// have a large burst of events it may not be enough. You can increase it if
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
//
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
func WithBufferSize(bytes int) addOpt {
    return func(opt *withOpts) { opt.bufsize = bytes }
}

// WithOps sets which operations to listen for. The default is [Create],
// [Write], [Remove], [Rename], and [Chmod].
//
// Excluding operations you're not interested in can save quite a bit of CPU
// time; in some use cases there may be hundreds of thousands of useless Write
// or Chmod operations per second.
//
// This can also be used to add unportable operations not supported by all
// platforms; unportable operations all start with "Unportable":
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
// [UnportableCloseRead].
//
// AddWith returns an error when using an unportable operation that's not
// supported. Use [Watcher.Support] to check for support.
func withOps(op Op) addOpt {
    return func(opt *withOpts) { opt.op = op }
}

// WithNoFollow disables following symlinks, so the symlinks themselves are
// watched.
func withNoFollow() addOpt {
    return func(opt *withOpts) { opt.noFollow = true }
}

// "Internal" option for recursive watches on inotify.
func withCreate() addOpt {
    return func(opt *withOpts) { opt.sendCreate = true }
}

var enableRecurse = false

// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
    path = filepath.Clean(path)
    if !enableRecurse { // Only enabled in tests for now.
        return path, false
    }
    if filepath.Base(path) == "..." {
        return filepath.Dir(path), true
    }
    return path, false
}
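recursivePath is the hook for the recursive-watch syntax: a trailing "/..." marks the watch as recursive and is stripped, but only when enableRecurse is flipped on (test-only, per the variable above). Expected behaviour sketched as an in-package test; the test name is hypothetical:

import "testing"

func TestRecursivePathSketch(t *testing.T) {
    enableRecurse = true
    defer func() { enableRecurse = false }()

    // Trailing /... is stripped and the watch is flagged recursive.
    if p, rec := recursivePath("/tmp/dir/..."); !rec || p != "/tmp/dir" {
        t.Fatalf("got %q, %v", p, rec)
    }
    // A plain path is returned unchanged and non-recursive.
    if p, rec := recursivePath("/tmp/dir"); rec || p != "/tmp/dir" {
        t.Fatalf("got %q, %v", p, rec)
    }
}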
Some files were not shown because too many files have changed in this diff.