Merge pull request #50984 from timothysc/checkpoint

Automatic merge from submit-queue (batch tested with PRs 55812, 55752, 55447, 55848, 50984). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Initial basic bootstrap-checkpoint support

**What this PR does / why we need it**:
Adds initial support for Pod checkpointing to allow for controlled recovery of the control plane during self-hosted failure conditions.

fixes #49236
xref https://github.com/kubernetes/features/issues/378

**Special notes for your reviewer**:

Proposal is here: https://docs.google.com/document/d/1hhrCa_nv0Sg4O_zJYOnelE8a5ClieyewEsQM6c7-5-o/edit?ts=5988fba8#

1. Controlled tests work, but I have not tested self-hosted api-server recovery; that still requires validation and logs. /cc @luxas
2. While adding hooks for the checkpoint manager, I found that many of the tests around the basic pod manager appear to be stubbed out. This has become an anti-pattern in the code and should be avoided.
3. I need a node e2e test to ensure consistent behavior.

**Release note**:
```
Add basic bootstrap checkpointing support to the kubelet for control plane recovery
```
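
For context when reviewing, here is a minimal usage sketch (not part of this change) of how a control-plane pod opts into bootstrap checkpointing. The pod name and namespace are hypothetical; the annotation key and the `--bootstrap-checkpoint-path` kubelet flag are the ones introduced in the diff below.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical self-hosted control-plane pod. The kubelet must also be
	// started with --bootstrap-checkpoint-path=<dir> for checkpoints to be
	// written and restored.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kube-apiserver",
			Namespace: "kube-system",
			Annotations: map[string]string{
				// BootstrapCheckpointAnnotationKey added in this PR.
				"node.kubernetes.io/bootstrap-checkpoint": "true",
			},
		},
	}
	fmt.Println(pod.Name, pod.Annotations)
}
```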

/cc @kubernetes/sig-cluster-lifecycle-misc @kubernetes/sig-node-pr-reviews
Commit 277d866111 by Kubernetes Submit Queue, 2017-11-21 17:57:40 -08:00, committed by GitHub (GPG Key ID: 4AEE18F83AFDEB23)
25 changed files with 918 additions and 101 deletions

Godeps/Godeps.json (generated, 147 lines changed)

@ -847,6 +847,10 @@
"ImportPath": "github.com/daviddengcn/go-colortext",
"Rev": "511bcaf42ccd42c38aba7427b6673277bf19e2a1"
},
{
"ImportPath": "github.com/dchest/safefile",
"Rev": "855e8d98f1852d48dde521e0522408d1fe7e836a"
},
{
"ImportPath": "github.com/dgrijalva/jwt-go",
"Comment": "v3.0.0-4-g01aeca5",
@ -854,152 +858,152 @@
},
{
"ImportPath": "github.com/docker/distribution/digestset",
"Comment": "v2.6.0-rc.1-209-gedc3ab29",
"Comment": "v2.6.0-rc.1-209-gedc3ab2",
"Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
},
{
"ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.6.0-rc.1-209-gedc3ab29",
"Comment": "v2.6.0-rc.1-209-gedc3ab2",
"Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
},
{
"ImportPath": "github.com/docker/docker/api",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/blkiodev",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/container",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/events",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/filters",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/image",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/mount",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/network",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/registry",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/strslice",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/swarm",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/swarm/runtime",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/time",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/versions",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/api/types/volume",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/client",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/ioutils",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/jsonlog",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/jsonmessage",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/longpath",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/mount",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/stdcopy",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/symlink",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/system",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/term",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/term/windows",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
"ImportPath": "github.com/docker/docker/pkg/tlsconfig",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f",
"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
},
{
@ -1024,7 +1028,7 @@
},
{
"ImportPath": "github.com/docker/libnetwork/ipvs",
"Comment": "v0.8.0-dev.2-910-gba46b928",
"Comment": "v0.8.0-dev.2-910-gba46b92",
"Rev": "ba46b928444931e6865d8618dc03622cac79aa6f"
},
{
@ -1151,127 +1155,127 @@
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/compare",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity/command",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
@ -1783,6 +1787,7 @@
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Comment": "v1.0",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
@ -2167,77 +2172,77 @@
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/keys",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/user",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
"Comment": "v1.0.0-rc4-50-g4d6e6720",
"Comment": "v1.0.0-rc4-50-g4d6e672",
"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
},
{

Godeps/LICENSES (generated, 34 lines changed)

@ -29377,6 +29377,40 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/dchest/safefile licensed under: =
Copyright (c) 2013 Dmitry Chestnykh <dmitry@codingrobots.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/dchest/safefile/LICENSE 499ffd499356286ac590c21f7b8bd677
================================================================================
================================================================================
= vendor/github.com/dgrijalva/jwt-go licensed under: =


@ -157,6 +157,9 @@ type KubeletFlags struct {
ExitOnLockContention bool
// seccompProfileRoot is the directory path for seccomp profiles.
SeccompProfileRoot string
// bootstrapCheckpointPath is the path to the directory containing pod checkpoints to
// run on restore
BootstrapCheckpointPath string
// DEPRECATED FLAGS
// minimumGCAge is the minimum age for a finished container before it is
@ -343,6 +346,7 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&f.LockFilePath, "lock-file", f.LockFilePath, "<Warning: Alpha feature> The path to file for kubelet to use as a lock file.")
fs.BoolVar(&f.ExitOnLockContention, "exit-on-lock-contention", f.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.")
fs.StringVar(&f.SeccompProfileRoot, "seccomp-profile-root", f.SeccompProfileRoot, "<Warning: Alpha feature> Directory path for seccomp profiles.")
fs.StringVar(&f.BootstrapCheckpointPath, "bootstrap-checkpoint-path", f.BootstrapCheckpointPath, "<Warning: Alpha feature> Path to the directory where the checkpoints are stored.")
// DEPRECATED FLAGS
fs.DurationVar(&f.MinimumGCAge.Duration, "minimum-container-ttl-duration", f.MinimumGCAge.Duration, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'")


@ -729,7 +729,8 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.
kubeFlags.NonMasqueradeCIDR,
kubeFlags.KeepTerminatedPodVolumes,
kubeFlags.NodeLabels,
kubeFlags.SeccompProfileRoot)
kubeFlags.SeccompProfileRoot,
kubeFlags.BootstrapCheckpointPath)
if err != nil {
return fmt.Errorf("failed to create kubelet: %v", err)
}
@ -802,7 +803,8 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nonMasqueradeCIDR string,
keepTerminatedPodVolumes bool,
nodeLabels map[string]string,
seccompProfileRoot string) (k kubelet.Bootstrap, err error) {
seccompProfileRoot string,
bootstrapCheckpointPath string) (k kubelet.Bootstrap, err error) {
// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
// up into "per source" synchronizations
@ -835,7 +837,8 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nonMasqueradeCIDR,
keepTerminatedPodVolumes,
nodeLabels,
seccompProfileRoot)
seccompProfileRoot,
bootstrapCheckpointPath)
if err != nil {
return nil, err
}


@ -68,6 +68,10 @@ const (
// This annotation can be attached to node.
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
// BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by
// the kubelet prior to running
BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint"
// annotation key prefix used to identify non-convertible json paths.
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"


@ -254,6 +254,7 @@ filegroup(
"//pkg/kubelet/apis:all-srcs",
"//pkg/kubelet/cadvisor:all-srcs",
"//pkg/kubelet/certificate:all-srcs",
"//pkg/kubelet/checkpoint:all-srcs",
"//pkg/kubelet/client:all-srcs",
"//pkg/kubelet/cm:all-srcs",
"//pkg/kubelet/config:all-srcs",


@ -0,0 +1,42 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["checkpoint.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/checkpoint",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/dchest/safefile:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["checkpoint_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/checkpoint",
library = ":go_default_library",
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,151 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package checkpoint
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"github.com/dchest/safefile"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
// Delimiter used on checkpoints written to disk
delimiter = "_"
podPrefix = "Pod"
)
// Manager is the interface used to manage checkpoints
// which involves writing resources to disk to recover
// during restart or failure scenarios.
// https://github.com/kubernetes/community/pull/1241/files
type Manager interface {
// LoadPods will load checkpointed Pods from disk
LoadPods() ([]*v1.Pod, error)
// WritePod will serialize a Pod to disk
WritePod(pod *v1.Pod) error
// Deletes the checkpoint of the given pod from disk
DeletePod(pod *v1.Pod) error
}
var instance Manager
var mutex = &sync.Mutex{}
// fileCheckPointManager is a checkpointer that writes contents to disk.
// The type information of the resource objects is encoded in the file name.
type fileCheckPointManager struct {
path string
}
// NewCheckpointManager will create a Manager that points to the following path
func NewCheckpointManager(path string) Manager {
// NOTE: This is a precaution; current implementation should not run
// multiple checkpoint managers.
mutex.Lock()
defer mutex.Unlock()
instance = &fileCheckPointManager{path: path}
return instance
}
// GetInstance will return the current Manager; there should be only one.
func GetInstance() Manager {
mutex.Lock()
defer mutex.Unlock()
return instance
}
// loadPod will load a Pod checkpoint YAML file.
func (fcp *fileCheckPointManager) loadPod(file string) (*v1.Pod, error) {
return util.LoadPodFromFile(file)
}
// checkAnnotations will validate that the checkpoint annotation exists on the Pod
func (fcp *fileCheckPointManager) checkAnnotations(pod *v1.Pod) bool {
if podAnnotations := pod.GetAnnotations(); podAnnotations != nil {
if podAnnotations[core.BootstrapCheckpointAnnotationKey] == "true" {
return true
}
}
return false
}
// getPodPath returns the fully qualified path for the pod checkpoint
func (fcp *fileCheckPointManager) getPodPath(pod *v1.Pod) string {
return fmt.Sprintf("%v/Pod%v%v.yaml", fcp.path, delimiter, pod.GetUID())
}
// LoadPods loads all checkpoints from disk
func (fcp *fileCheckPointManager) LoadPods() ([]*v1.Pod, error) {
checkpoints := make([]*v1.Pod, 0)
files, err := ioutil.ReadDir(fcp.path)
if err != nil {
return nil, err
}
for _, f := range files {
// get just the filename
_, fname := filepath.Split(f.Name())
// Get just the Resource from "Resource_Name"
fnfields := strings.Split(fname, delimiter)
switch fnfields[0] {
case podPrefix:
pod, err := fcp.loadPod(fmt.Sprintf("%s/%s", fcp.path, f.Name()))
if err != nil {
return nil, err
}
checkpoints = append(checkpoints, pod)
default:
glog.Warningf("Unsupported checkpoint file detected %v", f)
}
}
return checkpoints, nil
}
// Writes a checkpoint to a file on disk if annotation is present
func (fcp *fileCheckPointManager) WritePod(pod *v1.Pod) error {
var err error
if fcp.checkAnnotations(pod) {
var blob []byte
if blob, err = yaml.Marshal(pod); err == nil {
err = safefile.WriteFile(fcp.getPodPath(pod), blob, 0644)
}
} else {
// This handles an edge case where a pod update removes the
// annotation, in which case the checkpoint should also be removed.
err = fcp.DeletePod(pod)
}
return err
}
// Deletes a checkpoint from disk if present
func (fcp *fileCheckPointManager) DeletePod(pod *v1.Pod) error {
podPath := fcp.getPodPath(pod)
if err := os.Remove(podPath); !os.IsNotExist(err) {
return err
}
return nil
}
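
For review purposes only, a minimal sketch (not part of this change) of how the Manager above is exercised. The checkpoint directory path is hypothetical; in the kubelet it comes from `--bootstrap-checkpoint-path`.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/pkg/kubelet/checkpoint"
)

func main() {
	// Hypothetical checkpoint directory.
	cp := checkpoint.NewCheckpointManager("/var/lib/kubelet/checkpoints")

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "kube-apiserver",
			UID:         "1234",
			Annotations: map[string]string{"node.kubernetes.io/bootstrap-checkpoint": "true"},
		},
	}

	// WritePod only persists pods that carry the bootstrap-checkpoint annotation.
	if err := cp.WritePod(pod); err != nil {
		fmt.Println("write failed:", err)
	}

	// On kubelet restart, LoadPods returns every pod checkpointed under the path.
	pods, err := cp.LoadPods()
	fmt.Println(len(pods), err)

	// DeletePod removes the checkpoint once the pod is gone or no longer annotated.
	_ = cp.DeletePod(pod)
}
```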


@ -0,0 +1,120 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package checkpoint
import (
"io/ioutil"
"os"
"reflect"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/core"
)
// TestWriteLoadDeletePods validates all combinations of write, load, and delete
func TestWriteLoadDeletePods(t *testing.T) {
testPods := []struct {
pod *v1.Pod
written bool
}{
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Foo",
Annotations: map[string]string{core.BootstrapCheckpointAnnotationKey: "true"},
UID: "1",
},
},
written: true,
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Foo2",
Annotations: map[string]string{core.BootstrapCheckpointAnnotationKey: "true"},
UID: "2",
},
},
written: true,
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Bar",
UID: "3",
},
},
written: false,
},
}
dir, err := ioutil.TempDir("", "checkpoint")
if err != nil {
t.Errorf("Failed to allocate temp directory for TestWriteLoadDeletePods error=%v", err)
}
defer os.RemoveAll(dir)
cp := NewCheckpointManager(dir)
for _, p := range testPods {
// Writing pods should always pass unless there is an fs error
if err := cp.WritePod(p.pod); err != nil {
t.Errorf("Failed to Write Pod: %v", err)
}
}
// verify the correct written files are loaded from disk
pods, err := cp.LoadPods()
if err != nil {
t.Errorf("Failed to Load Pods: %v", err)
}
// loop through the contents and make sure that
// what was loaded matches the expected results.
for _, p := range testPods {
pname := p.pod.GetName()
var lpod *v1.Pod
for _, check := range pods {
if check.GetName() == pname {
lpod = check
break
}
}
if p.written {
if lpod != nil {
if !reflect.DeepEqual(p.pod, lpod) {
t.Errorf("expected %#v, \ngot %#v", p.pod, lpod)
}
} else {
t.Errorf("Got unexpected result for %v, should have been loaded", pname)
}
} else if lpod != nil {
t.Errorf("Got unexpected result for %v, should not have been loaded", pname)
}
err = cp.DeletePod(p.pod)
if err != nil {
t.Errorf("Failed to delete pod %v", pname)
}
}
// finally, validate that the directory is empty.
files, err := ioutil.ReadDir(dir)
if err != nil {
t.Errorf("Failed to read directory %v", dir)
}
if len(files) > 0 {
t.Errorf("Directory %v should be empty but found %#v", dir, files)
}
}


@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -26,6 +20,7 @@ go_library(
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/kubelet/config",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
@ -33,6 +28,7 @@ go_library(
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/core/v1:go_default_library",
"//pkg/apis/core/validation:go_default_library",
"//pkg/kubelet/checkpoint:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/types:go_default_library",
@ -112,4 +108,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/checkpoint"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@ -61,8 +62,9 @@ type PodConfig struct {
updates chan kubetypes.PodUpdate
// contains the list of all configured sources
sourcesLock sync.Mutex
sources sets.String
sourcesLock sync.Mutex
sources sets.String
checkpointManager checkpoint.Manager
}
// NewPodConfig creates an object that can merge many configuration sources into a stream
@ -108,6 +110,19 @@ func (c *PodConfig) Sync() {
c.pods.Sync()
}
// Restore restores pods from the checkpoint path, *once*
func (c *PodConfig) Restore(path string, updates chan<- interface{}) error {
var err error
if c.checkpointManager == nil {
c.checkpointManager = checkpoint.NewCheckpointManager(path)
var pods []*v1.Pod
pods, err = c.checkpointManager.LoadPods()
if err == nil {
updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.RESTORE, Source: kubetypes.ApiserverSource}
}
}
return err
}
// podStorage manages the current pod state at any point in time and ensures updates
// to the channel are delivered in order. Note that this object is an in-memory source of
// "truth" and on creation contains zero entries. Once all previously read sources are
@ -152,7 +167,7 @@ func (s *podStorage) Merge(source string, change interface{}) error {
defer s.updateLock.Unlock()
seenBefore := s.sourcesSeen.Has(source)
adds, updates, deletes, removes, reconciles := s.merge(source, change)
adds, updates, deletes, removes, reconciles, restores := s.merge(source, change)
firstSet := !seenBefore && s.sourcesSeen.Has(source)
// deliver update notifications
@ -170,6 +185,9 @@ func (s *podStorage) Merge(source string, change interface{}) error {
if len(deletes.Pods) > 0 {
s.updates <- *deletes
}
if len(restores.Pods) > 0 {
s.updates <- *restores
}
if firstSet && len(adds.Pods) == 0 && len(updates.Pods) == 0 && len(deletes.Pods) == 0 {
// Send an empty update when first seeing the source and there are
// no ADD or UPDATE or DELETE pods from the source. This signals kubelet that
@ -206,7 +224,7 @@ func (s *podStorage) Merge(source string, change interface{}) error {
return nil
}
func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes, removes, reconciles *kubetypes.PodUpdate) {
func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes, removes, reconciles, restores *kubetypes.PodUpdate) {
s.podLock.Lock()
defer s.podLock.Unlock()
@ -215,6 +233,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
deletePods := []*v1.Pod{}
removePods := []*v1.Pod{}
reconcilePods := []*v1.Pod{}
restorePods := []*v1.Pod{}
pods := s.pods[source]
if pods == nil {
@ -287,6 +306,8 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
removePods = append(removePods, existing)
}
}
case kubetypes.RESTORE:
glog.V(4).Infof("Restoring pods for source %s", source)
default:
glog.Warningf("Received invalid update type: %v", update)
@ -300,8 +321,9 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
deletes = &kubetypes.PodUpdate{Op: kubetypes.DELETE, Pods: copyPods(deletePods), Source: source}
removes = &kubetypes.PodUpdate{Op: kubetypes.REMOVE, Pods: copyPods(removePods), Source: source}
reconciles = &kubetypes.PodUpdate{Op: kubetypes.RECONCILE, Pods: copyPods(reconcilePods), Source: source}
restores = &kubetypes.PodUpdate{Op: kubetypes.RESTORE, Pods: copyPods(restorePods), Source: source}
return adds, updates, deletes, removes, reconciles
return adds, updates, deletes, removes, reconciles, restores
}
func (s *podStorage) markSourceSet(source string) {


@ -218,7 +218,8 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nonMasqueradeCIDR string,
keepTerminatedPodVolumes bool,
nodeLabels map[string]string,
seccompProfileRoot string) (Bootstrap, error)
seccompProfileRoot string,
bootstrapCheckpointPath string) (Bootstrap, error)
// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
// at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
@ -271,7 +272,7 @@ type Dependencies struct {
// makePodSourceConfig creates a config.PodConfig from the given
// KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) {
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, bootstrapCheckpointPath string) (*config.PodConfig, error) {
manifestURLHeader := make(http.Header)
if len(kubeCfg.ManifestURLHeader) > 0 {
for k, v := range kubeCfg.ManifestURLHeader {
@ -286,7 +287,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
// define file config source
if kubeCfg.PodManifestPath != "" {
glog.Infof("Adding manifest file: %v", kubeCfg.PodManifestPath)
glog.Infof("Adding manifest path: %v", kubeCfg.PodManifestPath)
config.NewSourceFile(kubeCfg.PodManifestPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource))
}
@ -295,9 +296,22 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
glog.Infof("Adding manifest url %q with HTTP header %v", kubeCfg.ManifestURL, manifestURLHeader)
config.NewSourceURL(kubeCfg.ManifestURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource))
}
// Restore from the checkpoint path
// NOTE: This MUST happen before creating the apiserver source
// below, or the checkpoint would override the source of truth.
updatechannel := cfg.Channel(kubetypes.ApiserverSource)
if bootstrapCheckpointPath != "" {
glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath)
err := cfg.Restore(bootstrapCheckpointPath, updatechannel)
if err != nil {
return nil, err
}
}
if kubeDeps.KubeClient != nil {
glog.Infof("Watching apiserver")
config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, cfg.Channel(kubetypes.ApiserverSource))
config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, updatechannel)
}
return cfg, nil
}
@ -345,7 +359,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nonMasqueradeCIDR string,
keepTerminatedPodVolumes bool,
nodeLabels map[string]string,
seccompProfileRoot string) (*Kubelet, error) {
seccompProfileRoot string,
bootstrapCheckpointPath string) (*Kubelet, error) {
if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
}
@ -406,7 +421,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
if kubeDeps.PodConfig == nil {
var err error
kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName)
kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName, bootstrapCheckpointPath)
if err != nil {
return nil, err
}
@ -1837,17 +1852,28 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
glog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods))
// DELETE is treated as a UPDATE because of graceful deletion.
handler.HandlePodUpdates(u.Pods)
case kubetypes.RESTORE:
glog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods))
// These are pods restored from the checkpoint. Treat them as new
// pods.
handler.HandlePodAdditions(u.Pods)
case kubetypes.SET:
// TODO: Do we want to support this?
glog.Errorf("Kubelet does not support snapshot update")
}
// Mark the source ready after receiving at least one update from the
// source. Once all the sources are marked ready, various cleanup
// routines will start reclaiming resources. It is important that this
// takes place only after kubelet calls the update handler to process
// the update to ensure the internal pod cache is up-to-date.
kl.sourcesReady.AddSource(u.Source)
if u.Op != kubetypes.RESTORE {
// If the update type is RESTORE, it means that the update is from
// the pod checkpoints and may be incomplete. Do not mark the
// source as ready.
// Mark the source ready after receiving at least one update from the
// source. Once all the sources are marked ready, various cleanup
// routines will start reclaiming resources. It is important that this
// takes place only after kubelet calls the update handler to process
// the update to ensure the internal pod cache is up-to-date.
kl.sourcesReady.AddSource(u.Source)
}
case e := <-plegCh:
if isSyncPodWorthy(e) {
// PLEG event for a pod; sync it.


@ -14,6 +14,7 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/kubelet/pod",
deps = [
"//pkg/kubelet/checkpoint:go_default_library",
"//pkg/kubelet/configmap:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/secret:go_default_library",


@ -19,8 +19,11 @@ package pod
import (
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/kubelet/checkpoint"
"k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/secret"
@ -116,8 +119,9 @@ type basicManager struct {
translationByUID map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID
// basicManager is keeping secretManager and configMapManager up-to-date.
secretManager secret.Manager
configMapManager configmap.Manager
secretManager secret.Manager
configMapManager configmap.Manager
checkpointManager checkpoint.Manager
// A mirror pod client to create/delete mirror pods.
MirrorClient
@ -128,6 +132,7 @@ func NewBasicPodManager(client MirrorClient, secretManager secret.Manager, confi
pm := &basicManager{}
pm.secretManager = secretManager
pm.configMapManager = configMapManager
pm.checkpointManager = checkpoint.GetInstance()
pm.MirrorClient = client
pm.SetPods(nil)
return pm
@ -155,6 +160,11 @@ func (pm *basicManager) UpdatePod(pod *v1.Pod) {
pm.lock.Lock()
defer pm.lock.Unlock()
pm.updatePodsInternal(pod)
if pm.checkpointManager != nil {
if err := pm.checkpointManager.WritePod(pod); err != nil {
glog.Errorf("Error writing checkpoint for pod: %v", pod.GetName())
}
}
}
// updatePodsInternal replaces the given pods in the current state of the
@ -213,6 +223,11 @@ func (pm *basicManager) DeletePod(pod *v1.Pod) {
delete(pm.podByUID, kubetypes.ResolvedPodUID(pod.UID))
delete(pm.podByFullName, podFullName)
}
if pm.checkpointManager != nil {
if err := pm.checkpointManager.DeletePod(pod); err != nil {
glog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName())
}
}
}
func (pm *basicManager) GetPods() []*v1.Pod {


@ -49,6 +49,8 @@ const (
// Pods with the given ids have unexpected status in this source,
// kubelet should reconcile status with this source
RECONCILE
// Pods with the given ids have been restored from a checkpoint.
RESTORE
// These constants identify the sources of pods
// Updates from a file


@ -233,7 +233,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
}
pod := &v1.Pod{}
codec := legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion)
codec := legacyscheme.Codecs.UniversalDecoder()
if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
return nil, fmt.Errorf("failed decoding file: %v", err)
}

vendor/BUILD (vendored, 1 line changed)

@ -125,6 +125,7 @@ filegroup(
"//vendor/github.com/d2g/dhcp4client:all-srcs",
"//vendor/github.com/davecgh/go-spew/spew:all-srcs",
"//vendor/github.com/daviddengcn/go-colortext:all-srcs",
"//vendor/github.com/dchest/safefile:all-srcs",
"//vendor/github.com/dgrijalva/jwt-go:all-srcs",
"//vendor/github.com/docker/distribution/digestset:all-srcs",
"//vendor/github.com/docker/distribution/reference:all-srcs",

vendor/github.com/dchest/safefile/.travis.yml (new vendored file, 8 lines)

@ -0,0 +1,8 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- tip

vendor/github.com/dchest/safefile/BUILD (new vendored file, 30 lines)

@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"rename.go",
"safefile.go",
] + select({
"@io_bazel_rules_go//go/platform:windows_amd64": [
"rename_nonatomic.go",
],
"//conditions:default": [],
}),
importpath = "github.com/dchest/safefile",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

vendor/github.com/dchest/safefile/LICENSE (new vendored file, 26 lines)

@ -0,0 +1,26 @@
Copyright (c) 2013 Dmitry Chestnykh <dmitry@codingrobots.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/dchest/safefile/README.md (new vendored file, 44 lines)

@ -0,0 +1,44 @@
# safefile
[![Build Status](https://travis-ci.org/dchest/safefile.svg)](https://travis-ci.org/dchest/safefile) [![Windows Build status](https://ci.appveyor.com/api/projects/status/owlifxeekg75t2ho?svg=true)](https://ci.appveyor.com/project/dchest/safefile)
Go package safefile implements safe "atomic" saving of files.
Instead of truncating and overwriting the destination file, it creates a
temporary file in the same directory, writes to it, and then renames the
temporary file to the original name when calling Commit.
### Installation
```
$ go get github.com/dchest/safefile
```
### Documentation
<https://godoc.org/github.com/dchest/safefile>
### Example
```go
f, err := safefile.Create("/home/ken/report.txt", 0644)
if err != nil {
// ...
}
// Created temporary file /home/ken/sf-ppcyksu5hyw2mfec.tmp
defer f.Close()
_, err = io.WriteString(f, "Hello world")
if err != nil {
// ...
}
// Wrote "Hello world" to /home/ken/sf-ppcyksu5hyw2mfec.tmp
err = f.Commit()
if err != nil {
// ...
}
// Renamed /home/ken/sf-ppcyksu5hyw2mfec.tmp to /home/ken/report.txt
```

vendor/github.com/dchest/safefile/appveyor.yml (new vendored file, 24 lines)

@ -0,0 +1,24 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\projects\src\github.com\dchest\safefile
environment:
PATH: c:\projects\bin;%PATH%
GOPATH: c:\projects
NOTIFY_TIMEOUT: 5s
install:
- go version
- go get golang.org/x/tools/cmd/vet
- go get -v -t ./...
build_script:
- go tool vet -all .
- go build ./...
- go test -v -race ./...
test: off
deploy: off

vendor/github.com/dchest/safefile/rename.go (new vendored file, 9 lines)

@ -0,0 +1,9 @@
// +build !plan9,!windows windows,go1.5
package safefile
import "os"
func rename(oldname, newname string) error {
return os.Rename(oldname, newname)
}

vendor/github.com/dchest/safefile/rename_nonatomic.go (new vendored file, 51 lines)

@ -0,0 +1,51 @@
// +build plan9 windows,!go1.5
// os.Rename on Windows before Go 1.5 and Plan 9 will not overwrite existing
// files, thus we cannot guarantee atomic saving of file by doing rename.
// We will have to do some voodoo to minimize data loss on those systems.
package safefile
import (
"os"
"path/filepath"
)
func rename(oldname, newname string) error {
err := os.Rename(oldname, newname)
if err != nil {
// If newname exists ("original"), we will try renaming it to a
// new temporary name, then renaming oldname to the newname,
// and deleting the renamed original. If system crashes between
// renaming and deleting, the original file will still be available
// under the temporary name, so users can manually recover data.
// (No automatic recovery is possible because after crash the
// temporary name is not known.)
var origtmp string
for {
origtmp, err = makeTempName(newname, filepath.Base(newname))
if err != nil {
return err
}
_, err = os.Stat(origtmp)
if err == nil {
continue // most likely will never happen
}
break
}
err = os.Rename(newname, origtmp)
if err != nil {
return err
}
err = os.Rename(oldname, newname)
if err != nil {
// Rename still fails, try to revert original rename,
// ignoring errors.
os.Rename(origtmp, newname)
return err
}
// Rename succeeded, now delete original file.
os.Remove(origtmp)
}
return nil
}

vendor/github.com/dchest/safefile/safefile.go (new vendored file, 197 lines)

@ -0,0 +1,197 @@
// Copyright 2013 Dmitry Chestnykh. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package safefile implements safe "atomic" saving of files.
//
// Instead of truncating and overwriting the destination file, it creates a
// temporary file in the same directory, writes to it, and then renames the
// temporary file to the original name when calling Commit.
//
// Example:
//
// f, err := safefile.Create("/home/ken/report.txt", 0644)
// if err != nil {
// // ...
// }
// // Created temporary file /home/ken/sf-ppcyksu5hyw2mfec.tmp
//
// defer f.Close()
//
// _, err = io.WriteString(f, "Hello world")
// if err != nil {
// // ...
// }
// // Wrote "Hello world" to /home/ken/sf-ppcyksu5hyw2mfec.tmp
//
// err = f.Commit()
// if err != nil {
// // ...
// }
// // Renamed /home/ken/sf-ppcyksu5hyw2mfec.tmp to /home/ken/report.txt
//
package safefile
import (
"crypto/rand"
"encoding/base32"
"errors"
"io"
"os"
"path/filepath"
"strings"
)
// ErrAlreadyCommitted error is returned when calling Commit on a file that
// has been already successfully committed.
var ErrAlreadyCommitted = errors.New("file already committed")
type File struct {
*os.File
origName string
closeFunc func(*File) error
isClosed bool // if true, temporary file has been closed, but not renamed
isCommitted bool // if true, the file has been successfully committed
}
func makeTempName(origname, prefix string) (tempname string, err error) {
origname = filepath.Clean(origname)
if len(origname) == 0 || origname[len(origname)-1] == filepath.Separator {
return "", os.ErrInvalid
}
// Generate 10 random bytes.
// This gives 80 bits of entropy, good enough
// for making temporary file name unpredictable.
var rnd [10]byte
if _, err := rand.Read(rnd[:]); err != nil {
return "", err
}
name := prefix + "-" + strings.ToLower(base32.StdEncoding.EncodeToString(rnd[:])) + ".tmp"
return filepath.Join(filepath.Dir(origname), name), nil
}
// Create creates a temporary file in the same directory as filename,
// which will be renamed to the given filename when calling Commit.
func Create(filename string, perm os.FileMode) (*File, error) {
for {
tempname, err := makeTempName(filename, "sf")
if err != nil {
return nil, err
}
f, err := os.OpenFile(tempname, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
if err != nil {
if os.IsExist(err) {
continue
}
return nil, err
}
return &File{
File: f,
origName: filename,
closeFunc: closeUncommitted,
}, nil
}
}
// OrigName returns the original filename given to Create.
func (f *File) OrigName() string {
return f.origName
}
// Close closes temporary file and removes it.
// If the file has been committed, Close is no-op.
func (f *File) Close() error {
return f.closeFunc(f)
}
func closeUncommitted(f *File) error {
err0 := f.File.Close()
err1 := os.Remove(f.Name())
f.closeFunc = closeAgainError
if err0 != nil {
return err0
}
return err1
}
func closeAfterFailedRename(f *File) error {
// Remove temporary file.
//
// The note from Commit function applies here too, as we may be
// removing a different file. However, since we rely on our temporary
// names being unpredictable, this should not be a concern.
f.closeFunc = closeAgainError
return os.Remove(f.Name())
}
func closeCommitted(f *File) error {
// noop
return nil
}
func closeAgainError(f *File) error {
return os.ErrInvalid
}
// Commit safely commits data into the original file by syncing temporary
// file to disk, closing it and renaming to the original file name.
//
// In case of success, the temporary file is closed and no longer exists
// on disk. It is safe to call Close after Commit: the operation will do
// nothing.
//
// In case of error, the temporary file is still opened and exists on disk;
// it must be closed by callers by calling Close or by trying to commit again.
// Note that when trying to Commit again after a failed Commit when the file
// has been closed, but not renamed to its original name (the new commit will
// try again to rename it), safefile cannot guarantee that the temporary file
// has not been changed, or that it is the same temporary file we were dealing
// with. However, since the temporary name is unpredictable, it is unlikely
// that this happened accidentally. If complete atomicity is needed, do not
// Commit again after error, write the file again.
func (f *File) Commit() error {
if f.isCommitted {
return ErrAlreadyCommitted
}
if !f.isClosed {
// Sync to disk.
err := f.Sync()
if err != nil {
return err
}
// Close underlying os.File.
err = f.File.Close()
if err != nil {
return err
}
f.isClosed = true
}
// Rename.
err := rename(f.Name(), f.origName)
if err != nil {
f.closeFunc = closeAfterFailedRename
return err
}
f.closeFunc = closeCommitted
f.isCommitted = true
return nil
}
// WriteFile is a safe analog of ioutil.WriteFile.
func WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := Create(filename, perm)
if err != nil {
return err
}
defer f.Close()
n, err := f.Write(data)
if err != nil {
return err
}
if err == nil && n < len(data) {
err = io.ErrShortWrite
return err
}
return f.Commit()
}
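
For completeness, a minimal sketch of the vendored helper the checkpoint manager relies on: `safefile.WriteFile` writes to a temporary file in the destination directory, syncs it, and renames it into place, so a crash never leaves a half-written checkpoint. The path below is hypothetical (it mirrors the `Pod_<UID>.yaml` naming used by getPodPath).

```go
package main

import (
	"log"

	"github.com/dchest/safefile"
)

func main() {
	// Hypothetical checkpoint path; the write either fully succeeds or the
	// original file (if any) is left untouched.
	data := []byte("kind: Pod\n")
	if err := safefile.WriteFile("/var/lib/kubelet/checkpoints/Pod_1234.yaml", data, 0644); err != nil {
		log.Fatalf("atomic write failed: %v", err)
	}
}
```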