Merge pull request #40727 from Random-Liu/handle-cri-in-place-upgrade

Automatic merge from submit-queue

CRI: Handle cri in-place upgrade

Fixes https://github.com/kubernetes/kubernetes/issues/40051.

## How does this PR restart/remove legacy containers/sandboxes?
With this PR, dockershim converts legacy application and infra containers and returns them as regular CRI containers/sandboxes. We can then rely on the SyncPod logic to stop the legacy containers/sandboxes, and on the garbage collector to remove them.
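Concretely, the listing paths merge in converted legacy results whenever cleanup is still pending. A minimal sketch of the pattern (mirroring the `ListContainers` change in the diff below):

```go
// Sketch only; the merged code in docker_container.go below is authoritative.
if !ds.legacyCleanup.Done() {
	legacyContainers, err := ds.ListLegacyContainers(filter)
	if err != nil {
		return nil, err
	}
	// Legacy containers are always older, so appending them keeps ordering sane.
	result = append(result, legacyContainers...)
}
```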

To forcibly trigger restart:
* For infra containers, we report `hostNetwork` as the opposite of its real value in the sandbox status to trigger a restart (see the sketch below and [here](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kuberuntime/kuberuntime_manager.go#L389)).
* Application containers will be restarted along with their infra container.
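For illustration, here is a simplified, hypothetical sketch of the comparison that makes the flip work; the real check lives in `podSandboxChanged` in `kuberuntime_manager.go` (linked above), and this is not the actual code:

```go
// Hypothetical sketch of kuberuntime's sandbox-change check: a mismatch
// between the pod spec's hostNetwork and the value reported in the sandbox
// status marks the sandbox as changed, so kubelet recreates it together
// with all of the pod's application containers.
func sandboxChanged(specHostNetwork, reportedHostNetwork bool) bool {
	return specHostNetwork != reportedHostNetwork
}
```

Because dockershim reports the opposite of the real value for legacy sandboxes, this check always fires and the sandbox is recreated with proper dockershim labels.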
## How does this PR avoid extra overhead when there is no legacy container/sandbox?
Because legacy containers lack some labels, listing them requires an extra `docker ps`. We should not impose a constant performance regression just for legacy container cleanup, so we added the `legacyCleanupFlag`:
* In `ListContainers` and `ListPodSandbox`, perform the extra `ListLegacyContainers` and `ListLegacyPodSandbox` calls only when `legacyCleanupFlag` is `NotDone`.
* When dockershim starts, it will check whether there are legacy containers/sandboxes.
  * If there are none, it will mark `legacyCleanupFlag` as `Done`.
  * If there are any, it will leave `legacyCleanupFlag` as `NotDone` and start a goroutine that periodically checks whether legacy cleanup is done.
This ensures the extra overhead is incurred only while legacy containers/sandboxes remain uncleaned (see the sketch below).
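A rough sketch of the startup sequence (names taken from the `docker_legacy.go` diff below; logic simplified, assuming the usual `time` and `k8s.io/apimachinery/pkg/util/wait` imports):

```go
// Simplified sketch; LegacyCleanupInit in the diff below is the real version.
func initLegacyCleanup(check func() (done bool, err error), interval time.Duration) {
	// One synchronous check at startup: if nothing legacy exists, cleanup is
	// marked done inside check and the extra docker ps is never paid again.
	if done, _ := check(); done {
		return
	}
	// Otherwise, poll in the background until the check reports done.
	go wait.PollInfinite(interval, check)
}
```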

## Caveats
* In-place upgrade will cause kubelet to restart all running containers.
* `RestartNever` containers will not be restarted.
* The garbage collector sometimes keeps legacy containers around for a long time if there aren't many containers on the node. In that case, dockershim keeps performing the extra `docker ps`, which introduces overhead.
  * Manually removing all legacy containers fixes this.
  * Should we garbage collect legacy containers/sandboxes in dockershim by ourselves? /cc @yujuhong 
* Host ports will not be reclaimed, because there are no checkpoints for legacy sandboxes. https://github.com/kubernetes/kubernetes/pull/39903 /cc @freehan 

/cc @yujuhong @feiskyer @dchen1107 @kubernetes/sig-node-api-reviews 
**Release note**:

```release-note
We should mention the caveats of in-place upgrade in the release note.
```
Merged by Kubernetes Submit Queue on 2017-02-03 22:17:56 -08:00 (committed by GitHub).
Commit 4796c7b409: 9 changed files with 629 additions and 6 deletions.


@@ -17,6 +17,7 @@ go_library(
"docker_checkpoint.go",
"docker_container.go",
"docker_image.go",
"docker_legacy.go",
"docker_sandbox.go",
"docker_service.go",
"docker_streaming.go",
@@ -53,6 +54,8 @@ go_library(
"//vendor:github.com/docker/go-connections/nat",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
],
)
@@ -64,6 +67,7 @@ go_test(
"docker_checkpoint_test.go",
"docker_container_test.go",
"docker_image_test.go",
"docker_legacy_test.go",
"docker_sandbox_test.go",
"docker_service_test.go",
"helpers_test.go",


@@ -75,6 +75,15 @@ func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*
result = append(result, converted)
}
// Include legacy containers if there are still legacy containers not cleaned up yet.
if !ds.legacyCleanup.Done() {
legacyContainers, err := ds.ListLegacyContainers(filter)
if err != nil {
return nil, err
}
// Legacy containers are always older, so we can safely append them to the end.
result = append(result, legacyContainers...)
}
return result, nil
}
@@ -360,6 +369,15 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.Contai
ct, st, ft := createdAt.UnixNano(), startedAt.UnixNano(), finishedAt.UnixNano()
exitCode := int32(r.State.ExitCode)
// If the container has no containerTypeLabelKey label, treat it as a legacy container.
if _, ok := r.Config.Labels[containerTypeLabelKey]; !ok {
names, labels, err := convertLegacyNameAndLabels([]string{r.Name}, r.Config.Labels)
if err != nil {
return nil, err
}
r.Name, r.Config.Labels = names[0], labels
}
metadata, err := parseContainerName(r.Name)
if err != nil {
return nil, err


@@ -0,0 +1,284 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"strings"
"sync/atomic"
"time"
dockertypes "github.com/docker/engine-api/types"
dockerfilters "github.com/docker/engine-api/types/filters"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/leaky"
)
// These are labels used by kuberuntime. Ideally, we should not rely on kuberuntime implementation
// detail in dockershim. However, we need these labels for legacy container (containers created by
// kubernetes 1.4 and 1.5) support.
// TODO(random-liu): Remove this file and related code in kubernetes 1.8.
const (
podDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
containerHashLabel = "io.kubernetes.container.hash"
containerRestartCountLabel = "io.kubernetes.container.restartCount"
containerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
containerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy"
containerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
containerPortsLabel = "io.kubernetes.container.ports"
)
// NOTE that we can't handle the following dockershim internal labels, so they will be empty:
// * containerLogPathLabelKey ("io.kubernetes.container.logpath"): RemoveContainer will ignore
// the label if it's empty.
// * sandboxIDLabelKey ("io.kubernetes.sandbox.id"): This is used in 2 places:
// * filter.PodSandboxId: The filter is not used in kuberuntime now.
// * runtimeapi.Container.PodSandboxId: toRuntimeAPIContainer retrieves PodSandboxId from the
// label. The field is used in kuberuntime sandbox garbage collection. Missing this may cause
// pod sandbox to be removed before its containers are removed.
// convertLegacyNameAndLabels converts legacy name and labels into dockershim name and labels.
// The function can be used for either legacy infra containers or regular application containers.
// NOTE that legacy infra container doesn't have restart count label, so the returned attempt for
// sandbox will always be 0. The sandbox attempt is only used to generate new sandbox name, and
// there is no naming conflict between legacy and new containers/sandboxes, so it should be fine.
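// For example, a legacy infra container named
//   "k8s_POD.hash1_podname_podnamespace_poduid_randomid"
// converts to the dockershim name
//   "k8s_POD_podname_podnamespace_poduid_0"
// (hypothetical values; see the test cases in docker_legacy_test.go).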
func convertLegacyNameAndLabels(names []string, labels map[string]string) ([]string, map[string]string, error) {
if len(names) == 0 {
return nil, nil, fmt.Errorf("unexpected empty name")
}
// Generate new dockershim name.
m, _, err := dockertools.ParseDockerName(names[0])
if err != nil {
return nil, nil, err
}
sandboxName, sandboxNamespace, err := kubecontainer.ParsePodFullName(m.PodFullName)
if err != nil {
return nil, nil, err
}
newNames := []string{strings.Join([]string{
kubePrefix, // 0
m.ContainerName, // 1: container name
sandboxName, // 2: sandbox name
sandboxNamespace, // 3: sandbox namespace
string(m.PodUID), // 4: sandbox uid
labels[containerRestartCountLabel], // 5
}, nameDelimiter)}
// Generate new labels.
legacyAnnotations := sets.NewString(
containerHashLabel,
containerRestartCountLabel,
containerTerminationMessagePathLabel,
containerTerminationMessagePolicyLabel,
containerPreStopHandlerLabel,
containerPortsLabel,
)
newLabels := map[string]string{}
for k, v := range labels {
if legacyAnnotations.Has(k) {
// Add annotation prefix for all legacy labels which should be annotations in dockershim.
newLabels[fmt.Sprintf("%s%s", annotationPrefix, k)] = v
} else {
newLabels[k] = v
}
}
// Add containerTypeLabelKey indicating the container is sandbox or application container.
if m.ContainerName == leaky.PodInfraContainerName {
newLabels[containerTypeLabelKey] = containerTypeLabelSandbox
} else {
newLabels[containerTypeLabelKey] = containerTypeLabelContainer
}
return newNames, newLabels, nil
}
// legacyCleanupCheckInterval is the interval at which the legacy cleanup check is performed.
const legacyCleanupCheckInterval = 10 * time.Second
// LegacyCleanupInit initializes the legacy cleanup flag. If necessary, it will start a goroutine
// which periodically checks legacy cleanup until it finishes.
func (ds *dockerService) LegacyCleanupInit() {
// If there is no legacy container/sandbox, just return.
if clean, _ := ds.checkLegacyCleanup(); clean {
return
}
// Otherwise, start the cleanup routine.
go wait.PollInfinite(legacyCleanupCheckInterval, ds.checkLegacyCleanup)
}
// checkLegacyCleanup lists legacy containers/sandboxes; if none are found, it marks the legacy
// cleanup flag as done.
func (ds *dockerService) checkLegacyCleanup() (bool, error) {
// Always do legacy cleanup when list fails.
sandboxes, err := ds.ListLegacyPodSandbox(nil)
if err != nil {
glog.Errorf("Failed to list legacy pod sandboxes: %v", err)
return false, nil
}
containers, err := ds.ListLegacyContainers(nil)
if err != nil {
glog.Errorf("Failed to list legacy containers: %v", err)
return false, nil
}
if len(sandboxes) != 0 || len(containers) != 0 {
glog.V(4).Infof("Found legacy sandboxes %+v, legacy containers %+v, continue legacy cleanup",
sandboxes, containers)
return false, nil
}
ds.legacyCleanup.MarkDone()
glog.V(2).Infof("No legacy containers found, stop performing legacy cleanup.")
return true, nil
}
// ListLegacyPodSandbox lists only legacy pod sandboxes.
func (ds *dockerService) ListLegacyPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
// By default, list all containers whether they are running or not.
opts := dockertypes.ContainerListOptions{All: true, Filter: dockerfilters.NewArgs()}
filterOutReadySandboxes := false
f := newDockerFilter(&opts.Filter)
if filter != nil {
if filter.Id != "" {
f.Add("id", filter.Id)
}
if filter.State != nil {
if filter.GetState().State == runtimeapi.PodSandboxState_SANDBOX_READY {
// Only list running containers.
opts.All = false
} else {
// runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the
// container is in any of the non-running state (e.g., created,
// exited). We can't tell docker to filter out running
// containers directly, so we'll need to filter them out
// ourselves after getting the results.
filterOutReadySandboxes = true
}
}
if filter.LabelSelector != nil {
for k, v := range filter.LabelSelector {
f.AddLabel(k, v)
}
}
}
containers, err := ds.client.ListContainers(opts)
if err != nil {
return nil, err
}
// Convert docker containers to runtime api sandboxes.
result := []*runtimeapi.PodSandbox{}
for i := range containers {
c := containers[i]
// Skip new containers with containerTypeLabelKey label.
if _, ok := c.Labels[containerTypeLabelKey]; ok {
continue
}
// If the container has no containerTypeLabelKey label, treat it as a legacy container.
c.Names, c.Labels, err = convertLegacyNameAndLabels(c.Names, c.Labels)
if err != nil {
glog.V(4).Infof("Unable to convert legacy container %+v: %v", c, err)
continue
}
if c.Labels[containerTypeLabelKey] != containerTypeLabelSandbox {
continue
}
converted, err := containerToRuntimeAPISandbox(&c)
if err != nil {
glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err)
continue
}
if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}
result = append(result, converted)
}
return result, nil
}
// ListLegacyContainers lists only legacy containers.
func (ds *dockerService) ListLegacyContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
opts := dockertypes.ContainerListOptions{All: true, Filter: dockerfilters.NewArgs()}
f := newDockerFilter(&opts.Filter)
if filter != nil {
if filter.Id != "" {
f.Add("id", filter.Id)
}
if filter.State != nil {
f.Add("status", toDockerContainerStatus(filter.GetState().State))
}
// NOTE: Legacy container doesn't have sandboxIDLabelKey label, so we can't filter with
// it. Fortunately, PodSandboxId is not used in kuberuntime now.
if filter.LabelSelector != nil {
for k, v := range filter.LabelSelector {
f.AddLabel(k, v)
}
}
}
containers, err := ds.client.ListContainers(opts)
if err != nil {
return nil, err
}
// Convert docker to runtime api containers.
result := []*runtimeapi.Container{}
for i := range containers {
c := containers[i]
// Skip new containers with containerTypeLabelKey label.
if _, ok := c.Labels[containerTypeLabelKey]; ok {
continue
}
// If the container has no containerTypeLabelKey label, treat it as a legacy container.
c.Names, c.Labels, err = convertLegacyNameAndLabels(c.Names, c.Labels)
if err != nil {
glog.V(4).Infof("Unable to convert legacy container %+v: %v", c, err)
continue
}
if c.Labels[containerTypeLabelKey] != containerTypeLabelContainer {
continue
}
converted, err := toRuntimeAPIContainer(&c)
if err != nil {
glog.V(4).Infof("Unable to convert docker container to runtime API container %+v: %v", c, err)
continue
}
result = append(result, converted)
}
return result, nil
}
// legacyCleanupFlag is the flag indicating whether legacy cleanup is done.
type legacyCleanupFlag struct {
done int32
}
// Done checks whether legacy cleanup is done.
func (f *legacyCleanupFlag) Done() bool {
return atomic.LoadInt32(&f.done) == 1
}
// MarkDone sets legacy cleanup as done.
func (f *legacyCleanupFlag) MarkDone() {
atomic.StoreInt32(&f.done, 1)
}


@@ -0,0 +1,266 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"testing"
dockercontainer "github.com/docker/engine-api/types/container"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/types"
)
func TestConvertLegacyNameAndLabels(t *testing.T) {
for desc, test := range map[string]struct {
names []string
labels map[string]string
expectNames []string
expectLabels map[string]string
expectError bool
}{
"legacy infra container": {
names: []string{"k8s_POD.hash1_podname_podnamespace_poduid_randomid"},
labels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "POD",
containerHashLabel: "hash",
containerRestartCountLabel: "0",
},
expectNames: []string{"k8s_POD_podname_podnamespace_poduid_0"},
expectLabels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "POD",
annotationPrefix + containerHashLabel: "hash",
annotationPrefix + containerRestartCountLabel: "0",
containerTypeLabelKey: containerTypeLabelSandbox,
},
},
"legacy application container": {
names: []string{"k8s_containername.hash_podname_podnamespace_poduid_randomid"},
labels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "containername",
containerHashLabel: "hash",
containerRestartCountLabel: "5",
containerTerminationMessagePathLabel: "terminationmessagepath",
containerTerminationMessagePolicyLabel: "terminationmessagepolicy",
containerPreStopHandlerLabel: "prestophandler",
containerPortsLabel: "ports",
},
expectNames: []string{"k8s_containername_podname_podnamespace_poduid_5"},
expectLabels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "containername",
annotationPrefix + containerHashLabel: "hash",
annotationPrefix + containerRestartCountLabel: "5",
annotationPrefix + containerTerminationMessagePathLabel: "terminationmessagepath",
annotationPrefix + containerTerminationMessagePolicyLabel: "terminationmessagepolicy",
annotationPrefix + containerPreStopHandlerLabel: "prestophandler",
annotationPrefix + containerPortsLabel: "ports",
containerTypeLabelKey: containerTypeLabelContainer,
},
expectError: false,
},
"invalid sandbox name": {
names: []string{"POD_podname_podnamespace_poduid_0"},
expectError: true,
},
"invalid dockershim container": {
names: []string{"containername_podname_podnamespace_poduid_5"},
expectError: true,
},
} {
t.Logf("TestCase %q", desc)
names, labels, err := convertLegacyNameAndLabels(test.names, test.labels)
require.Equal(t, test.expectError, err != nil)
assert.Equal(t, test.expectNames, names)
assert.Equal(t, test.expectLabels, labels)
}
}
// getFakeLegacyContainers returns a list of fake legacy containers.
func getFakeLegacyContainers() []*dockertools.FakeContainer {
return []*dockertools.FakeContainer{
{
ID: "12",
Name: "k8s_POD.hash1_podname_podnamespace_poduid_randomid",
Config: &dockercontainer.Config{
Labels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "POD",
containerHashLabel: "hash1",
containerRestartCountLabel: "0",
},
},
},
{
ID: "34",
Name: "k8s_legacycontainer.hash2_podname_podnamespace_poduid_randomid",
Config: &dockercontainer.Config{
Labels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "legacyContainer",
containerHashLabel: "hash2",
containerRestartCountLabel: "5",
},
},
},
}
}
// getFakeNewContainers returns a list of fake new containers.
func getFakeNewContainers() []*dockertools.FakeContainer {
return []*dockertools.FakeContainer{
{
ID: "56",
Name: "k8s_POD_podname_podnamespace_poduid_0",
Config: &dockercontainer.Config{
Labels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "POD",
containerTypeLabelKey: containerTypeLabelSandbox,
},
},
},
{
ID: "78",
Name: "k8s_newcontainer_podname_podnamespace_poduid_3",
Config: &dockercontainer.Config{
Labels: map[string]string{
types.KubernetesPodNameLabel: "podname",
types.KubernetesPodNamespaceLabel: "podnamespace",
types.KubernetesPodUIDLabel: "poduid",
types.KubernetesContainerNameLabel: "newcontainer",
annotationPrefix + containerHashLabel: "hash4",
annotationPrefix + containerRestartCountLabel: "3",
containerTypeLabelKey: containerTypeLabelContainer,
},
},
},
}
}
func TestListLegacyContainers(t *testing.T) {
ds, fDocker, _ := newTestDockerService()
legacyContainers := getFakeLegacyContainers()
newContainers := getFakeNewContainers()
fDocker.SetFakeContainers(append(newContainers, legacyContainers...))
// ListContainers should list only new containers when legacyCleanup is done.
containers, err := ds.ListContainers(nil)
assert.NoError(t, err)
require.Len(t, containers, 1)
assert.Equal(t, "78", containers[0].Id)
// ListLegacyContainers should list only legacy containers.
containers, err = ds.ListLegacyContainers(nil)
assert.NoError(t, err)
require.Len(t, containers, 1)
assert.Equal(t, "34", containers[0].Id)
// Mark legacyCleanup as not done.
ds.legacyCleanup.done = 0
// ListContainers should list all containers when legacyCleanup is not done.
containers, err = ds.ListContainers(nil)
assert.NoError(t, err)
require.Len(t, containers, 2)
assert.Contains(t, []string{containers[0].Id, containers[1].Id}, "34")
assert.Contains(t, []string{containers[0].Id, containers[1].Id}, "78")
}
func TestListLegacyPodSandbox(t *testing.T) {
ds, fDocker, _ := newTestDockerService()
legacyContainers := getFakeLegacyContainers()
newContainers := getFakeNewContainers()
fDocker.SetFakeContainers(append(newContainers, legacyContainers...))
// ListPodSandbox should list only new sandboxes when legacyCleanup is done.
sandboxes, err := ds.ListPodSandbox(nil)
assert.NoError(t, err)
require.Len(t, sandboxes, 1)
assert.Equal(t, "56", sandboxes[0].Id)
// ListLegacyPodSandbox should list only legacy sandboxes.
sandboxes, err = ds.ListLegacyPodSandbox(nil)
assert.NoError(t, err)
require.Len(t, sandboxes, 1)
assert.Equal(t, "12", sandboxes[0].Id)
// Mark legacyCleanup as not done.
ds.legacyCleanup.done = 0
// ListPodSandbox should list all sandboxes when legacyCleanup is not done.
sandboxes, err = ds.ListPodSandbox(nil)
assert.NoError(t, err)
require.Len(t, sandboxes, 2)
assert.Contains(t, []string{sandboxes[0].Id, sandboxes[1].Id}, "12")
assert.Contains(t, []string{sandboxes[0].Id, sandboxes[1].Id}, "56")
}
func TestCheckLegacyCleanup(t *testing.T) {
for desc, test := range map[string]struct {
containers []*dockertools.FakeContainer
done bool
}{
"no containers": {
containers: []*dockertools.FakeContainer{},
done: true,
},
"only new containers": {
containers: getFakeNewContainers(),
done: true,
},
"only legacy containers": {
containers: getFakeLegacyContainers(),
done: false,
},
"both legacy and new containers": {
containers: append(getFakeNewContainers(), getFakeLegacyContainers()...),
done: false,
},
} {
t.Logf("TestCase %q", desc)
ds, fDocker, _ := newTestDockerService()
fDocker.SetFakeContainers(test.containers)
ds.legacyCleanup.done = 0
clean, err := ds.checkLegacyCleanup()
assert.NoError(t, err)
assert.Equal(t, test.done, clean)
assert.Equal(t, test.done, ds.legacyCleanup.Done())
}
}


@@ -244,12 +244,23 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodS
}
network := &runtimeapi.PodSandboxNetworkStatus{Ip: IP}
netNS := getNetworkNamespace(r)
hostNetwork := sharesHostNetwork(r)
// If the sandbox has no containerTypeLabelKey label, treat it as a legacy sandbox.
if _, ok := r.Config.Labels[containerTypeLabelKey]; !ok {
names, labels, err := convertLegacyNameAndLabels([]string{r.Name}, r.Config.Labels)
if err != nil {
return nil, err
}
r.Name, r.Config.Labels = names[0], labels
// Forcibly trigger infra container restart.
hostNetwork = !hostNetwork
}
metadata, err := parseSandboxName(r.Name)
if err != nil {
return nil, err
}
hostNetwork := sharesHostNetwork(r)
labels, annotations := extractLabels(r.Config.Labels)
return &runtimeapi.PodSandboxStatus{
Id: r.ID,
@@ -318,7 +329,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]
c := containers[i]
converted, err := containerToRuntimeAPISandbox(&c)
if err != nil {
glog.V(4).Infof("Unable to convert docker to runtime API sandbox: %v", err)
glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err)
continue
}
if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY {
@@ -353,6 +364,16 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]
result = append(result, checkpointToRuntimeAPISandbox(id, checkpoint))
}
}
// Include legacy sandboxes if there are still legacy sandboxes not cleaned up yet.
if !ds.legacyCleanup.Done() {
legacySandboxes, err := ds.ListLegacyPodSandbox(filter)
if err != nil {
return nil, err
}
// Legacy sandboxes are always older, so we can safely append them to the end.
result = append(result, legacySandboxes...)
}
return result, nil
}


@@ -153,7 +153,6 @@ func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot str
glog.Infof("Setting cgroupDriver to %s", cgroupDriver)
}
ds.cgroupDriver = cgroupDriver
return ds, nil
}
@@ -179,6 +178,8 @@ type dockerService struct {
// cgroup driver used by Docker runtime.
cgroupDriver string
checkpointHandler CheckpointHandler
// legacyCleanup indicates whether legacy cleanup has finished or not.
legacyCleanup legacyCleanupFlag
}
// Version returns the runtime name, runtime version and runtime API version
@@ -241,6 +242,8 @@ type dockerNetworkHost struct {
// Start initializes and starts components in dockerService.
func (ds *dockerService) Start() error {
// Initialize the legacy cleanup flag.
ds.LegacyCleanupInit()
return ds.containerManager.Start()
}


@@ -41,7 +41,8 @@ func newTestNetworkPlugin(t *testing.T) *mock_network.MockNetworkPlugin {
func newTestDockerService() (*dockerService, *dockertools.FakeDockerClient, *clock.FakeClock) {
fakeClock := clock.NewFakeClock(time.Time{})
c := dockertools.NewFakeDockerClient().WithClock(fakeClock)
return &dockerService{client: c, os: &containertest.FakeOS{}, networkPlugin: &network.NoopNetworkPlugin{}, checkpointHandler: NewTestPersistentCheckpointHandler()}, c, fakeClock
return &dockerService{client: c, os: &containertest.FakeOS{}, networkPlugin: &network.NoopNetworkPlugin{},
legacyCleanup: legacyCleanupFlag{done: 1}, checkpointHandler: NewTestPersistentCheckpointHandler()}, c, fakeClock
}
// TestStatus tests the runtime status logic.


@@ -76,7 +76,6 @@ func makeContainerName(s *runtimeapi.PodSandboxConfig, c *runtimeapi.ContainerCo
s.Metadata.Uid, // 4 sandbox uid
fmt.Sprintf("%d", c.Metadata.Attempt), // 5
}, nameDelimiter)
}
// randomizeName randomizes the container name. This should only be used when we hit the


@@ -23,6 +23,7 @@ import (
"os"
"reflect"
"sort"
"strings"
"sync"
"time"
@@ -232,6 +233,9 @@ func (f *FakeDockerClient) SetFakeContainers(containers []*FakeContainer) {
Names: []string{c.Name},
ID: c.ID,
}
if c.Config != nil {
container.Labels = c.Config.Labels
}
if c.Running {
f.RunningContainerList = append(f.RunningContainerList, container)
} else {
@@ -349,7 +353,30 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio
// TODO(random-liu): Is a fully sorted array needed?
containerList = append(containerList, f.ExitedContainerList...)
}
return containerList, err
// TODO: Support other filters.
// Filter containers with label filter.
labelFilters := options.Filter.Get("label")
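// A docker label filter has the form "key=value"; a container matches only
// if it carries every requested label with exactly the requested value.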
if len(labelFilters) == 0 {
return containerList, err
}
var filtered []dockertypes.Container
for _, container := range containerList {
match := true
for _, labelFilter := range labelFilters {
kv := strings.Split(labelFilter, "=")
if len(kv) != 2 {
return nil, fmt.Errorf("invalid label filter %q", labelFilter)
}
if container.Labels[kv[0]] != kv[1] {
match = false
break
}
}
if match {
filtered = append(filtered, container)
}
}
return filtered, err
}
// InspectContainer is a test-spy implementation of DockerInterface.InspectContainer.