Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #18125 from janetkuo/rollover-test
Auto commit by PR queue bot
Commit 671b5982cf
@@ -22,6 +22,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/apis/extensions"
     deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
+    "k8s.io/kubernetes/pkg/util/intstr"
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -39,20 +40,43 @@ var _ = Describe("Deployment", func() {
     It("deployment should scale up and down in the right order", func() {
         testRollingUpdateDeploymentEvents(f)
     })
+    It("deployment should support rollover", func() {
+        testRolloverDeployment(f)
+    })
 })
 
-func testNewDeployment(f *Framework) {
-    ns := f.Namespace.Name
-    c := f.Client
-    deploymentName := "nginx-deployment"
-    podLabels := map[string]string{"name": "nginx"}
-    Logf("Creating simple deployment %s", deploymentName)
-    _, err := c.Deployments(ns).Create(&extensions.Deployment{
+func newRC(rcName string, replicas int, rcPodLabels map[string]string, imageName string, image string) *api.ReplicationController {
+    return &api.ReplicationController{
+        ObjectMeta: api.ObjectMeta{
+            Name: rcName,
+        },
+        Spec: api.ReplicationControllerSpec{
+            Replicas: replicas,
+            Selector: rcPodLabels,
+            Template: &api.PodTemplateSpec{
+                ObjectMeta: api.ObjectMeta{
+                    Labels: rcPodLabels,
+                },
+                Spec: api.PodSpec{
+                    Containers: []api.Container{
+                        {
+                            Name:  imageName,
+                            Image: image,
+                        },
+                    },
+                },
+            },
+        },
+    }
+}
+
+func newDeployment(deploymentName string, replicas int, podLabels map[string]string, imageName string, image string) *extensions.Deployment {
+    return &extensions.Deployment{
         ObjectMeta: api.ObjectMeta{
             Name: deploymentName,
         },
         Spec: extensions.DeploymentSpec{
-            Replicas: 1,
+            Replicas: replicas,
             Selector: podLabels,
             UniqueLabelKey: extensions.DefaultDeploymentUniqueLabelKey,
             Template: api.PodTemplateSpec{
@@ -62,14 +86,24 @@ func testNewDeployment(f *Framework) {
                 Spec: api.PodSpec{
                     Containers: []api.Container{
                         {
-                            Name:  "nginx",
-                            Image: "nginx",
+                            Name:  imageName,
+                            Image: image,
                         },
                     },
                 },
             },
         },
-    })
+    }
+}
+
+func testNewDeployment(f *Framework) {
+    ns := f.Namespace.Name
+    c := f.Client
+    deploymentName := "nginx-deployment"
+    podLabels := map[string]string{"name": "nginx"}
+    replicas := 1
+    Logf("Creating simple deployment %s", deploymentName)
+    _, err := c.Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
     defer func() {
         deployment, err := c.Deployments(ns).Get(deploymentName)
@@ -86,7 +120,7 @@ func testNewDeployment(f *Framework) {
     Expect(err).NotTo(HaveOccurred())
 
     // Verify that the required pods have come up.
-    err = verifyPods(c, ns, "nginx", false, 1)
+    err = verifyPods(c, ns, "nginx", false, replicas)
     if err != nil {
         Logf("error in waiting for pods to come up: %s", err)
         Expect(err).NotTo(HaveOccurred())
@@ -94,8 +128,8 @@ func testNewDeployment(f *Framework) {
     // DeploymentStatus should be appropriately updated.
     deployment, err = c.Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
-    Expect(deployment.Status.Replicas).Should(Equal(1))
-    Expect(deployment.Status.UpdatedReplicas).Should(Equal(1))
+    Expect(deployment.Status.Replicas).Should(Equal(replicas))
+    Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))
 }
 
 func testRollingUpdateDeployment(f *Framework) {
@@ -109,28 +143,8 @@ func testRollingUpdateDeployment(f *Framework) {
     }
 
     rcName := "nginx-controller"
-    _, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
-        ObjectMeta: api.ObjectMeta{
-            Name: rcName,
-        },
-        Spec: api.ReplicationControllerSpec{
-            Replicas: 3,
-            Selector: rcPodLabels,
-            Template: &api.PodTemplateSpec{
-                ObjectMeta: api.ObjectMeta{
-                    Labels: rcPodLabels,
-                },
-                Spec: api.PodSpec{
-                    Containers: []api.Container{
-                        {
-                            Name:  "nginx",
-                            Image: "nginx",
-                        },
-                    },
-                },
-            },
-        },
-    })
+    replicas := 3
+    _, err := c.ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
     defer func() {
         Logf("deleting replication controller %s", rcName)
@@ -146,30 +160,7 @@ func testRollingUpdateDeployment(f *Framework) {
     // Create a deployment to delete nginx pods and instead bring up redis pods.
     deploymentName := "redis-deployment"
     Logf("Creating deployment %s", deploymentName)
-    newDeployment := extensions.Deployment{
-        ObjectMeta: api.ObjectMeta{
-            Name: deploymentName,
-        },
-        Spec: extensions.DeploymentSpec{
-            Replicas: 3,
-            Selector: deploymentPodLabels,
-            UniqueLabelKey: extensions.DefaultDeploymentUniqueLabelKey,
-            Template: api.PodTemplateSpec{
-                ObjectMeta: api.ObjectMeta{
-                    Labels: deploymentPodLabels,
-                },
-                Spec: api.PodSpec{
-                    Containers: []api.Container{
-                        {
-                            Name:  "redis",
-                            Image: "redis",
-                        },
-                    },
-                },
-            },
-        },
-    }
-    _, err = c.Deployments(ns).Create(&newDeployment)
+    _, err = c.Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis"))
     Expect(err).NotTo(HaveOccurred())
     defer func() {
         deployment, err := c.Deployments(ns).Get(deploymentName)
@@ -182,7 +173,7 @@ func testRollingUpdateDeployment(f *Framework) {
         Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
     }()
 
-    err = waitForDeploymentStatus(c, ns, deploymentName, 3, 2, 4, 0)
+    err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
     Expect(err).NotTo(HaveOccurred())
 }
 
@@ -196,28 +187,8 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
         "pod": "nginx",
     }
     rcName := "nginx-controller"
-    _, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
-        ObjectMeta: api.ObjectMeta{
-            Name: rcName,
-        },
-        Spec: api.ReplicationControllerSpec{
-            Replicas: 1,
-            Selector: rcPodLabels,
-            Template: &api.PodTemplateSpec{
-                ObjectMeta: api.ObjectMeta{
-                    Labels: rcPodLabels,
-                },
-                Spec: api.PodSpec{
-                    Containers: []api.Container{
-                        {
-                            Name:  "nginx",
-                            Image: "nginx",
-                        },
-                    },
-                },
-            },
-        },
-    })
+    replicas := 1
+    _, err := c.ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
     defer func() {
         Logf("deleting replication controller %s", rcName)
@@ -233,30 +204,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
     // Create a deployment to delete nginx pods and instead bring up redis pods.
     deploymentName := "redis-deployment-2"
     Logf("Creating deployment %s", deploymentName)
-    newDeployment := extensions.Deployment{
-        ObjectMeta: api.ObjectMeta{
-            Name: deploymentName,
-        },
-        Spec: extensions.DeploymentSpec{
-            Replicas: 1,
-            Selector: deploymentPodLabels,
-            UniqueLabelKey: extensions.DefaultDeploymentUniqueLabelKey,
-            Template: api.PodTemplateSpec{
-                ObjectMeta: api.ObjectMeta{
-                    Labels: deploymentPodLabels,
-                },
-                Spec: api.PodSpec{
-                    Containers: []api.Container{
-                        {
-                            Name:  "redis",
-                            Image: "redis",
-                        },
-                    },
-                },
-            },
-        },
-    }
-    _, err = c.Deployments(ns).Create(&newDeployment)
+    _, err = c.Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis"))
     Expect(err).NotTo(HaveOccurred())
     defer func() {
         deployment, err := c.Deployments(ns).Get(deploymentName)
@@ -269,7 +217,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
         Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
     }()
 
-    err = waitForDeploymentStatus(c, ns, deploymentName, 1, 0, 2, 0)
+    err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
     Expect(err).NotTo(HaveOccurred())
     // Verify that the pods were scaled up and down as expected. We use events to verify that.
     deployment, err := c.Deployments(ns).Get(deploymentName)
@@ -288,3 +236,89 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
     Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 1", newRC.Name)))
     Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
 }
+
+// testRolloverDeployment tests that deployment supports rollover.
+// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
+func testRolloverDeployment(f *Framework) {
+    ns := f.Namespace.Name
+    c := f.Client
+    podName := "rollover-pod"
+    deploymentPodLabels := map[string]string{"name": podName}
+    rcPodLabels := map[string]string{
+        "name": podName,
+        "pod":  "nginx",
+    }
+
+    rcName := "nginx-controller"
+    rcReplicas := 4
+    _, err := c.ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
+    Expect(err).NotTo(HaveOccurred())
+    defer func() {
+        Logf("deleting replication controller %s", rcName)
+        Expect(c.ReplicationControllers(ns).Delete(rcName)).NotTo(HaveOccurred())
+    }()
+    // Verify that the required pods have come up.
+    err = verifyPods(c, ns, podName, false, rcReplicas)
+    if err != nil {
+        Logf("error in waiting for pods to come up: %s", err)
+        Expect(err).NotTo(HaveOccurred())
+    }
+
+    // Create a deployment to delete nginx pods and instead bring up redis-slave pods.
+    deploymentName, deploymentImageName := "redis-deployment", "redis-slave"
+    deploymentReplicas := 4
+    deploymentImage := "gcr.io/google_samples/gb-redisslave:v1"
+    deploymentMinReadySeconds := 5
+    Logf("Creating deployment %s", deploymentName)
+    newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage)
+    newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
+        MaxUnavailable:  intstr.FromInt(1),
+        MaxSurge:        intstr.FromInt(1),
+        MinReadySeconds: deploymentMinReadySeconds,
+    }
+    _, err = c.Deployments(ns).Create(newDeployment)
+    Expect(err).NotTo(HaveOccurred())
+    defer func() {
+        deployment, err := c.Deployments(ns).Get(deploymentName)
+        Expect(err).NotTo(HaveOccurred())
+        Logf("deleting deployment %s", deploymentName)
+        Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
+        // TODO: remove this once we can delete rcs with deployment
+        newRC, err := deploymentutil.GetNewRC(*deployment, c)
+        Expect(err).NotTo(HaveOccurred())
+        Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
+    }()
+    // Verify that the pods were scaled up and down as expected. We use events to verify that.
+    deployment, err := c.Deployments(ns).Get(deploymentName)
+    // Make sure the deployment starts to scale up and down RCs
+    waitForPartialEvents(c, ns, deployment, 2)
+    events, err := c.Events(ns).Search(deployment)
+    if err != nil {
+        Logf("error in listing events: %s", err)
+        Expect(err).NotTo(HaveOccurred())
+    }
+    newRC, err := deploymentutil.GetNewRC(*deployment, c)
+    Expect(err).NotTo(HaveOccurred())
+    Expect(newRC).NotTo(Equal(nil))
+
+    // Before the deployment finishes, update the deployment to rollover the above 2 rcs and bring up redis pods.
+    // If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it.
+    Expect(len(events.Items)).Should(BeNumerically("<", deploymentReplicas))
+    updatedDeploymentImage := "redis"
+    newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
+    newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
+    Logf("updating deployment %s", deploymentName)
+    _, err = c.Deployments(ns).Update(newDeployment)
+    Expect(err).NotTo(HaveOccurred())
+
+    err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds)
+    Expect(err).NotTo(HaveOccurred())
+
+    // Make sure updated deployment contains "redis" image
+    deployment, err = c.Deployments(ns).Get(deploymentName)
+    Expect(err).NotTo(HaveOccurred())
+    Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(updatedDeploymentImage))
+    // Make sure new RC contains "redis" image
+    newRC, err = deploymentutil.GetNewRC(*deployment, c)
+    Expect(newRC.Spec.Template.Spec.Containers[0].Image).Should(Equal(updatedDeploymentImage))
+}
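Taken together, the hunks above replace the per-test api.ReplicationController and extensions.Deployment struct literals with the shared newRC and newDeployment constructors. A minimal sketch of the resulting call pattern, assuming the same e2e test package (the function name exampleHelperUsage is illustrative and not part of this change; every other identifier is defined in the diff above):

// Illustrative sketch only, not part of the diff.
func exampleHelperUsage(f *Framework) {
    ns := f.Namespace.Name
    c := f.Client
    podLabels := map[string]string{"name": "nginx"}

    // Build an RC and a Deployment with one call each instead of repeating
    // the full struct literals in every test.
    _, err := c.ReplicationControllers(ns).Create(newRC("nginx-controller", 3, podLabels, "nginx", "nginx"))
    Expect(err).NotTo(HaveOccurred())

    _, err = c.Deployments(ns).Create(newDeployment("redis-deployment", 3, podLabels, "redis", "redis"))
    Expect(err).NotTo(HaveOccurred())
}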
@@ -1952,7 +1952,7 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
         return false, fmt.Errorf("old RCs are not fully scaled down")
     }
     if deploymentutil.GetReplicaCountForRCs([]*api.ReplicationController{newRC}) != desiredUpdatedReplicas {
-        return false, fmt.Errorf("new RCs is not fully scaled up")
+        return false, fmt.Errorf("new RC is not fully scaled up")
     }
     return true, nil
 }
@@ -1979,6 +1979,21 @@ func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desired
     })
 }
 
+// Waits for the number of events on the given object to be at least a desired count.
+func waitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
+    return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
+        events, err := c.Events(ns).Search(objOrRef)
+        if err != nil {
+            return false, fmt.Errorf("error in listing events: %s", err)
+        }
+        eventsCount := len(events.Items)
+        if eventsCount >= atLeastEventsCount {
+            return true, nil
+        }
+        return false, nil
+    })
+}
+
 // FailedContainers inspects all containers in a pod and returns failure
 // information for containers that have failed or been restarted.
 // A map is returned where the key is the containerID and the value is a
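For context, waitForPartialEvents is what lets testRolloverDeployment update the deployment while the first rollout is still in flight: the test waits until at least two scaling events have been recorded, then mutates the pod template. A minimal sketch of that call pattern, assuming the same e2e package (the wrapper name waitUntilRolloutStarted is illustrative, not part of this change):

// Illustrative sketch only, not part of the diff.
func waitUntilRolloutStarted(c *client.Client, ns, deploymentName string) error {
    deployment, err := c.Deployments(ns).Get(deploymentName)
    if err != nil {
        return err
    }
    // Two events ("Scaled up rc ..." and "Scaled down rc ...") indicate the
    // deployment controller has started moving replicas between RCs.
    return waitForPartialEvents(c, ns, deployment, 2)
}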