Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-08-20 00:48:04 +00:00)
virtcontainers: reimplement sandbox cgroup
All containers run in different cgroups, even the sandbox. With this new implementation the sandbox CPU cgroup will be equal to the sum of all its containers, and the hypervisor process will be placed there, impacting the containers running in the sandbox (VM). The default number of vcpus is used when the sandbox has no constraints. For example, if default_vcpus is 2, then the quota will be 200000 and the period 100000.

**c-ray test**

http://www.futuretech.blinkenlights.nl/c-ray.html

```
+=========+==================+================+
|         | 6 threads 6 cpus | 1 thread 1 cpu |
+=========+==================+================+
| current | 40 seconds       | 122 seconds    |
+=========+==================+================+
| new     | 37 seconds       | 124 seconds    |
+=========+==================+================+
```

current = current cgroups implementation
new = new cgroups implementation

**workload**

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: c-ray
  annotations:
    io.kubernetes.cri.untrusted-workload: "true"
spec:
  restartPolicy: Never
  containers:
  - name: c-ray-1
    image: docker.io/devimc/c-ray:latest
    imagePullPolicy: IfNotPresent
    args: ["-t", "6", "-s", "1600x1200", "-r", "8", "-i", "/c-ray-1.1/sphfract", "-o", "/tmp/output.ppm"]
    resources:
      limits:
        cpu: 6
  - name: c-ray-2
    image: docker.io/devimc/c-ray:latest
    imagePullPolicy: IfNotPresent
    args: ["-t", "1", "-s", "1600x1200", "-r", "8", "-i", "/c-ray-1.1/sphfract", "-o", "/tmp/output.ppm"]
    resources:
      limits:
        cpu: 1
```

fixes #1153

Signed-off-by: Julio Montes <julio.montes@intel.com>
This commit is contained in:
parent 9758cdba7c
commit 5201860bb0
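As a quick illustration of the quota arithmetic described in the commit message, here is a minimal sketch with hypothetical names (`cpuLimit`, `sandboxCPULimit` are not the runtime's API; the real logic lives in `cpuResources()` in the diff below): per-container CFS quotas are summed, the largest period wins, and an unconstrained sandbox falls back to `default_vcpus * 100000`.

```go
package main

import "fmt"

// cpuLimit mirrors the quota/period pair from the OCI LinuxCPU spec.
type cpuLimit struct {
	quota  int64  // CFS quota in microseconds; 0 means unconstrained
	period uint64 // CFS period in microseconds
}

// sandboxCPULimit is a hypothetical helper: sum the container quotas,
// keep the largest period, and fall back to defaultVCPUs*100000 when
// no container carries a CPU constraint.
func sandboxCPULimit(containers []cpuLimit, defaultVCPUs uint32) cpuLimit {
	var total cpuLimit
	for _, c := range containers {
		total.quota += c.quota
		if c.period > total.period {
			total.period = c.period
		}
	}
	if total.quota == 0 && total.period == 0 {
		total.quota = int64(defaultVCPUs) * 100000
		total.period = 100000
	}
	return total
}

func main() {
	// The two c-ray containers above: cpu:6 and cpu:1.
	l := sandboxCPULimit([]cpuLimit{
		{quota: 600000, period: 100000},
		{quota: 100000, period: 100000},
	}, 2)
	fmt.Printf("quota=%d period=%d\n", l.quota, l.period) // quota=700000 period=100000

	// No constraints: default_vcpus=2 gives quota 200000, period 100000.
	fmt.Println(sandboxCPULimit(nil, 2)) // {200000 100000}
}
```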
@@ -121,11 +121,6 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 		return nil, err
 	}
 
-	// Setup host cgroups
-	if err = s.setupCgroups(); err != nil {
-		return nil, err
-	}
-
 	return s, nil
 }
 
@@ -18,6 +18,7 @@ import (
 	"testing"
 
 	"github.com/containernetworking/plugins/pkg/ns"
+	"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
 	"github.com/kata-containers/runtime/virtcontainers/store"
@@ -31,13 +32,15 @@ const (
 )
 
 var sandboxAnnotations = map[string]string{
 	"sandbox.foo":   "sandbox.bar",
 	"sandbox.hello": "sandbox.world",
+	annotations.ConfigJSONKey: `{"linux":{"resources":{}}}`,
 }
 
 var containerAnnotations = map[string]string{
 	"container.foo":   "container.bar",
 	"container.hello": "container.world",
+	annotations.ConfigJSONKey: `{"linux":{"resources":{}}}`,
 }
 
 func newBasicTestCmd() types.Cmd {
@@ -1,4 +1,5 @@
 // Copyright (c) 2018 Huawei Corporation
+// Copyright (c) 2019 Intel Corporation
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -6,95 +7,218 @@
 package virtcontainers
 
 import (
-	"encoding/json"
+	"bufio"
 	"fmt"
+	"math"
+	"os"
+	"path/filepath"
+	"strings"
 
 	"github.com/containerd/cgroups"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-const (
-	vcpuGroupName       = "vcpu"
-	defaultCgroupParent = "/kata"
-)
-
-type sandboxCgroups struct {
-	commonParent cgroups.Cgroup
-	sandboxSub   cgroups.Cgroup
-	vcpuSub      cgroups.Cgroup
-}
-
-func (s *Sandbox) newCgroups() error {
-	// New will still succeed when cgroup exists
-	// create common parent for all kata-containers
-	// e.g. /sys/fs/cgroup/cpu/vc
-	parent, err := cgroups.New(cgroups.V1,
-		cgroups.StaticPath(defaultCgroupParent), &specs.LinuxResources{})
-	if err != nil {
-		return fmt.Errorf("failed to create cgroup for %q", defaultCgroupParent)
-	}
-
-	// create sub-cgroup for each sandbox
-	// e.g. /sys/fs/cgroup/cpu/vc/<sandbox>
-	sandboxSub, err := parent.New(s.id, &specs.LinuxResources{})
-	if err != nil {
-		return fmt.Errorf("failed to create cgroup for %s/%s", defaultCgroupParent, s.id)
-	}
-
-	// create sub-cgroup for vcpu threads
-	vcpuSub, err := sandboxSub.New(vcpuGroupName, &specs.LinuxResources{})
-	if err != nil {
-		return fmt.Errorf("failed to create cgroup for %s/%s/%s", defaultCgroupParent, s.id, vcpuGroupName)
-	}
-
-	s.cgroup = &sandboxCgroups{
-		commonParent: parent,
-		sandboxSub:   sandboxSub,
-		vcpuSub:      vcpuSub,
-	}
-	return nil
-}
-
-func (s *Sandbox) destroyCgroups() error {
-	if s.cgroup == nil {
-		s.Logger().Warningf("cgroup is not initialized, no need to destroy")
-		return nil
-	}
-
-	// first move all processes in subgroup to parent in case live process blocks
-	// deletion of cgroup
-	if err := s.cgroup.sandboxSub.MoveTo(s.cgroup.commonParent); err != nil {
-		return fmt.Errorf("failed to clear cgroup processes")
-	}
-
-	return s.cgroup.sandboxSub.Delete()
-}
-
-func (s *Sandbox) setupCgroups() error {
-	if s.cgroup == nil {
-		return fmt.Errorf("failed to setup uninitialized cgroup for sandbox")
-	}
-
-	resource, err := s.mergeSpecResource()
-	if err != nil {
-		return err
-	}
-
-	if err := s.applyCPUCgroup(resource); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *Sandbox) applyCPUCgroup(rc *specs.LinuxResources) error {
-	if s.cgroup == nil {
-		return fmt.Errorf("failed to setup uninitialized cgroup for sandbox")
-	}
-
-	// apply cpu constraint to vcpu cgroup
-	if err := s.cgroup.vcpuSub.Update(rc); err != nil {
-		return err
+type cgroupPather interface {
+	cgroups.Subsystem
+	Path(path string) string
+}
+
+// unconstrained cgroups are placed here.
+// for example /sys/fs/cgroup/memory/kata/$CGPATH
+// where path is defined by the containers manager
+const cgroupKataPath = "/kata/"
+
+var cgroupsLoadFunc = cgroups.Load
+var cgroupsNewFunc = cgroups.New
+
+// V1Constraints returns the cgroups that are compatible with the VC architecture
+// and hypervisor; constraints can be applied to these cgroups.
+func V1Constraints() ([]cgroups.Subsystem, error) {
+	root, err := cgroupV1MountPoint()
+	if err != nil {
+		return nil, err
+	}
+	subsystems := []cgroups.Subsystem{
+		cgroups.NewCputset(root),
+		cgroups.NewCpu(root),
+		cgroups.NewCpuacct(root),
+	}
+	return cgroupsSubsystems(subsystems)
+}
+
+// V1NoConstraints returns the cgroups that are *not* compatible with the VC
+// architecture and hypervisor; constraints MUST NOT be applied to these cgroups.
+func V1NoConstraints() ([]cgroups.Subsystem, error) {
+	root, err := cgroupV1MountPoint()
+	if err != nil {
+		return nil, err
+	}
+	subsystems := []cgroups.Subsystem{
+		// Some container managers, like k8s, take control of cgroups.
+		// k8s: the memory cgroup for the dns containers is too small to place
+		// a hypervisor there.
+		cgroups.NewMemory(root),
+	}
+	return cgroupsSubsystems(subsystems)
+}
+
+func cgroupsSubsystems(subsystems []cgroups.Subsystem) ([]cgroups.Subsystem, error) {
+	var enabled []cgroups.Subsystem
+	for _, s := range cgroupPathers(subsystems) {
+		// check and remove the default groups that do not exist
+		if _, err := os.Lstat(s.Path("/")); err == nil {
+			enabled = append(enabled, s)
+		}
+	}
+	return enabled, nil
+}
+
+func cgroupPathers(subystems []cgroups.Subsystem) []cgroupPather {
+	var out []cgroupPather
+	for _, s := range subystems {
+		if p, ok := s.(cgroupPather); ok {
+			out = append(out, p)
+		}
+	}
+	return out
+}
+
+// v1MountPoint returns the mount point where the cgroup
+// mountpoints are mounted in a single hierarchy
+func cgroupV1MountPoint() (string, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		if err := scanner.Err(); err != nil {
+			return "", err
+		}
+		var (
+			text = scanner.Text()
+			fields = strings.Split(text, " ")
+			// safe as mountinfo encodes mountpoints with spaces as \040.
+			index = strings.Index(text, " - ")
+			postSeparatorFields = strings.Fields(text[index+3:])
+			numPostFields = len(postSeparatorFields)
+		)
+		// this is an error as we can't detect if the mount is for "cgroup"
+		if numPostFields == 0 {
+			return "", fmt.Errorf("Found no fields post '-' in %q", text)
+		}
+		if postSeparatorFields[0] == "cgroup" {
+			// check that the mount is properly formatted.
+			if numPostFields < 3 {
+				return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+			}
+			return filepath.Dir(fields[4]), nil
+		}
+	}
+	return "", cgroups.ErrMountPointNotExist
+}
+
+func cgroupNoConstraintsPath(path string) string {
+	return filepath.Join(cgroupKataPath, path)
+}
+
+// return the parent cgroup for the given path
+func parentCgroup(path string) (cgroups.Cgroup, error) {
+	// append '/' just in case CgroupsPath doesn't start with it
+	parent := filepath.Dir("/" + path)
+
+	parentCgroup, err := cgroupsLoadFunc(cgroups.V1,
+		cgroups.StaticPath(parent))
+	if err != nil {
+		return nil, fmt.Errorf("Could not load parent cgroup %v: %v", parent, err)
+	}
+
+	return parentCgroup, nil
+}
+
+func (s *Sandbox) updateCgroups() error {
+	if s.state.CgroupPath == "" {
+		s.Logger().Warn("sandbox's cgroup won't be updated: cgroup path is empty")
+		return nil
+	}
+
+	cgroup, err := cgroupsLoadFunc(V1Constraints, cgroups.StaticPath(s.state.CgroupPath))
+	if err != nil {
+		return fmt.Errorf("Could not load cgroup %v: %v", s.state.CgroupPath, err)
+	}
+
+	if err := s.constrainHypervisor(cgroup); err != nil {
+		return err
+	}
+
+	if len(s.containers) <= 1 {
+		// nothing to update
+		return nil
+	}
+
+	resources, err := s.resources()
+	if err != nil {
+		return err
+	}
+
+	if err := cgroup.Update(&resources); err != nil {
+		return fmt.Errorf("Could not update cgroup %v: %v", s.state.CgroupPath, err)
+	}
+
+	return nil
+}
+
+func (s *Sandbox) deleteCgroups() error {
+	s.Logger().Debug("Deleting sandbox cgroup")
+
+	path := cgroupNoConstraintsPath(s.state.CgroupPath)
+	s.Logger().WithField("path", path).Debug("Deleting no constraints cgroup")
+	noConstraintsCgroup, err := cgroupsLoadFunc(V1NoConstraints, cgroups.StaticPath(path))
+	if err == cgroups.ErrCgroupDeleted {
+		// cgroup already deleted
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("Could not load cgroup without constraints %v: %v", path, err)
+	}
+
+	// move running processes here, that way the cgroup can be removed
+	parent, err := parentCgroup(path)
+	if err != nil {
+		// parent cgroup doesn't exist, that means there are no processes running
+		// and the no constraints cgroup was removed.
+		s.Logger().WithError(err).Warn("Parent cgroup doesn't exist")
+		return nil
+	}
+
+	if err := noConstraintsCgroup.MoveTo(parent); err != nil {
+		// Don't fail, cgroup can be deleted
+		s.Logger().WithError(err).Warn("Could not move processes from no constraints to parent cgroup")
+	}
+
+	return noConstraintsCgroup.Delete()
+}
+
+func (s *Sandbox) constrainHypervisor(cgroup cgroups.Cgroup) error {
+	pid := s.hypervisor.pid()
+	if pid <= 0 {
+		return fmt.Errorf("Invalid hypervisor PID: %d", pid)
+	}
+
+	// Move hypervisor into cgroups without constraints,
+	// those cgroups are not yet supported.
+	resources := &specs.LinuxResources{}
+	path := cgroupNoConstraintsPath(s.state.CgroupPath)
+	noConstraintsCgroup, err := cgroupsNewFunc(V1NoConstraints, cgroups.StaticPath(path), resources)
+	if err != nil {
+		return fmt.Errorf("Could not create cgroup %v: %v", path, err)
+	}
+
+	if err := noConstraintsCgroup.Add(cgroups.Process{Pid: pid}); err != nil {
+		return fmt.Errorf("Could not add hypervisor PID %d to cgroup %v: %v", pid, path, err)
 	}
 
 	// when new container joins, new CPU could be hotplugged, so we
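As context for `cgroupV1MountPoint` above: /proc/self/mountinfo separates the per-mount fields from the filesystem type with a literal " - ", and field 5 (index 4) is the mount point, which is why the function returns `filepath.Dir(fields[4])`. A standalone sketch against an illustrative (not captured from a real host) mountinfo line:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Illustrative cgroup v1 entry; " - " splits the mount fields
	// from "fstype source super-options".
	line := `30 25 0:26 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid - cgroup cgroup rw,cpu,cpuacct`

	fields := strings.Split(line, " ")
	post := strings.Fields(line[strings.Index(line, " - ")+3:])

	fmt.Println(post[0])                 // cgroup  (the fstype check)
	fmt.Println(filepath.Dir(fields[4])) // /sys/fs/cgroup (the single v1 hierarchy root)
}
```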
@@ -103,33 +227,21 @@ func (s *Sandbox) applyCPUCgroup(rc *specs.LinuxResources) error {
 	if err != nil {
 		return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
 	}
-	if tids == nil {
+	if tids == nil || len(tids.vcpus) == 0 {
 		// If there's no tid returned from the hypervisor, this is not
 		// a bug. It simply means there is nothing to constrain, hence
 		// let's return without any error from here.
 		return nil
 	}
 
-	// use Add() to add vcpu thread to s.cgroup, it will write thread id to
-	// `cgroup.procs` which will move all threads in qemu process to this cgroup
-	// immediately as default behaviour.
-	if len(tids.vcpus) > 0 {
-		if err := s.cgroup.sandboxSub.Add(cgroups.Process{
-			Pid: tids.vcpus[0],
-		}); err != nil {
-			return err
-		}
-	}
-
+	// We are about to move just the vcpus (threads) into cgroups with constraints.
+	// Moving the whole hypervisor process would be easier, but the IO/network
+	// performance would be impacted.
 	for _, i := range tids.vcpus {
-		if i <= 0 {
-			continue
-		}
-
 		// In contrast, AddTask will write thread id to `tasks`
 		// After this, vcpu threads are in "vcpu" sub-cgroup, other threads in
 		// qemu will be left in parent cgroup untouched.
-		if err := s.cgroup.vcpuSub.AddTask(cgroups.Process{
+		if err := cgroup.AddTask(cgroups.Process{
 			Pid: i,
 		}); err != nil {
 			return err
@@ -139,59 +251,107 @@ func (s *Sandbox) applyCPUCgroup(rc *specs.LinuxResources) error {
 	return nil
 }
 
-func (s *Sandbox) mergeSpecResource() (*specs.LinuxResources, error) {
-	if s.config == nil {
-		return nil, fmt.Errorf("sandbox config is nil")
+func (s *Sandbox) resources() (specs.LinuxResources, error) {
+	resources := specs.LinuxResources{
+		CPU: s.cpuResources(),
 	}
 
-	resource := &specs.LinuxResources{
-		CPU: &specs.LinuxCPU{},
+	return resources, nil
+}
+
+func (s *Sandbox) cpuResources() *specs.LinuxCPU {
+	quota := int64(0)
+	period := uint64(0)
+	shares := uint64(0)
+	realtimePeriod := uint64(0)
+	realtimeRuntime := int64(0)
+
+	cpu := &specs.LinuxCPU{
+		Quota:           &quota,
+		Period:          &period,
+		Shares:          &shares,
+		RealtimePeriod:  &realtimePeriod,
+		RealtimeRuntime: &realtimeRuntime,
 	}
 
-	for _, c := range s.config.Containers {
-		config, ok := c.Annotations[annotations.ConfigJSONKey]
-		if !ok {
-			s.Logger().WithField("container", c.ID).Warningf("failed to find config from container annotations")
+	for _, c := range s.containers {
+		ann := c.GetAnnotations()
+		if ann[annotations.ContainerTypeKey] == string(PodSandbox) {
+			// skip sandbox container
 			continue
 		}
 
-		var spec specs.Spec
-		if err := json.Unmarshal([]byte(config), &spec); err != nil {
-			return nil, err
-		}
-
-		// TODO: how to handle empty/unlimited resource?
-		// maybe we should add a default CPU/Memory delta when no
-		// resource limit is given. -- @WeiZhang555
-		if spec.Linux == nil || spec.Linux.Resources == nil {
+		if c.state.Resources.CPU == nil {
 			continue
 		}
-		// calculate cpu quota and period
-		s.mergeCPUResource(resource, spec.Linux.Resources)
-	}
-	return resource, nil
-}
 
-func (s *Sandbox) mergeCPUResource(orig, rc *specs.LinuxResources) {
-	if orig.CPU == nil {
-		orig.CPU = &specs.LinuxCPU{}
-	}
+		if c.state.Resources.CPU.Shares != nil {
+			shares = uint64(math.Max(float64(*c.state.Resources.CPU.Shares), float64(shares)))
+		}
 
-	if rc.CPU != nil && rc.CPU.Quota != nil && rc.CPU.Period != nil &&
-		*rc.CPU.Quota > 0 && *rc.CPU.Period > 0 {
-		if orig.CPU.Period == nil {
-			orig.CPU.Period = rc.CPU.Period
-			orig.CPU.Quota = rc.CPU.Quota
-		} else {
-			// this is an example to show how it works:
-			// container A and `orig` has quota: 5000 and period 10000
-			// here comes container B with quota 40 and period 100,
-			// so use previous period 10000 as a baseline, container B
-			// has proportional resource of quota 4000 and period 10000, calculated as
-			// delta := 40 / 100 * 10000 = 4000
-			// and final `*orig.CPU.Quota` = 5000 + 4000 = 9000
-			delta := float64(*rc.CPU.Quota) / float64(*rc.CPU.Period) * float64(*orig.CPU.Period)
-			*orig.CPU.Quota += int64(delta)
+		if c.state.Resources.CPU.Quota != nil {
+			quota += *c.state.Resources.CPU.Quota
+		}
+
+		if c.state.Resources.CPU.Period != nil {
+			period = uint64(math.Max(float64(*c.state.Resources.CPU.Period), float64(period)))
+		}
+
+		if c.state.Resources.CPU.Cpus != "" {
+			cpu.Cpus += c.state.Resources.CPU.Cpus + ","
+		}
+
+		if c.state.Resources.CPU.RealtimeRuntime != nil {
+			realtimeRuntime += *c.state.Resources.CPU.RealtimeRuntime
+		}
+
+		if c.state.Resources.CPU.RealtimePeriod != nil {
+			realtimePeriod += *c.state.Resources.CPU.RealtimePeriod
+		}
+
+		if c.state.Resources.CPU.Mems != "" {
+			cpu.Mems += c.state.Resources.CPU.Mems + ","
 		}
 	}
+
+	cpu.Cpus = strings.Trim(cpu.Cpus, " \n\t,")
+
+	// use a default constraint for sandboxes without cpu constraints
+	if period == uint64(0) && quota == int64(0) {
+		// set a quota and period equal to the default number of vcpus
+		quota = int64(s.config.HypervisorConfig.NumVCPUs) * 100000
+		period = 100000
+	}
+
+	return validCPUResources(cpu)
+}
+
+// validCPUResources checks CPU resources coherency
+func validCPUResources(cpuSpec *specs.LinuxCPU) *specs.LinuxCPU {
+	if cpuSpec == nil {
+		return nil
+	}
+
+	cpu := *cpuSpec
+	if cpu.Period != nil && *cpu.Period < 1 {
+		cpu.Period = nil
+	}
+
+	if cpu.Quota != nil && *cpu.Quota < 1 {
+		cpu.Quota = nil
+	}
+
+	if cpu.Shares != nil && *cpu.Shares < 1 {
+		cpu.Shares = nil
+	}
+
+	if cpu.RealtimePeriod != nil && *cpu.RealtimePeriod < 1 {
+		cpu.RealtimePeriod = nil
+	}
+
+	if cpu.RealtimeRuntime != nil && *cpu.RealtimeRuntime < 1 {
+		cpu.RealtimeRuntime = nil
+	}
+
+	return &cpu
 }
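The `updateCgroups` path shown above follows the usual containerd/cgroups v1 flow, which the diff itself uses: load an existing cgroup by static path, then write the merged resources into it. A minimal standalone sketch (the `/mysandbox` path is hypothetical; root privileges and cgroup v1 are assumed):

```go
package main

import (
	"fmt"

	"github.com/containerd/cgroups"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	quota := int64(200000)
	period := uint64(100000)

	// Load a cgroup that the container manager already created
	// (hypothetical path; fails without root or the cgroup itself).
	control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/mysandbox"))
	if err != nil {
		fmt.Println("load:", err)
		return
	}

	// Apply the aggregated sandbox constraint.
	err = control.Update(&specs.LinuxResources{
		CPU: &specs.LinuxCPU{Quota: &quota, Period: &period},
	})
	fmt.Println("update:", err)
}
```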
@@ -6,206 +6,188 @@
 package virtcontainers
 
 import (
-	"bufio"
-	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
+	"os/exec"
 	"path/filepath"
-	"reflect"
-	"strings"
 	"testing"
 
+	"github.com/containerd/cgroups"
+	"github.com/kata-containers/runtime/virtcontainers/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/stretchr/testify/assert"
-
-	"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 )
 
-func getCgroupDestination(subsystem string) (string, error) {
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return "", err
-	}
-	defer f.Close()
-	s := bufio.NewScanner(f)
-	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return "", err
-		}
-		fields := strings.Fields(s.Text())
-		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
-			if opt == subsystem {
-				return fields[4], nil
-			}
-		}
-	}
-	return "", fmt.Errorf("failed to find cgroup mountpoint for %q", subsystem)
+type mockCgroup struct {
 }
 
-func TestMergeSpecResource(t *testing.T) {
-	s := &Sandbox{
-		config: &SandboxConfig{
-			Containers: []ContainerConfig{
-				{
-					ID:          "containerA",
-					Annotations: make(map[string]string),
-				},
-				{
-					ID:          "containerA",
-					Annotations: make(map[string]string),
-				},
-			},
-		},
-	}
-
-	contA := s.config.Containers[0]
-	contB := s.config.Containers[1]
-
-	getIntP := func(x int64) *int64 { return &x }
-	getUintP := func(x uint64) *uint64 { return &x }
-
-	type testData struct {
-		first    *specs.LinuxResources
-		second   *specs.LinuxResources
-		expected *specs.LinuxResources
-	}
-
-	for _, testdata := range []testData{
-		{
-			nil,
-			nil,
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{}},
-		},
-		{
-			nil,
-			&specs.LinuxResources{},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{}},
-		},
-		{
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(0), Period: getUintP(100000)}},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(20000), Period: getUintP(100000)}},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(20000), Period: getUintP(100000)}},
-		},
-		{
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(10000), Period: getUintP(0)}},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(20000), Period: getUintP(100000)}},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(20000), Period: getUintP(100000)}},
-		},
-		{
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(1000), Period: getUintP(2000)}},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(20000), Period: getUintP(100000)}},
-			&specs.LinuxResources{CPU: &specs.LinuxCPU{Quota: getIntP(1400), Period: getUintP(2000)}},
-		},
-	} {
-		data, err := json.Marshal(&specs.Spec{
-			Linux: &specs.Linux{
-				Resources: testdata.first,
-			},
-		})
-		assert.Nil(t, err)
-		contA.Annotations[annotations.ConfigJSONKey] = string(data)
-
-		data, err = json.Marshal(&specs.Spec{
-			Linux: &specs.Linux{
-				Resources: testdata.second,
-			},
-		})
-		assert.Nil(t, err)
-		contB.Annotations[annotations.ConfigJSONKey] = string(data)
-
-		rc, err := s.mergeSpecResource()
-		assert.Nil(t, err)
-		assert.True(t, reflect.DeepEqual(rc, testdata.expected), "should be equal, got: %#v, expected: %#v", rc, testdata.expected)
-	}
+func (m *mockCgroup) New(string, *specs.LinuxResources) (cgroups.Cgroup, error) {
+	return &mockCgroup{}, nil
+}
+
+func (m *mockCgroup) Add(cgroups.Process) error {
+	return nil
 }
 
-func TestSetupCgroups(t *testing.T) {
-	if os.Geteuid() != 0 {
-		t.Skip("Test disabled as requires root privileges")
-	}
+func (m *mockCgroup) AddTask(cgroups.Process) error {
+	return nil
+}
+
+func (m *mockCgroup) Delete() error {
+	return nil
+}
+
+func (m *mockCgroup) MoveTo(cgroups.Cgroup) error {
+	return nil
+}
+
+func (m *mockCgroup) Stat(...cgroups.ErrorHandler) (*cgroups.Metrics, error) {
+	return &cgroups.Metrics{}, nil
+}
+
+func (m *mockCgroup) Update(resources *specs.LinuxResources) error {
+	return nil
+}
+
+func (m *mockCgroup) Processes(cgroups.Name, bool) ([]cgroups.Process, error) {
+	return nil, nil
+}
+
+func (m *mockCgroup) Freeze() error {
+	return nil
+}
+
+func (m *mockCgroup) Thaw() error {
+	return nil
+}
+
+func (m *mockCgroup) OOMEventFD() (uintptr, error) {
+	return 0, nil
+}
+
+func (m *mockCgroup) State() cgroups.State {
+	return ""
+}
+
+func (m *mockCgroup) Subsystems() []cgroups.Subsystem {
+	return nil
+}
+
+func mockCgroupNew(hierarchy cgroups.Hierarchy, path cgroups.Path, resources *specs.LinuxResources) (cgroups.Cgroup, error) {
+	return &mockCgroup{}, nil
+}
+
+func mockCgroupLoad(hierarchy cgroups.Hierarchy, path cgroups.Path) (cgroups.Cgroup, error) {
+	return &mockCgroup{}, nil
+}
+
+func init() {
+	cgroupsNewFunc = mockCgroupNew
+	cgroupsLoadFunc = mockCgroupLoad
+}
+
+func TestV1Constraints(t *testing.T) {
+	assert := assert.New(t)
+
+	systems, err := V1Constraints()
+	assert.NoError(err)
+	assert.NotEmpty(systems)
+}
+
+func TestV1NoConstraints(t *testing.T) {
+	assert := assert.New(t)
+
+	systems, err := V1NoConstraints()
+	assert.NoError(err)
+	assert.NotEmpty(systems)
+}
+
+func TestCgroupNoConstraintsPath(t *testing.T) {
+	assert := assert.New(t)
+
+	cgrouPath := "abc"
+	expectedPath := filepath.Join(cgroupKataPath, cgrouPath)
+	path := cgroupNoConstraintsPath(cgrouPath)
+	assert.Equal(expectedPath, path)
+}
+
+func TestUpdateCgroups(t *testing.T) {
+	assert := assert.New(t)
+
+	oldCgroupsNew := cgroupsNewFunc
+	oldCgroupsLoad := cgroupsLoadFunc
+	cgroupsNewFunc = cgroups.New
+	cgroupsLoadFunc = cgroups.Load
+	defer func() {
+		cgroupsNewFunc = oldCgroupsNew
+		cgroupsLoadFunc = oldCgroupsLoad
+	}()
 
 	s := &Sandbox{
-		id:         "test-sandbox",
-		hypervisor: &mockHypervisor{},
-		config: &SandboxConfig{
-			Containers: []ContainerConfig{
-				{
-					ID:          "containerA",
-					Annotations: make(map[string]string),
-				},
-				{
-					ID:          "containerA",
-					Annotations: make(map[string]string),
-				},
-			},
+		state: types.State{
+			CgroupPath: "",
 		},
 	}
 
-	contA := s.config.Containers[0]
-	contB := s.config.Containers[1]
+	// empty path
+	err := s.updateCgroups()
+	assert.NoError(err)
 
-	getIntP := func(x int64) *int64 { return &x }
-	getUintP := func(x uint64) *uint64 { return &x }
+	// path doesn't exist
+	s.state.CgroupPath = "/abc/123/rgb"
+	err = s.updateCgroups()
+	assert.Error(err)
 
-	data, err := json.Marshal(&specs.Spec{
-		Linux: &specs.Linux{
-			Resources: &specs.LinuxResources{
-				CPU: &specs.LinuxCPU{
-					Quota:  getIntP(5000),
-					Period: getUintP(10000),
-				},
-			},
-		},
-	})
-	assert.Nil(t, err)
-	contA.Annotations[annotations.ConfigJSONKey] = string(data)
+	if os.Getuid() != 0 {
+		return
+	}
 
-	data, err = json.Marshal(&specs.Spec{
-		Linux: &specs.Linux{
-			Resources: &specs.LinuxResources{
-				CPU: &specs.LinuxCPU{
-					Quota:  getIntP(10000),
-					Period: getUintP(40000),
-				},
-			},
-		},
-	})
-	assert.Nil(t, err)
-	contB.Annotations[annotations.ConfigJSONKey] = string(data)
+	s.state.CgroupPath = fmt.Sprintf("/kata-tests-%d", os.Getpid())
+	testCgroup, err := cgroups.New(cgroups.V1, cgroups.StaticPath(s.state.CgroupPath), &specs.LinuxResources{})
+	assert.NoError(err)
+	defer testCgroup.Delete()
+	s.hypervisor = &mockHypervisor{mockPid: 0}
 
-	err = s.newCgroups()
-	assert.Nil(t, err, "failed to create cgroups")
+	// bad pid
+	err = s.updateCgroups()
+	assert.Error(err)
 
-	defer s.destroyCgroups()
+	// fake workload
+	cmd := exec.Command("tail", "-f", "/dev/null")
+	assert.NoError(cmd.Start())
+	s.state.Pid = cmd.Process.Pid
+	s.hypervisor = &mockHypervisor{mockPid: s.state.Pid}
 
-	// test if function works without error
-	err = s.setupCgroups()
-	assert.Nil(t, err, "setup host cgroup failed")
+	// no containers
+	err = s.updateCgroups()
+	assert.NoError(err)
 
-	// test if the quota and period value are written into cgroup files
-	cpu, err := getCgroupDestination("cpu")
-	assert.Nil(t, err, "failed to get cpu cgroup path")
-	assert.NotEqual(t, "", cpu, "cpu cgroup value can't be empty")
+	s.config = &SandboxConfig{}
+	s.config.HypervisorConfig.NumVCPUs = 1
 
-	parentDir := filepath.Join(cpu, defaultCgroupParent, "test-sandbox", "vcpu")
-	quotaFile := filepath.Join(parentDir, "cpu.cfs_quota_us")
-	periodFile := filepath.Join(parentDir, "cpu.cfs_period_us")
+	s.containers = map[string]*Container{
+		"abc": {
+			process: Process{
+				Pid: s.state.Pid,
+			},
+			config: &ContainerConfig{
+				Annotations: containerAnnotations,
+			},
+		},
+		"xyz": {
+			process: Process{
+				Pid: s.state.Pid,
+			},
+			config: &ContainerConfig{
+				Annotations: containerAnnotations,
+			},
+		},
+	}
 
-	expectedQuota := "7500\n"
-	expectedPeriod := "10000\n"
+	err = s.updateCgroups()
+	assert.NoError(err)
 
-	fquota, err := os.Open(quotaFile)
-	assert.Nil(t, err, "open file %q failed", quotaFile)
-	defer fquota.Close()
-	data, err = ioutil.ReadAll(fquota)
-	assert.Nil(t, err)
-	assert.Equal(t, expectedQuota, string(data), "failed to get expected cfs_quota")
-
-	fperiod, err := os.Open(periodFile)
-	assert.Nil(t, err, "open file %q failed", periodFile)
-	defer fperiod.Close()
-	data, err = ioutil.ReadAll(fperiod)
-	assert.Nil(t, err)
-	assert.Equal(t, expectedPeriod, string(data), "failed to get expected cfs_period")
+	// cleanup
+	assert.NoError(cmd.Process.Kill())
+	err = s.deleteCgroups()
+	assert.NoError(err)
 }
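The test file above stubs the host-cgroup calls by reassigning the package-level function variables `cgroupsNewFunc`/`cgroupsLoadFunc` in `init()`, a common Go seam for substituting a dependency without threading an interface through the callers. In isolation the pattern looks like this (illustrative names, not from the runtime):

```go
package main

import "fmt"

// Production code calls through a package-level variable...
var openCgroup = realOpenCgroup

func realOpenCgroup(path string) (string, error) {
	return "", fmt.Errorf("needs root: %s", path)
}

// ...so a test (or here, main) can swap it in and restore it afterwards.
func main() {
	old := openCgroup
	openCgroup = func(path string) (string, error) { return "mock:" + path, nil }
	defer func() { openCgroup = old }()

	got, err := openCgroup("/kata/sandbox")
	fmt.Println(got, err) // mock:/kata/sandbox <nil>
}
```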
@@ -24,6 +24,7 @@ import (
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/runtime/virtcontainers/device/drivers"
 	deviceManager "github.com/kata-containers/runtime/virtcontainers/device/manager"
+	"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
 	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
@@ -175,8 +176,6 @@ type Sandbox struct {
 	seccompSupported bool
 
 	ctx context.Context
-
-	cgroup *sandboxCgroups
 }
 
 // ID returns the sandbox identifier string.
@@ -541,11 +540,6 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 		return nil, err
 	}
 
-	// create new cgroup for sandbox
-	if err := s.newCgroups(); err != nil {
-		return nil, err
-	}
-
 	return s, nil
 }
 
@@ -702,10 +696,8 @@ func (s *Sandbox) Delete() error {
 		}
 	}
 
-	// destroy sandbox cgroup
-	if err := s.destroyCgroups(); err != nil {
-		// continue the removal process even cgroup failed to destroy
-		s.Logger().WithError(err).Error("failed to destroy cgroup")
+	if err := s.deleteCgroups(); err != nil {
+		return err
 	}
 
 	globalSandboxList.removeSandbox(s.id)
@@ -987,6 +979,13 @@ func (s *Sandbox) addContainer(c *Container) error {
 	}
 	s.containers[c.id] = c
 
+	ann := c.GetAnnotations()
+	if ann[annotations.ContainerTypeKey] == string(PodSandbox) {
+		s.state.Pid = c.process.Pid
+		s.state.CgroupPath = c.state.CgroupPath
+		return s.store.Store(store.State, s.state)
+	}
+
 	return nil
 }
 
@@ -1048,10 +1047,10 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
 		return nil, err
 	}
 
-	// Setup host cgroups for new container
-	if err := s.setupCgroups(); err != nil {
+	if err := s.updateCgroups(); err != nil {
 		return nil, err
 	}
 
 	return c, nil
 }
 
@@ -1202,6 +1201,10 @@ func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResou
 		return err
 	}
 
+	if err := s.updateCgroups(); err != nil {
+		return err
+	}
+
 	return c.storeContainer()
 }
 
@@ -1269,6 +1272,10 @@ func (s *Sandbox) createContainers() error {
 		}
 	}
 
+	if err := s.updateCgroups(); err != nil {
+		return err
+	}
+
 	return nil
 }
 
@@ -1427,15 +1434,6 @@ func (s *Sandbox) decrementSandboxBlockIndex() error {
 	return nil
 }
 
-// setSandboxPid sets the Pid of the shim process belonging to the
-// sandbox container as the Pid of the sandbox.
-func (s *Sandbox) setSandboxPid(pid int) error {
-	s.state.Pid = pid
-
-	// update on-disk state
-	return s.store.Store(store.State, s.state)
-}
-
 func (s *Sandbox) setContainersState(state types.StateString) error {
 	if state == "" {
 		return errNeedState
@@ -53,6 +53,7 @@ func testCreateSandbox(t *testing.T, id string,
 		NetworkConfig: nconfig,
 		Volumes:       volumes,
 		Containers:    containers,
+		Annotations:   sandboxAnnotations,
 	}
 
 	sandbox, err := createSandbox(context.Background(), sconfig, nil)
@@ -689,7 +690,8 @@ func TestSandboxGetContainer(t *testing.T) {
 func TestContainerSetStateBlockIndex(t *testing.T) {
 	containers := []ContainerConfig{
 		{
 			ID:          "100",
+			Annotations: containerAnnotations,
 		},
 	}
 
@@ -784,7 +786,8 @@ func TestContainerStateSetFstype(t *testing.T) {
 
 	containers := []ContainerConfig{
 		{
 			ID:          "100",
+			Annotations: containerAnnotations,
 		},
 	}
 