virtcontainers: Rename the cgroups package

Rename it to resourcecontrol, making the name consistent with the fact that
cgroups are a Linux implementation of the ResourceController interface.

Fixes: #3601

Signed-off-by: Samuel Ortiz <s.ortiz@apple.com>
Samuel Ortiz 2022-02-02 16:11:14 +00:00 committed by Samuel Ortiz
parent 0d1a7da682
commit 823faee83a
11 changed files with 110 additions and 109 deletions
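The ResourceController interface itself is not part of this diff. The sketch below reconstructs its plausible shape purely from the call sites in the hunks that follow (ID, Parent, MoveTo, Delete, Stat, AddDevice, RemoveDevice, AddThread, AddProcess, UpdateCpuSet); the exact signatures and return types are assumptions, not the committed definition.

package resourcecontrol

import (
	v1 "github.com/containerd/cgroups/stats/v1"
)

// ResourceController abstracts a host resource-constraint mechanism.
// On Linux, the implementation is a cgroup. Method set inferred from
// the call sites in this diff; it may not match the committed interface.
type ResourceController interface {
	// ID returns the controller identifier, e.g. a cgroup path.
	ID() string

	// Parent returns the identifier of the parent controller.
	Parent() string

	// Delete removes the controller from the host.
	Delete() error

	// Stat returns the controller's resource usage metrics.
	Stat() (*v1.Metrics, error)

	// AddProcess and AddThread place a process or a thread under the controller.
	AddProcess(pid int) error
	AddThread(tid int) error

	// MoveTo migrates every process and thread to another controller.
	MoveTo(id string) error

	// AddDevice and RemoveDevice update the controller's device access rules.
	AddDevice(hostPath string) error
	RemoveDevice(hostPath string) error

	// UpdateCpuSet constrains the controller to the given CPUs and memory nodes.
	UpdateCpuSet(cpuset, memset string) error
}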

View File

@@ -12,8 +12,8 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
deviceApi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
deviceConfig "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
"github.com/sirupsen/logrus"
)
@@ -39,7 +39,7 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) {
deviceApi.SetLogger(virtLog)
compatoci.SetLogger(virtLog)
deviceConfig.SetLogger(virtLog)
cgroups.SetLogger(virtLog)
resCtrl.SetLogger(virtLog)
}
// CreateSandbox is the virtcontainers sandbox creation entry point.
@@ -83,7 +83,7 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
}()
// Set the sandbox host cgroups.
if err := s.setupCgroups(); err != nil {
if err := s.setupResourceController(); err != nil {
return nil, err
}

View File

@@ -17,8 +17,8 @@ import (
ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -49,7 +49,7 @@ func newEmptySpec() *specs.Spec {
return &specs.Spec{
Linux: &specs.Linux{
Resources: &specs.LinuxResources{},
CgroupsPath: vccgroups.DefaultCgroupPath,
CgroupsPath: resCtrl.DefaultResourceControllerID,
},
Process: &specs.Process{
Capabilities: &specs.LinuxCapabilities{},

View File

@@ -27,7 +27,7 @@ import (
kataclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/client"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
@@ -1061,7 +1061,7 @@ func (k *kataAgent) constrainGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool, str
// - Initrd image doesn't have systemd.
// - Nobody will be able to modify the resources of a specific container by using systemctl set-property.
// - docker is not running in the VM.
if vccgroups.IsSystemdCgroup(grpcSpec.Linux.CgroupsPath) {
if resCtrl.IsSystemdCgroup(grpcSpec.Linux.CgroupsPath) {
// Convert systemd cgroup to cgroupfs
slice := strings.Split(grpcSpec.Linux.CgroupsPath, ":")
// 0 - slice: system.slice
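The hunk above stops after the first of the three colon-separated components. As a rough sketch of the conversion being described, and not the committed code, a systemd cgroup path of the form "slice:prefix:name" can be flattened into a cgroupfs path like this:

package main

import (
	"fmt"
	"strings"
)

// systemdToCgroupfs flattens a systemd cgroup path ("slice:prefix:name")
// into a plain cgroupfs path. Illustrative sketch only; the exact layout
// the agent writes is defined by the full function body, not shown here.
func systemdToCgroupfs(path string) string {
	parts := strings.Split(path, ":")
	if len(parts) != 3 {
		// Not a systemd-style path, leave it untouched.
		return path
	}
	// 0 - slice: system.slice
	// 1 - prefix: e.g. docker, or kata
	// 2 - name: the container ID
	return fmt.Sprintf("/%s/%s", parts[1], parts[2])
}

func main() {
	fmt.Println(systemdToCgroupfs("system.slice:kata:abc123"))
	// Output: /kata/abc123
}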

View File

@@ -17,7 +17,7 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/drivers"
persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
"github.com/safchain/ethtool"
)
@@ -89,7 +89,7 @@ func (endpoint *PhysicalEndpoint) Attach(ctx context.Context, s *Sandbox) error
return err
}
c, err := cgroups.DeviceToCgroupDeviceRule(vfioPath)
c, err := resCtrl.DeviceToCgroupDeviceRule(vfioPath)
if err != nil {
return err
}
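DeviceToCgroupDeviceRule is renamed here, but its body is not part of the diff. A plausible sketch of what such a helper does, assuming specs.LinuxDeviceCgroup as the return type: stat the VFIO device node and derive a device cgroup rule from its type and major:minor numbers.

package resourcecontrol

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
)

// DeviceToCgroupDeviceRule turns a host device node into a device
// cgroup rule. Sketched implementation, not the committed one.
func DeviceToCgroupDeviceRule(device string) (*specs.LinuxDeviceCgroup, error) {
	var st unix.Stat_t
	if err := unix.Stat(device, &st); err != nil {
		return nil, fmt.Errorf("could not stat device %s: %w", device, err)
	}

	var devType string
	switch st.Mode & unix.S_IFMT {
	case unix.S_IFBLK:
		devType = "b" // block device
	case unix.S_IFCHR:
		devType = "c" // character device
	default:
		return nil, fmt.Errorf("%s is not a device node", device)
	}

	major := int64(unix.Major(uint64(st.Rdev)))
	minor := int64(unix.Minor(uint64(st.Rdev)))

	// Allow read, write and mknod on this specific device only.
	return &specs.LinuxDeviceCgroup{
		Allow:  true,
		Type:   devType,
		Major:  &major,
		Minor:  &minor,
		Access: "rwm",
	}, nil
}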

View File

@@ -5,7 +5,7 @@
// SPDX-License-Identifier: Apache-2.0
//
package cgroups
package resourcecontrol
import (
"fmt"
@@ -25,12 +25,9 @@ import (
// from grabbing the stats data.
const CgroupKataPrefix = "kata"
// DefaultCgroupPath runtime-determined location in the cgroups hierarchy.
const DefaultCgroupPath = "/vc"
func RenameCgroupPath(path string) (string, error) {
if path == "" {
path = DefaultCgroupPath
path = DefaultResourceControllerID
}
cgroupPathDir := filepath.Dir(path)
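The hunk cuts RenameCgroupPath off right after the directory is computed. A minimal sketch of how it plausibly completes, based on the CgroupKataPrefix constant above and the "rename the base of the controller ID to include kata_" comment later in this diff; treat the exact body as an assumption:

package resourcecontrol

import (
	"fmt"
	"path/filepath"
)

const (
	CgroupKataPrefix            = "kata"
	DefaultResourceControllerID = "/vc"
)

// RenameCgroupPath prefixes the base of a cgroup path with "kata_",
// e.g. "/vc" becomes "/kata_vc". Sketched completion of the truncated body.
func RenameCgroupPath(path string) (string, error) {
	if path == "" {
		path = DefaultResourceControllerID
	}

	cgroupPathDir := filepath.Dir(path)
	cgroupPathName := fmt.Sprintf("%s_%s", CgroupKataPrefix, filepath.Base(path))
	return filepath.Join(cgroupPathDir, cgroupPathName), nil
}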

View File

@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
package cgroups
package resourcecontrol
import (
v1 "github.com/containerd/cgroups/stats/v1"
@@ -12,7 +12,7 @@ import (
)
var (
controllerLogger = logrus.WithField("source", "virtcontainers/pkg/cgroups")
controllerLogger = logrus.WithField("source", "virtcontainers/pkg/resourcecontrol")
)
// SetLogger sets up a logger for this pkg
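The SetLogger body is cut off by the hunk boundary. Judging from the equivalent helpers elsewhere in virtcontainers, it plausibly rebinds the package logger while preserving its fields; a sketch that assumes the controllerLogger variable and logrus import shown above:

func SetLogger(logger *logrus.Entry) {
	// Keep the "source" field set at package initialization.
	fields := controllerLogger.Data
	controllerLogger = logger.WithFields(fields)
}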

View File

@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
package cgroups
package resourcecontrol
import (
"fmt"

View File

@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
package cgroups
package resourcecontrol
import (
"context"
@@ -17,6 +17,9 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
)
// DefaultResourceControllerID is the runtime-determined location in the cgroups hierarchy.
const DefaultResourceControllerID = "/vc"
// ValidCgroupPath returns a valid cgroup path.
// see https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#cgroups-path
func ValidCgroupPath(path string, systemdCgroup bool) (string, error) {
@@ -36,8 +39,8 @@ func ValidCgroupPath(path string, systemdCgroup bool) (string, error) {
// In the case of a relative path (not starting with /), the runtime MAY
// interpret the path relative to a runtime-determined location in the cgroups hierarchy.
// clean up path and return a new path relative to DefaultCgroupPath
return filepath.Join(DefaultCgroupPath, filepath.Clean("/"+path)), nil
// clean up path and return a new path relative to DefaultResourceControllerID
return filepath.Join(DefaultResourceControllerID, filepath.Clean("/"+path)), nil
}
func IsSystemdCgroup(cgroupPath string) bool {
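IsSystemdCgroup is truncated here as well. For illustration, a hedged sketch of both behaviors in this file: a relative OCI cgroups path is re-rooted under DefaultResourceControllerID per the spec text quoted above, and a systemd-style path is recognizable by its colon-separated "slice:prefix:name" form (the exact checks Kata performs are assumptions):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const DefaultResourceControllerID = "/vc"

// isSystemdCgroup mirrors the truncated check above: a systemd cgroup
// path has the form "slice:prefix:name", where the slice unit name
// ends in ".slice".
func isSystemdCgroup(cgroupPath string) bool {
	parts := strings.Split(cgroupPath, ":")
	return len(parts) == 3 && strings.HasSuffix(parts[0], ".slice")
}

func main() {
	// A relative path is cleaned and joined under the runtime default.
	fmt.Println(filepath.Join(DefaultResourceControllerID, filepath.Clean("/"+"kata/pod1")))
	// Output: /vc/kata/pod1

	fmt.Println(isSystemdCgroup("system.slice:kata:pod1")) // true
	fmt.Println(isSystemdCgroup("/kata/pod1"))             // false
}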

View File

@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
package cgroups
package resourcecontrol
import (
"os"
@@ -99,8 +99,8 @@ func TestValidCgroupPath(t *testing.T) {
assert.Equal(t.path, path)
} else {
assert.True(
strings.HasPrefix(path, DefaultCgroupPath),
"%v should have prefix /%v", path, DefaultCgroupPath)
strings.HasPrefix(path, DefaultResourceControllerID),
"%v should have prefix /%v", path, DefaultResourceControllerID)
}
}

View File

@@ -36,9 +36,9 @@ import (
pbTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cpuset"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
@@ -64,13 +64,13 @@ const (
rwm = "rwm"
// When the Kata overhead threads (I/O, VMM, etc) are not
// placed in the sandbox cgroup, they are moved to a specific,
// unconstrained cgroup hierarchy.
// Assuming the cgroup mount point is at /sys/fs/cgroup/, on a
// cgroup v1 system, the Kata overhead memory cgroup will be at
// placed in the sandbox resource controller (a cgroup on Linux),
// they are moved to a specific, unconstrained resource controller.
// On Linux, assuming the cgroup mount point is at /sys/fs/cgroup/,
// on a cgroup v1 system, the Kata overhead memory cgroup will be at
// /sys/fs/cgroup/memory/kata_overhead/$CGPATH where $CGPATH is
// defined by the orchestrator.
cgroupKataOverheadPath = "/kata_overhead/"
resCtrlKataOverheadID = "/kata_overhead/"
)
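As a concrete illustration of the overhead placement described in the comment above (the mount point and $CGPATH values are examples, not taken from the code):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	mountPoint := "/sys/fs/cgroup" // assumed cgroup mount point
	subsystem := "memory"          // cgroup v1 memory controller hierarchy
	cgPath := "pod-1234"           // $CGPATH, defined by the orchestrator

	fmt.Println(filepath.Join(mountPoint, subsystem, "kata_overhead", cgPath))
	// Output: /sys/fs/cgroup/memory/kata_overhead/pod-1234
}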
var (
@@ -199,10 +199,11 @@ type Sandbox struct {
config *SandboxConfig
annotationsLock *sync.RWMutex
wg *sync.WaitGroup
sandboxCgroup cgroups.ResourceController
overheadCgroup cgroups.ResourceController
cw *consoleWatcher
sandboxController resCtrl.ResourceController
overheadController resCtrl.ResourceController
containers map[string]*Container
id string
@@ -565,8 +566,8 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
sandboxConfig.HypervisorConfig.EnableVhostUserStore,
sandboxConfig.HypervisorConfig.VhostUserStorePath, nil)
// Create the sandbox cgroups
if err := s.createCgroups(); err != nil {
// Create the sandbox resource controllers.
if err := s.createResourceController(); err != nil {
return nil, err
}
@@ -587,7 +588,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
return s, nil
}
func (s *Sandbox) createCgroups() error {
func (s *Sandbox) createResourceController() error {
var err error
cgroupPath := ""
@@ -596,20 +597,19 @@ func (s *Sandbox) createCgroups() error {
resources := specs.LinuxResources{}
if s.config == nil {
return fmt.Errorf("Could not create cgroup manager: empty sandbox configuration")
return fmt.Errorf("Could not create %s resource controller manager: empty sandbox configuration", s.sandboxController)
}
spec := s.GetPatchedOCISpec()
if spec != nil && spec.Linux != nil {
cgroupPath = spec.Linux.CgroupsPath
// Kata relies on the cgroup parent created and configured by the container
// engine by default. The exception is for devices whitelist as well as sandbox-level
// CPUSet.
// For the sandbox cgroups we create and manage, rename the base of the cgroup path to
// Kata relies on the resource controller (cgroups on Linux) parent created and configured by the
// container engine by default. The exception is for devices whitelist as well as sandbox-level CPUSet.
// For the sandbox controllers we create and manage, rename the base of the controller ID to
// include "kata_"
if !cgroups.IsSystemdCgroup(cgroupPath) { // don't add prefix when cgroups are managed by systemd
cgroupPath, err = cgroups.RenameCgroupPath(cgroupPath)
if !resCtrl.IsSystemdCgroup(cgroupPath) { // don't add prefix when cgroups are managed by systemd
cgroupPath, err = resCtrl.RenameCgroupPath(cgroupPath)
if err != nil {
return err
}
@@ -659,7 +659,7 @@ func (s *Sandbox) createCgroups() error {
if s.devManager != nil {
for _, d := range s.devManager.GetAllDevices() {
dev, err := cgroups.DeviceToLinuxDevice(d.GetHostPath())
dev, err := resCtrl.DeviceToLinuxDevice(d.GetHostPath())
if err != nil {
s.Logger().WithError(err).WithField("device", d.GetHostPath()).Warn("Could not add device to sandbox resources")
continue
@@ -668,34 +668,34 @@ func (s *Sandbox) createCgroups() error {
}
}
// Create the sandbox cgroup.
// Create the sandbox resource controller (cgroups on Linux).
// Depending on the SandboxCgroupOnly value, this cgroup
// will either hold all the pod threads (SandboxCgroupOnly is true)
// or only the virtual CPU ones (SandboxCgroupOnly is false).
s.sandboxCgroup, err = cgroups.NewSandboxResourceController(cgroupPath, &resources, s.config.SandboxCgroupOnly)
s.sandboxController, err = resCtrl.NewSandboxResourceController(cgroupPath, &resources, s.config.SandboxCgroupOnly)
if err != nil {
return fmt.Errorf("Could not create the sandbox cgroup %v", err)
return fmt.Errorf("Could not create the sandbox resource controller: %v", err)
}
// Now that the sandbox cgroup is created, we can set the state cgroup root paths.
s.state.SandboxCgroupPath = s.sandboxCgroup.ID()
// Now that the sandbox resource controller is created, we can set the state controller paths.
s.state.SandboxCgroupPath = s.sandboxController.ID()
s.state.OverheadCgroupPath = ""
if s.config.SandboxCgroupOnly {
s.overheadCgroup = nil
s.overheadController = nil
} else {
// The shim configuration is requesting that we do not put all threads
// into the sandbox cgroup.
// We're creating an overhead cgroup, with no constraints. Everything but
// into the sandbox resource controller.
// We're creating an overhead controller, with no constraints. Everything but
// the vCPU threads will eventually make it there.
overheadCgroup, err := cgroups.NewResourceController(fmt.Sprintf("/%s/%s", cgroupKataOverheadPath, s.id), &specs.LinuxResources{})
overheadController, err := resCtrl.NewResourceController(fmt.Sprintf("/%s/%s", resCtrlKataOverheadID, s.id), &specs.LinuxResources{})
// TODO: support systemd cgroups overhead cgroup
// https://github.com/kata-containers/kata-containers/issues/2963
if err != nil {
return err
}
s.overheadCgroup = overheadCgroup
s.state.OverheadCgroupPath = s.overheadCgroup.ID()
s.overheadController = overheadController
s.state.OverheadCgroupPath = s.overheadController.ID()
}
return nil
@@ -778,8 +778,8 @@ func (s *Sandbox) Delete(ctx context.Context) error {
}
if !rootless.IsRootless() {
if err := s.cgroupsDelete(); err != nil {
s.Logger().WithError(err).Error("failed to Cleanup cgroups")
if err := s.resourceControllerDelete(); err != nil {
s.Logger().WithError(err).Errorf("failed to cleanup the %s resource controllers", s.sandboxController)
}
}
@@ -1318,7 +1318,7 @@ func (s *Sandbox) CreateContainer(ctx context.Context, contConfig ContainerConfi
return nil, err
}
if err = s.cgroupsUpdate(ctx); err != nil {
if err = s.resourceControllerUpdate(ctx); err != nil {
return nil, err
}
@@ -1421,8 +1421,8 @@ func (s *Sandbox) DeleteContainer(ctx context.Context, containerID string) (VCCo
}
}
// update the sandbox cgroup
if err = s.cgroupsUpdate(ctx); err != nil {
// update the sandbox resource controller
if err = s.resourceControllerUpdate(ctx); err != nil {
return nil, err
}
@@ -1487,7 +1487,7 @@ func (s *Sandbox) UpdateContainer(ctx context.Context, containerID string, resou
return err
}
if err := s.cgroupsUpdate(ctx); err != nil {
if err := s.resourceControllerUpdate(ctx); err != nil {
return err
}
@@ -1515,7 +1515,7 @@ func (s *Sandbox) StatsContainer(ctx context.Context, containerID string) (Conta
// Stats returns the stats of a running sandbox
func (s *Sandbox) Stats(ctx context.Context) (SandboxStats, error) {
metrics, err := s.sandboxCgroup.Stat()
metrics, err := s.sandboxController.Stat()
if err != nil {
return SandboxStats{}, err
}
@@ -1599,7 +1599,7 @@ func (s *Sandbox) createContainers(ctx context.Context) error {
return err
}
if err := s.cgroupsUpdate(ctx); err != nil {
if err := s.resourceControllerUpdate(ctx); err != nil {
return err
}
if err := s.storeSandbox(ctx); err != nil {
@@ -1752,10 +1752,10 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
span, ctx := katatrace.Trace(ctx, s.Logger(), "HotplugAddDevice", sandboxTracingTags, map[string]string{"sandbox_id": s.id})
defer span.End()
if s.sandboxCgroup != nil {
if err := s.sandboxCgroup.AddDevice(device.GetHostPath()); err != nil {
if s.sandboxController != nil {
if err := s.sandboxController.AddDevice(device.GetHostPath()); err != nil {
s.Logger().WithError(err).WithField("device", device).
Warn("Could not add device to cgroup")
Warnf("Could not add device to the %s controller", s.sandboxController)
}
}
@@ -1803,11 +1803,11 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
// HotplugRemoveDevice is used for removing a device from sandbox
// Sandbox implement DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
defer func() {
if s.sandboxCgroup != nil {
if err := s.sandboxCgroup.RemoveDevice(device.GetHostPath()); err != nil {
defer func() {
if s.sandboxController != nil {
if err := s.sandboxController.RemoveDevice(device.GetHostPath()); err != nil {
s.Logger().WithError(err).WithField("device", device).
Warn("Could not add device to cgroup")
Warnf("Could not remove device from the %s controller", s.sandboxController)
}
}
}()
@@ -2095,25 +2095,26 @@ func (s *Sandbox) GetHypervisorType() string {
return string(s.config.HypervisorType)
}
// cgroupsUpdate updates the sandbox cpuset cgroup subsystem.
// Also, if the sandbox has an overhead cgroup, it updates the hypervisor
// resourceControllerUpdate updates the sandbox cpuset resource controller
// (the cpuset cgroup subsystem on Linux).
// Also, if the sandbox has an overhead controller, it updates the hypervisor
// constraints by moving the potentially new vCPU threads back to the sandbox
// cgroup.
func (s *Sandbox) cgroupsUpdate(ctx context.Context) error {
// controller.
func (s *Sandbox) resourceControllerUpdate(ctx context.Context) error {
cpuset, memset, err := s.getSandboxCPUSet()
if err != nil {
return err
}
// We update the sandbox cgroup with potentially new virtual CPUs.
if err := s.sandboxCgroup.UpdateCpuSet(cpuset, memset); err != nil {
// We update the sandbox controller with potentially new virtual CPUs.
if err := s.sandboxController.UpdateCpuSet(cpuset, memset); err != nil {
return err
}
if s.overheadCgroup != nil {
// If we have an overhead cgroup, new vCPU threads would start there,
if s.overheadController != nil {
// If we have an overhead controller, new vCPU threads would start there,
// as being children of the VMM PID.
// We need to constrain them by moving them into the sandbox cgroup.
// We need to constrain them by moving them into the sandbox controller.
if err := s.constrainHypervisor(ctx); err != nil {
return err
}
@@ -2122,41 +2123,41 @@ func (s *Sandbox) cgroupsUpdate(ctx context.Context) error {
return nil
}
// cgroupsDelete will move the running processes in the sandbox cgroup
// to the parent and then delete the sandbox cgroup
func (s *Sandbox) cgroupsDelete() error {
s.Logger().Debug("Deleting sandbox cgroup")
// resourceControllerDelete will move the running processes in the sandbox resource
// controller to the parent and then delete the sandbox controller.
func (s *Sandbox) resourceControllerDelete() error {
s.Logger().Debugf("Deleting sandbox %s resource controller", s.sandboxController)
if s.state.SandboxCgroupPath == "" {
s.Logger().Warnf("sandbox cgroup path is empty")
s.Logger().Warnf("sandbox %s resource controller path is empty", s.sandboxController)
return nil
}
sandboxCgroup, err := cgroups.LoadResourceController(s.state.SandboxCgroupPath)
sandboxController, err := resCtrl.LoadResourceController(s.state.SandboxCgroupPath)
if err != nil {
return err
}
resCtrlParent := sandboxCgroup.Parent()
if err := sandboxCgroup.MoveTo(resCtrlParent); err != nil {
resCtrlParent := sandboxController.Parent()
if err := sandboxController.MoveTo(resCtrlParent); err != nil {
return err
}
if err := sandboxCgroup.Delete(); err != nil {
if err := sandboxController.Delete(); err != nil {
return err
}
if s.state.OverheadCgroupPath != "" {
overheadCgroup, err := cgroups.LoadResourceController(s.state.OverheadCgroupPath)
overheadController, err := resCtrl.LoadResourceController(s.state.OverheadCgroupPath)
if err != nil {
return err
}
resCtrlParent := overheadCgroup.Parent()
if err := s.overheadCgroup.MoveTo(resCtrlParent); err != nil {
resCtrlParent := overheadController.Parent()
if err := s.overheadController.MoveTo(resCtrlParent); err != nil {
return err
}
if err := overheadCgroup.Delete(); err != nil {
if err := overheadController.Delete(); err != nil {
return err
}
}
@@ -2164,16 +2165,16 @@ func (s *Sandbox) cgroupsDelete() error {
return nil
}
// constrainHypervisor will place the VMM and vCPU threads into cgroups.
// constrainHypervisor will place the VMM and vCPU threads into resource controllers (cgroups on Linux).
func (s *Sandbox) constrainHypervisor(ctx context.Context) error {
tids, err := s.hypervisor.GetThreadIDs(ctx)
if err != nil {
return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
}
// All vCPU threads move to the sandbox cgroup.
// All vCPU threads move to the sandbox controller.
for _, i := range tids.vcpus {
if err := s.sandboxCgroup.AddThread(i); err != nil {
if err := s.sandboxController.AddThread(i); err != nil {
return err
}
}
@@ -2181,22 +2182,22 @@ func (s *Sandbox) constrainHypervisor(ctx context.Context) error {
return nil
}
// setupCgroups adds the runtime process to either the sandbox cgroup or the overhead one,
// depending on the sandbox_cgroup_only configuration setting.
func (s *Sandbox) setupCgroups() error {
vmmCgroup := s.sandboxCgroup
if s.overheadCgroup != nil {
vmmCgroup = s.overheadCgroup
// setupResourceController adds the runtime process to either the sandbox resource controller or the
// overhead one, depending on the sandbox_cgroup_only configuration setting.
func (s *Sandbox) setupResourceController() error {
vmmController := s.sandboxController
if s.overheadController != nil {
vmmController = s.overheadController
}
// By adding the runtime process to either the sandbox or overhead cgroup, we are making
// By adding the runtime process to either the sandbox or overhead controller, we are making
// sure that any child process of the runtime (i.e. *all* processes serving a Kata pod)
// will initially live in this cgroup. Depending on the sandbox_cgroup settings, we will
// then move the vCPU threads between cgroups.
// will initially live in this controller. Depending on the sandbox_cgroup settings, we will
// then move the vCPU threads between resource controllers.
runtimePid := os.Getpid()
// Add the runtime to the VMM sandbox cgroup
if err := vmmCgroup.AddProcess(runtimePid); err != nil {
return fmt.Errorf("Could not add runtime PID %d to sandbox cgroup: %v", runtimePid, err)
// Add the runtime to the VMM sandbox resource controller
if err := vmmController.AddProcess(runtimePid); err != nil {
return fmt.Errorf("Could not add runtime PID %d to the sandbox %s resource controller: %v", runtimePid, s.sandboxController, err)
}
return nil
@@ -2217,8 +2218,8 @@ func (s *Sandbox) GetPatchedOCISpec() *specs.Spec {
}
// get the container associated with the PodSandbox annotation. In Kubernetes, this
// represents the pause container. In Docker, this is the container. We derive the
// cgroup path from this container.
// represents the pause container. In Docker, this is the container.
// On Linux, we derive the cgroup path from this container.
for _, cConfig := range s.config.Containers {
if cConfig.Annotations[annotations.ContainerTypeKey] == string(PodSandbox) {
return cConfig.CustomSpec

View File

@@ -1502,14 +1502,14 @@ func TestSandbox_Cgroups(t *testing.T) {
}
t.Run(tt.name, func(t *testing.T) {
err := tt.s.createCgroups()
err := tt.s.createResourceController()
t.Logf("create groups error %v", err)
if (err != nil) != tt.wantErr {
t.Errorf("Sandbox.CreateCgroups() error = %v, wantErr %v", err, tt.wantErr)
}
if err == nil {
if err := tt.s.setupCgroups(); (err != nil) != tt.wantErr {
if err := tt.s.setupResourceController(); (err != nil) != tt.wantErr {
t.Errorf("Sandbox.SetupCgroups() error = %v, wantErr %v", err, tt.wantErr)
}
}