virtcontainers: Rename the cgroups package

Rename it to resourcecontrol, making the name consistent with the fact
that cgroups are a Linux implementation of the ResourceController
interface.

Fixes: #3601

Signed-off-by: Samuel Ortiz <s.ortiz@apple.com>
Samuel Ortiz 2022-02-02 16:11:14 +00:00 committed by Samuel Ortiz
parent 0d1a7da682
commit 823faee83a
11 changed files with 110 additions and 109 deletions
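
For orientation, the sketch below approximates the ResourceController surface implied by the call sites in this diff (ID, Parent, MoveTo, Delete, Stat, AddDevice, RemoveDevice, AddThread, AddProcess, UpdateCpuSet). It is a hedged reconstruction, not the actual declaration from pkg/resourcecontrol; in particular the Stat() return type is left loose here.

package resourcecontrol

// ResourceController, as suggested by the usage in this commit. On Linux the
// implementation is cgroups based; other platforms could provide their own.
type ResourceController interface {
	// Identity and hierarchy.
	ID() string
	Parent() string

	// Process and thread placement.
	AddProcess(pid int) error
	AddThread(tid int) error
	MoveTo(path string) error
	Delete() error

	// Device and CPU/memory constraints.
	AddDevice(hostPath string) error
	RemoveDevice(hostPath string) error
	UpdateCpuSet(cpuset, memset string) error

	// Stat is called for sandbox statistics; the concrete metrics type
	// (cgroup stats on Linux) is not visible in this diff.
	Stat() (interface{}, error)
}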

View File

@@ -12,8 +12,8 @@ import (
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	deviceApi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
 	deviceConfig "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
+	resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
 	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/sirupsen/logrus"
 )
@@ -39,7 +39,7 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) {
 	deviceApi.SetLogger(virtLog)
 	compatoci.SetLogger(virtLog)
 	deviceConfig.SetLogger(virtLog)
-	cgroups.SetLogger(virtLog)
+	resCtrl.SetLogger(virtLog)
 }

 // CreateSandbox is the virtcontainers sandbox creation entry point.
@@ -83,7 +83,7 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 	}()

 	// Set the sandbox host cgroups.
-	if err := s.setupCgroups(); err != nil {
+	if err := s.setupResourceController(); err != nil {
 		return nil, err
 	}

View File

@@ -17,8 +17,8 @@ import (
 	ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
-	vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
+	resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -49,7 +49,7 @@ func newEmptySpec() *specs.Spec {
 	return &specs.Spec{
 		Linux: &specs.Linux{
 			Resources:   &specs.LinuxResources{},
-			CgroupsPath: vccgroups.DefaultCgroupPath,
+			CgroupsPath: resCtrl.DefaultResourceControllerID,
 		},
 		Process: &specs.Process{
 			Capabilities: &specs.LinuxCapabilities{},

View File

@@ -27,7 +27,7 @@ import (
 	kataclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/client"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
 	vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
-	vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
+	resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
@@ -1061,7 +1061,7 @@ func (k *kataAgent) constrainGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool, str
 	//	- Initrd image doesn't have systemd.
 	//	- Nobody will be able to modify the resources of a specific container by using systemctl set-property.
 	//	- docker is not running in the VM.
-	if vccgroups.IsSystemdCgroup(grpcSpec.Linux.CgroupsPath) {
+	if resCtrl.IsSystemdCgroup(grpcSpec.Linux.CgroupsPath) {
 		// Convert systemd cgroup to cgroupfs
 		slice := strings.Split(grpcSpec.Linux.CgroupsPath, ":")
 		// 0 - slice: system.slice
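
To make the split above concrete, the short program below illustrates the systemd-style "slice:prefix:name" format that IsSystemdCgroup() detects. The sample path and the cgroupfs-style form printed at the end are assumptions for illustration; the agent's actual conversion continues past the lines shown in this hunk.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical systemd cgroups path, as a container engine might set it.
	cgroupsPath := "system.slice:docker:abc123"

	slice := strings.Split(cgroupsPath, ":")
	// 0 - slice:  system.slice
	// 1 - prefix: docker
	// 2 - name:   abc123
	fmt.Printf("slice=%s prefix=%s name=%s\n", slice[0], slice[1], slice[2])

	// A cgroupfs-style equivalent typically looks like this (assumed format):
	fmt.Printf("/%s/%s-%s.scope\n", slice[0], slice[1], slice[2])
}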

View File

@@ -17,7 +17,7 @@ import (
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/drivers"
 	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
+	resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
 	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/safchain/ethtool"
 )
@@ -89,7 +89,7 @@ func (endpoint *PhysicalEndpoint) Attach(ctx context.Context, s *Sandbox) error
 		return err
 	}

-	c, err := cgroups.DeviceToCgroupDeviceRule(vfioPath)
+	c, err := resCtrl.DeviceToCgroupDeviceRule(vfioPath)
 	if err != nil {
 		return err
 	}

View File

@@ -5,7 +5,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-package cgroups
+package resourcecontrol

 import (
 	"fmt"
@@ -25,12 +25,9 @@ import (
 // from grabbing the stats data.
 const CgroupKataPrefix = "kata"

-// DefaultCgroupPath runtime-determined location in the cgroups hierarchy.
-const DefaultCgroupPath = "/vc"
-
 func RenameCgroupPath(path string) (string, error) {
 	if path == "" {
-		path = DefaultCgroupPath
+		path = DefaultResourceControllerID
 	}

 	cgroupPathDir := filepath.Dir(path)
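
Pieced together from the lines above and the CgroupKataPrefix constant, the sketch below shows the renaming that RenameCgroupPath() is expected to perform: the base of the path gets a "kata_" prefix so Kata-managed cgroups stand out in the hierarchy. This is a hedged approximation, not the package's actual implementation.

package main

import (
	"fmt"
	"path/filepath"
)

// renameCgroupPath mimics the visible behaviour: an empty path falls back to
// the default controller ID, then the last path element is prefixed with "kata_".
func renameCgroupPath(path string) string {
	if path == "" {
		path = "/vc" // DefaultResourceControllerID
	}
	dir := filepath.Dir(path)
	base := filepath.Base(path)
	return filepath.Join(dir, "kata_"+base)
}

func main() {
	fmt.Println(renameCgroupPath("/kubepods/besteffort/pod-1234")) // /kubepods/besteffort/kata_pod-1234
	fmt.Println(renameCgroupPath(""))                              // /kata_vc
}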

View File

@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-package cgroups
+package resourcecontrol

 import (
 	v1 "github.com/containerd/cgroups/stats/v1"
@@ -12,7 +12,7 @@ import (
 )

 var (
-	controllerLogger = logrus.WithField("source", "virtcontainers/pkg/cgroups")
+	controllerLogger = logrus.WithField("source", "virtcontainers/pkg/resourcecontrol")
 )

 // SetLogger sets up a logger for this pkg

View File

@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-package cgroups
+package resourcecontrol

 import (
 	"fmt"

View File

@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-package cgroups
+package resourcecontrol

 import (
 	"context"
@@ -17,6 +17,9 @@ import (
 	"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
 )

+// DefaultResourceControllerID runtime-determined location in the cgroups hierarchy.
+const DefaultResourceControllerID = "/vc"
+
 // validCgroupPath returns a valid cgroup path.
 // see https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#cgroups-path
 func ValidCgroupPath(path string, systemdCgroup bool) (string, error) {
@@ -36,8 +39,8 @@ func ValidCgroupPath(path string, systemdCgroup bool) (string, error) {
 	// In the case of a relative path (not starting with /), the runtime MAY
 	// interpret the path relative to a runtime-determined location in the cgroups hierarchy.
-	// clean up path and return a new path relative to DefaultCgroupPath
-	return filepath.Join(DefaultCgroupPath, filepath.Clean("/"+path)), nil
+	// clean up path and return a new path relative to DefaultResourceControllerID
+	return filepath.Join(DefaultResourceControllerID, filepath.Clean("/"+path)), nil
 }

 func IsSystemdCgroup(cgroupPath string) bool {
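
As a worked example of the relative-path branch above: a relative, non-systemd cgroups path gets cleaned and re-rooted under DefaultResourceControllerID. The input path below is hypothetical.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	const defaultResourceControllerID = "/vc"

	// A relative path, as a container engine might hand it to the runtime.
	path := "kubepods/besteffort/pod-1234"

	rerooted := filepath.Join(defaultResourceControllerID, filepath.Clean("/"+path))
	fmt.Println(rerooted) // /vc/kubepods/besteffort/pod-1234
}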

View File

@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-package cgroups
+package resourcecontrol

 import (
 	"os"
@@ -99,8 +99,8 @@ func TestValidCgroupPath(t *testing.T) {
 			assert.Equal(t.path, path)
 		} else {
 			assert.True(
-				strings.HasPrefix(path, DefaultCgroupPath),
-				"%v should have prefix /%v", path, DefaultCgroupPath)
+				strings.HasPrefix(path, DefaultResourceControllerID),
+				"%v should have prefix /%v", path, DefaultResourceControllerID)
 		}
 	}

View File

@@ -36,9 +36,9 @@ import (
 	pbTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cpuset"
+	resCtrl "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/resourcecontrol"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
@@ -64,13 +64,13 @@ const (
 	rwm = "rwm"

 	// When the Kata overhead threads (I/O, VMM, etc) are not
-	// placed in the sandbox cgroup, they are moved to a specific,
-	// unconstrained cgroup hierarchy.
-	// Assuming the cgroup mount point is at /sys/fs/cgroup/, on a
-	// cgroup v1 system, the Kata overhead memory cgroup will be at
+	// placed in the sandbox resource controller (a cgroup on Linux),
+	// they are moved to a specific, unconstrained resource controller.
+	// On Linux, assuming the cgroup mount point is at /sys/fs/cgroup/,
+	// on a cgroup v1 system, the Kata overhead memory cgroup will be at
 	// /sys/fs/cgroup/memory/kata_overhead/$CGPATH where $CGPATH is
 	// defined by the orchestrator.
-	cgroupKataOverheadPath = "/kata_overhead/"
+	resCtrlKataOverheadID = "/kata_overhead/"
 )

 var (
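
As a concrete illustration of the comment above: on a cgroup v1 host, the unconstrained overhead controller for a given sandbox ends up under the memory hierarchy at a location like the one printed below. The mount point and sandbox ID are assumptions; the sandbox ID stands in for the $CGPATH component.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	const cgroupV1MemoryMount = "/sys/fs/cgroup/memory" // assumed mount point
	const resCtrlKataOverheadID = "/kata_overhead/"
	sandboxID := "pod-1234" // hypothetical sandbox ID

	fmt.Println(filepath.Join(cgroupV1MemoryMount, resCtrlKataOverheadID, sandboxID))
	// /sys/fs/cgroup/memory/kata_overhead/pod-1234
}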
@@ -199,10 +199,11 @@ type Sandbox struct {
 	config          *SandboxConfig
 	annotationsLock *sync.RWMutex
 	wg              *sync.WaitGroup
-	sandboxCgroup   cgroups.ResourceController
-	overheadCgroup  cgroups.ResourceController
 	cw              *consoleWatcher

+	sandboxController  resCtrl.ResourceController
+	overheadController resCtrl.ResourceController
+
 	containers map[string]*Container

 	id string
@@ -565,8 +566,8 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 		sandboxConfig.HypervisorConfig.EnableVhostUserStore,
 		sandboxConfig.HypervisorConfig.VhostUserStorePath, nil)

-	// Create the sandbox cgroups
-	if err := s.createCgroups(); err != nil {
+	// Create the sandbox resource controllers.
+	if err := s.createResourceController(); err != nil {
 		return nil, err
 	}
@@ -587,7 +588,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 	return s, nil
 }

-func (s *Sandbox) createCgroups() error {
+func (s *Sandbox) createResourceController() error {
 	var err error
 	cgroupPath := ""
@@ -596,20 +597,19 @@ func (s *Sandbox) createCgroups() error {
 	resources := specs.LinuxResources{}
 	if s.config == nil {
-		return fmt.Errorf("Could not create cgroup manager: empty sandbox configuration")
+		return fmt.Errorf("Could not create %s resource controller manager: empty sandbox configuration", s.sandboxController)
 	}

 	spec := s.GetPatchedOCISpec()
 	if spec != nil && spec.Linux != nil {
 		cgroupPath = spec.Linux.CgroupsPath

-		// Kata relies on the cgroup parent created and configured by the container
-		// engine by default. The exception is for devices whitelist as well as sandbox-level
-		// CPUSet.
-		// For the sandbox cgroups we create and manage, rename the base of the cgroup path to
+		// Kata relies on the resource controller (cgroups on Linux) parent created and configured by the
+		// container engine by default. The exception is for devices whitelist as well as sandbox-level CPUSet.
+		// For the sandbox controllers we create and manage, rename the base of the controller ID to
 		// include "kata_"
-		if !cgroups.IsSystemdCgroup(cgroupPath) { // don't add prefix when cgroups are managed by systemd
-			cgroupPath, err = cgroups.RenameCgroupPath(cgroupPath)
+		if !resCtrl.IsSystemdCgroup(cgroupPath) { // don't add prefix when cgroups are managed by systemd
+			cgroupPath, err = resCtrl.RenameCgroupPath(cgroupPath)
 			if err != nil {
 				return err
 			}
@@ -659,7 +659,7 @@
 	if s.devManager != nil {
 		for _, d := range s.devManager.GetAllDevices() {
-			dev, err := cgroups.DeviceToLinuxDevice(d.GetHostPath())
+			dev, err := resCtrl.DeviceToLinuxDevice(d.GetHostPath())
 			if err != nil {
 				s.Logger().WithError(err).WithField("device", d.GetHostPath()).Warn("Could not add device to sandbox resources")
 				continue
@@ -668,34 +668,34 @@
 		}
 	}

-	// Create the sandbox cgroup.
+	// Create the sandbox resource controller (cgroups on Linux).
 	// Depending on the SandboxCgroupOnly value, this cgroup
 	// will either hold all the pod threads (SandboxCgroupOnly is true)
 	// or only the virtual CPU ones (SandboxCgroupOnly is false).
-	s.sandboxCgroup, err = cgroups.NewSandboxResourceController(cgroupPath, &resources, s.config.SandboxCgroupOnly)
+	s.sandboxController, err = resCtrl.NewSandboxResourceController(cgroupPath, &resources, s.config.SandboxCgroupOnly)
 	if err != nil {
-		return fmt.Errorf("Could not create the sandbox cgroup %v", err)
+		return fmt.Errorf("Could not create the sandbox resource controller %v", err)
 	}

-	// Now that the sandbox cgroup is created, we can set the state cgroup root paths.
-	s.state.SandboxCgroupPath = s.sandboxCgroup.ID()
+	// Now that the sandbox resource controller is created, we can set the state controller paths.
+	s.state.SandboxCgroupPath = s.sandboxController.ID()
 	s.state.OverheadCgroupPath = ""

 	if s.config.SandboxCgroupOnly {
-		s.overheadCgroup = nil
+		s.overheadController = nil
 	} else {
 		// The shim configuration is requesting that we do not put all threads
-		// into the sandbox cgroup.
-		// We're creating an overhead cgroup, with no constraints. Everything but
+		// into the sandbox resource controller.
+		// We're creating an overhead controller, with no constraints. Everything but
 		// the vCPU threads will eventually make it there.
-		overheadCgroup, err := cgroups.NewResourceController(fmt.Sprintf("/%s/%s", cgroupKataOverheadPath, s.id), &specs.LinuxResources{})
+		overheadController, err := resCtrl.NewResourceController(fmt.Sprintf("/%s/%s", resCtrlKataOverheadID, s.id), &specs.LinuxResources{})
 		// TODO: support systemd cgroups overhead cgroup
 		// https://github.com/kata-containers/kata-containers/issues/2963
 		if err != nil {
 			return err
 		}
-		s.overheadCgroup = overheadCgroup
-		s.state.OverheadCgroupPath = s.overheadCgroup.ID()
+		s.overheadController = overheadController
+		s.state.OverheadCgroupPath = s.overheadController.ID()
 	}

 	return nil
@@ -778,8 +778,8 @@ func (s *Sandbox) Delete(ctx context.Context) error {
 	}

 	if !rootless.IsRootless() {
-		if err := s.cgroupsDelete(); err != nil {
-			s.Logger().WithError(err).Error("failed to Cleanup cgroups")
+		if err := s.resourceControllerDelete(); err != nil {
+			s.Logger().WithError(err).Errorf("failed to cleanup the %s resource controllers", s.sandboxController)
 		}
 	}
@@ -1318,7 +1318,7 @@ func (s *Sandbox) CreateContainer(ctx context.Context, contConfig ContainerConfi
 		return nil, err
 	}

-	if err = s.cgroupsUpdate(ctx); err != nil {
+	if err = s.resourceControllerUpdate(ctx); err != nil {
 		return nil, err
 	}
@@ -1421,8 +1421,8 @@ func (s *Sandbox) DeleteContainer(ctx context.Context, containerID string) (VCCo
 		}
 	}

-	// update the sandbox cgroup
-	if err = s.cgroupsUpdate(ctx); err != nil {
+	// update the sandbox resource controller
+	if err = s.resourceControllerUpdate(ctx); err != nil {
 		return nil, err
 	}
@@ -1487,7 +1487,7 @@ func (s *Sandbox) UpdateContainer(ctx context.Context, containerID string, resou
 		return err
 	}

-	if err := s.cgroupsUpdate(ctx); err != nil {
+	if err := s.resourceControllerUpdate(ctx); err != nil {
 		return err
 	}
@@ -1515,7 +1515,7 @@ func (s *Sandbox) StatsContainer(ctx context.Context, containerID string) (Conta

 // Stats returns the stats of a running sandbox
 func (s *Sandbox) Stats(ctx context.Context) (SandboxStats, error) {
-	metrics, err := s.sandboxCgroup.Stat()
+	metrics, err := s.sandboxController.Stat()
 	if err != nil {
 		return SandboxStats{}, err
 	}
@@ -1599,7 +1599,7 @@ func (s *Sandbox) createContainers(ctx context.Context) error {
 		return err
 	}

-	if err := s.cgroupsUpdate(ctx); err != nil {
+	if err := s.resourceControllerUpdate(ctx); err != nil {
 		return err
 	}
 	if err := s.storeSandbox(ctx); err != nil {
@@ -1752,10 +1752,10 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
 	span, ctx := katatrace.Trace(ctx, s.Logger(), "HotplugAddDevice", sandboxTracingTags, map[string]string{"sandbox_id": s.id})
 	defer span.End()

-	if s.sandboxCgroup != nil {
-		if err := s.sandboxCgroup.AddDevice(device.GetHostPath()); err != nil {
+	if s.sandboxController != nil {
+		if err := s.sandboxController.AddDevice(device.GetHostPath()); err != nil {
 			s.Logger().WithError(err).WithField("device", device).
-				Warn("Could not add device to cgroup")
+				Warnf("Could not add device to the %s controller", s.sandboxController)
 		}
 	}
@@ -1803,11 +1803,11 @@
 // HotplugRemoveDevice is used for removing a device from sandbox
 // Sandbox implement DeviceReceiver interface from device/api/interface.go
 func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
 	defer func() {
-		if s.sandboxCgroup != nil {
-			if err := s.sandboxCgroup.RemoveDevice(device.GetHostPath()); err != nil {
+		if s.sandboxController != nil {
+			if err := s.sandboxController.RemoveDevice(device.GetHostPath()); err != nil {
 				s.Logger().WithError(err).WithField("device", device).
-					Warn("Could not add device to cgroup")
+					Warnf("Could not add device to the %s controller", s.sandboxController)
 			}
 		}
 	}()
@@ -2095,25 +2095,26 @@ func (s *Sandbox) GetHypervisorType() string {
 	return string(s.config.HypervisorType)
 }

-// cgroupsUpdate updates the sandbox cpuset cgroup subsystem.
-// Also, if the sandbox has an overhead cgroup, it updates the hypervisor
+// resourceControllerUpdate updates the sandbox cpuset resource controller
+// (Linux cgroup) subsystem.
+// Also, if the sandbox has an overhead controller, it updates the hypervisor
 // constraints by moving the potentially new vCPU threads back to the sandbox
-// cgroup.
-func (s *Sandbox) cgroupsUpdate(ctx context.Context) error {
+// controller.
+func (s *Sandbox) resourceControllerUpdate(ctx context.Context) error {
 	cpuset, memset, err := s.getSandboxCPUSet()
 	if err != nil {
 		return err
 	}

-	// We update the sandbox cgroup with potentially new virtual CPUs.
-	if err := s.sandboxCgroup.UpdateCpuSet(cpuset, memset); err != nil {
+	// We update the sandbox controller with potentially new virtual CPUs.
+	if err := s.sandboxController.UpdateCpuSet(cpuset, memset); err != nil {
 		return err
 	}

-	if s.overheadCgroup != nil {
-		// If we have an overhead cgroup, new vCPU threads would start there,
+	if s.overheadController != nil {
+		// If we have an overhead controller, new vCPU threads would start there,
 		// as being children of the VMM PID.
-		// We need to constrain them by moving them into the sandbox cgroup.
+		// We need to constrain them by moving them into the sandbox controller.
 		if err := s.constrainHypervisor(ctx); err != nil {
 			return err
 		}
@@ -2122,41 +2123,41 @@ func (s *Sandbox) cgroupsUpdate(ctx context.Context) error {
 	return nil
 }

-// cgroupsDelete will move the running processes in the sandbox cgroup
-// to the parent and then delete the sandbox cgroup
-func (s *Sandbox) cgroupsDelete() error {
-	s.Logger().Debug("Deleting sandbox cgroup")
+// resourceControllerDelete will move the running processes in the sandbox resource
+// controller to the parent and then delete the sandbox controller.
+func (s *Sandbox) resourceControllerDelete() error {
+	s.Logger().Debugf("Deleting sandbox %s resource controller", s.sandboxController)

 	if s.state.SandboxCgroupPath == "" {
-		s.Logger().Warnf("sandbox cgroup path is empty")
+		s.Logger().Warnf("sandbox %s resource controller path is empty", s.sandboxController)
 		return nil
 	}

-	sandboxCgroup, err := cgroups.LoadResourceController(s.state.SandboxCgroupPath)
+	sandboxController, err := resCtrl.LoadResourceController(s.state.SandboxCgroupPath)
 	if err != nil {
 		return err
 	}

-	resCtrlParent := sandboxCgroup.Parent()
-	if err := sandboxCgroup.MoveTo(resCtrlParent); err != nil {
+	resCtrlParent := sandboxController.Parent()
+	if err := sandboxController.MoveTo(resCtrlParent); err != nil {
 		return err
 	}

-	if err := sandboxCgroup.Delete(); err != nil {
+	if err := sandboxController.Delete(); err != nil {
 		return err
 	}

 	if s.state.OverheadCgroupPath != "" {
-		overheadCgroup, err := cgroups.LoadResourceController(s.state.OverheadCgroupPath)
+		overheadController, err := resCtrl.LoadResourceController(s.state.OverheadCgroupPath)
 		if err != nil {
 			return err
 		}

-		resCtrlParent := overheadCgroup.Parent()
-		if err := s.overheadCgroup.MoveTo(resCtrlParent); err != nil {
+		resCtrlParent := overheadController.Parent()
+		if err := s.overheadController.MoveTo(resCtrlParent); err != nil {
 			return err
 		}

-		if err := overheadCgroup.Delete(); err != nil {
+		if err := overheadController.Delete(); err != nil {
 			return err
 		}
 	}
@@ -2164,16 +2165,16 @@ func (s *Sandbox) cgroupsDelete() error {
 	return nil
 }

-// constrainHypervisor will place the VMM and vCPU threads into cgroups.
+// constrainHypervisor will place the VMM and vCPU threads into resource controllers (cgroups on Linux).
 func (s *Sandbox) constrainHypervisor(ctx context.Context) error {
 	tids, err := s.hypervisor.GetThreadIDs(ctx)
 	if err != nil {
 		return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
 	}

-	// All vCPU threads move to the sandbox cgroup.
+	// All vCPU threads move to the sandbox controller.
 	for _, i := range tids.vcpus {
-		if err := s.sandboxCgroup.AddThread(i); err != nil {
+		if err := s.sandboxController.AddThread(i); err != nil {
 			return err
 		}
 	}
@@ -2181,22 +2182,22 @@ func (s *Sandbox) constrainHypervisor(ctx context.Context) error {
 	return nil
 }

-// setupCgroups adds the runtime process to either the sandbox cgroup or the overhead one,
-// depending on the sandbox_cgroup_only configuration setting.
-func (s *Sandbox) setupCgroups() error {
-	vmmCgroup := s.sandboxCgroup
-	if s.overheadCgroup != nil {
-		vmmCgroup = s.overheadCgroup
+// setupResourceController adds the runtime process to either the sandbox resource controller or the
+// overhead one, depending on the sandbox_cgroup_only configuration setting.
+func (s *Sandbox) setupResourceController() error {
+	vmmController := s.sandboxController
+	if s.overheadController != nil {
+		vmmController = s.overheadController
 	}

-	// By adding the runtime process to either the sandbox or overhead cgroup, we are making
+	// By adding the runtime process to either the sandbox or overhead controller, we are making
 	// sure that any child process of the runtime (i.e. *all* processes serving a Kata pod)
-	// will initially live in this cgroup. Depending on the sandbox_cgroup settings, we will
-	// then move the vCPU threads between cgroups.
+	// will initially live in this controller. Depending on the sandbox_cgroup settings, we will
+	// then move the vCPU threads between resource controllers.
 	runtimePid := os.Getpid()
-	// Add the runtime to the VMM sandbox cgroup
-	if err := vmmCgroup.AddProcess(runtimePid); err != nil {
-		return fmt.Errorf("Could not add runtime PID %d to sandbox cgroup: %v", runtimePid, err)
+	// Add the runtime to the VMM sandbox resource controller
+	if err := vmmController.AddProcess(runtimePid); err != nil {
+		return fmt.Errorf("Could not add runtime PID %d to the sandbox %s resource controller: %v", runtimePid, s.sandboxController, err)
 	}

 	return nil
@@ -2217,8 +2218,8 @@ func (s *Sandbox) GetPatchedOCISpec() *specs.Spec {
 	}

 	// get the container associated with the PodSandbox annotation. In Kubernetes, this
-	// represents the pause container. In Docker, this is the container. We derive the
-	// cgroup path from this container.
+	// represents the pause container. In Docker, this is the container.
+	// On Linux, we derive the cgroup path from this container.
 	for _, cConfig := range s.config.Containers {
 		if cConfig.Annotations[annotations.ContainerTypeKey] == string(PodSandbox) {
 			return cConfig.CustomSpec

View File

@@ -1502,14 +1502,14 @@ func TestSandbox_Cgroups(t *testing.T) {
 		}

 		t.Run(tt.name, func(t *testing.T) {
-			err := tt.s.createCgroups()
+			err := tt.s.createResourceController()
 			t.Logf("create groups error %v", err)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("Sandbox.CreateCgroups() error = %v, wantErr %v", err, tt.wantErr)
 			}

 			if err == nil {
-				if err := tt.s.setupCgroups(); (err != nil) != tt.wantErr {
+				if err := tt.s.setupResourceController(); (err != nil) != tt.wantErr {
 					t.Errorf("Sandbox.SetupCgroups() error = %v, wantErr %v", err, tt.wantErr)
 				}
 			}