device-manager: refactor device manager
Fixes #50

This commit introduces a significant logic change:

* A host device to be attached or appended is now a sandbox-level resource: a device is first bound to the sandbox/hypervisor, and a container then references it via the device's unique ID.
* Attaching or detaching a device goes through the device manager interface instead of the device interface.
* Device IDs are allocated by the global device manager so that every device gets a unique ID and there are no ID collisions.

With this change, the on-disk data format for sandboxes and containers changes, which breaks backward compatibility.

New persisted data format:

* Every sandbox gets a new "devices.json" file under "/run/vc/sbs/<sid>/" which saves detailed device information; this also conforms to the concept that a device is a sandbox-level resource.
* Every container still uses a "devices.json" file, but with a new data format:

```
[
  {
    "ID": "b80d4736e70a471f",
    "ContainerPath": "/dev/zero"
  },
  {
    "ID": "6765a06e0aa0897d",
    "ContainerPath": "/dev/null"
  }
]
```

`ID` references a device in the sandbox; `ContainerPath` is the device path inside the container.

Signed-off-by: Zhang Wei <zhangwei555@huawei.com>
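The commit message fully describes the per-container devices.json layout, so the following is a minimal, self-contained Go sketch (not part of the commit) of how such a file's content can be produced: it marshals a []ContainerDevice slice, whose fields mirror the ContainerDevice type added in this change, into the JSON shown above. The writeDevicesJSON helper name is illustrative only; the sample IDs are taken from the commit message.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ContainerDevice references a sandbox-level device by its unique ID and
// records the device path seen inside the container.
type ContainerDevice struct {
	ID            string
	ContainerPath string
}

// writeDevicesJSON marshals the container's device references into the
// devices.json layout described in the commit message.
func writeDevicesJSON(devices []ContainerDevice) (string, error) {
	out, err := json.MarshalIndent(devices, "", "  ")
	if err != nil {
		return "", fmt.Errorf("could not marshal devices: %v", err)
	}
	return string(out), nil
}

func main() {
	devices := []ContainerDevice{
		{ID: "b80d4736e70a471f", ContainerPath: "/dev/zero"},
		{ID: "6765a06e0aa0897d", ContainerPath: "/dev/null"},
	}

	s, err := writeDevicesJSON(devices)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}
```

Keeping only the ID and ContainerPath in the per-container file keeps the container record small, while the detailed device information lives in the sandbox-level devices.json.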
This commit is contained in:
parent c08a26397e
commit 7f5989f06c
@@ -20,9 +20,8 @@ import (
    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"

    "github.com/kata-containers/runtime/virtcontainers/device/api"
    "github.com/kata-containers/runtime/virtcontainers/device/config"
    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
    "github.com/kata-containers/runtime/virtcontainers/device/manager"
    "github.com/kata-containers/runtime/virtcontainers/utils"
)

@@ -229,6 +228,14 @@ type SystemMountsInfo struct {
    DevShmSize uint
}

// ContainerDevice describes a device associated with container
type ContainerDevice struct {
    // ID is device id referencing the device from sandbox's device manager
    ID string
    // ContainerPath is device path displayed in container
    ContainerPath string
}

// Container is composed of a set of containers and a runtime environment.
// A Container can be created, deleted, started, stopped, listed, entered, paused and restored.
type Container struct {

@@ -251,7 +258,7 @@ type Container struct {

    mounts []Mount

    devices []api.Device
    devices []ContainerDevice

    systemMountsInfo SystemMountsInfo
}

@@ -362,7 +369,7 @@ func (c *Container) storeDevices() error {
    return c.sandbox.storage.storeContainerDevices(c.sandboxID, c.id, c.devices)
}

func (c *Container) fetchDevices() ([]api.Device, error) {
func (c *Container) fetchDevices() ([]ContainerDevice, error) {
    return c.sandbox.storage.fetchContainerDevices(c.sandboxID, c.id)
}
@@ -430,29 +437,19 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (
            continue
        }

        var stat unix.Stat_t
        if err := unix.Stat(m.Source, &stat); err != nil {
            return nil, err
        }

        // Check if mount is a block device file. If it is, the block device will be attached to the host
        // instead of passing this as a shared mount.
        if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
            // TODO: remove dependency of package drivers
            b := &drivers.BlockDevice{
                DeviceInfo: &config.DeviceInfo{
                    HostPath:      m.Source,
                    ContainerPath: m.Destination,
                    DevType:       "b",
                },
            }

        if len(m.BlockDeviceID) > 0 {
            // Attach this block device, all other devices passed in the config have been attached at this point
            if err := b.Attach(c.sandbox); err != nil {
            if err := c.sandbox.devManager.AttachDevice(m.BlockDeviceID, c.sandbox); err != nil &&
                err != manager.ErrDeviceAttached {
                return nil, err
            }

            c.mounts[idx].BlockDevice = b
            if err := c.sandbox.storeSandboxDevices(); err != nil {
                //TODO: roll back?
                return nil, err
            }
            continue
        }
@@ -565,6 +562,38 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err
        c.mounts = mounts
    }

    // iterate all mounts and create block device if it's block based.
    for i, m := range c.mounts {
        if len(m.BlockDeviceID) > 0 || m.Type != "bind" {
            // Non-empty m.BlockDeviceID indicates there's already one device
            // associated with the mount,so no need to create a new device for it
            // and we only create block device for bind mount
            continue
        }

        var stat unix.Stat_t
        if err := unix.Stat(m.Source, &stat); err != nil {
            return nil, fmt.Errorf("stat %q failed: %v", m.Source, err)
        }

        // Check if mount is a block device file. If it is, the block device will be attached to the host
        // instead of passing this as a shared mount.
        if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
            b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{
                HostPath:      m.Source,
                ContainerPath: m.Destination,
                DevType:       "b",
                Major:         int64(unix.Major(stat.Rdev)),
                Minor:         int64(unix.Minor(stat.Rdev)),
            })
            if err != nil {
                return nil, fmt.Errorf("device manager failed to create new device for %q: %v", m.Source, err)
            }

            c.mounts[i].BlockDeviceID = b.DeviceID()
        }
    }

    // Devices will be found in storage after create stage has completed.
    // We fetch devices from storage at all other stages.
    storedDevices, err := c.fetchDevices()
@@ -578,9 +607,14 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err
        if err != nil {
            return &Container{}, err
        }
        c.devices = append(c.devices, dev)

        c.devices = append(c.devices, ContainerDevice{
            ID:            dev.DeviceID(),
            ContainerPath: info.ContainerPath,
        })
        }
    }

    return c, nil
}
@@ -1020,6 +1054,7 @@ func (c *Container) hotplugDrive() error {
        return err
    }

    // TODO: use general device manager instead of BlockDrive directly
    // Add drive with id as container id
    devID := utils.MakeNameID("drive", c.id, maxDevIDSize)
    drive := config.BlockDrive{
@@ -1076,18 +1111,30 @@ func (c *Container) removeDrive() (err error) {
}

func (c *Container) attachDevices() error {
    for _, device := range c.devices {
        if err := device.Attach(c.sandbox); err != nil {
    for _, dev := range c.devices {
        if err := c.sandbox.devManager.AttachDevice(dev.ID, c.sandbox); err != nil {
            if err == manager.ErrDeviceAttached {
                // skip if device is already attached before
                continue
            }
            return err
        }
    }

    if err := c.sandbox.storeSandboxDevices(); err != nil {
        //TODO: roll back?
        return err
    }
    return nil
}

func (c *Container) detachDevices() error {
    for _, device := range c.devices {
        if err := device.Detach(c.sandbox); err != nil {
    for _, dev := range c.devices {
        if err := c.sandbox.devManager.DetachDevice(dev.ID, c.sandbox); err != nil {
            if err == manager.ErrDeviceNotAttached {
                // skip if device is already attached before
                continue
            }
            return err
        }
    }
@@ -52,6 +52,9 @@ const (

    // devicesFileType represents a device file type
    devicesFileType

    // devicesIDFileType saves reference IDs to file, e.g. device IDs
    devicesIDFileType
)

// configFile is the file name used for every JSON sandbox configuration.

@@ -126,6 +129,8 @@ type resourceStorage interface {
    fetchSandboxState(sandboxID string) (State, error)
    fetchSandboxNetwork(sandboxID string) (NetworkNamespace, error)
    storeSandboxNetwork(sandboxID string, networkNS NetworkNamespace) error
    fetchSandboxDevices(sandboxID string) ([]api.Device, error)
    storeSandboxDevices(sandboxID string, devices []api.Device) error

    // Hypervisor resources
    fetchHypervisorState(sandboxID string, state interface{}) error

@@ -144,8 +149,8 @@ type resourceStorage interface {
    storeContainerProcess(sandboxID, containerID string, process Process) error
    fetchContainerMounts(sandboxID, containerID string) ([]Mount, error)
    storeContainerMounts(sandboxID, containerID string, mounts []Mount) error
    fetchContainerDevices(sandboxID, containerID string) ([]api.Device, error)
    storeContainerDevices(sandboxID, containerID string, devices []api.Device) error
    fetchContainerDevices(sandboxID, containerID string) ([]ContainerDevice, error)
    storeContainerDevices(sandboxID, containerID string, devices []ContainerDevice) error
}

// filesystem is a resourceStorage interface implementation for a local filesystem.
@@ -227,6 +232,35 @@ type TypedDevice struct {
    Data json.RawMessage
}

// storeDeviceIDFile is used to marshal and store device IDs to disk.
func (fs *filesystem) storeDeviceIDFile(file string, data interface{}) error {
    if file == "" {
        return errNeedFile
    }

    f, err := os.Create(file)
    if err != nil {
        return err
    }
    defer f.Close()

    devices, ok := data.([]ContainerDevice)
    if !ok {
        return fmt.Errorf("Incorrect data type received, Expected []string")
    }

    jsonOut, err := json.Marshal(devices)
    if err != nil {
        return fmt.Errorf("Could not marshal devices: %s", err)
    }

    if _, err := f.Write(jsonOut); err != nil {
        return err
    }

    return nil
}

// storeDeviceFile is used to provide custom marshalling for Device objects.
// Device is first marshalled into TypedDevice to include the type
// of the Device object.
@@ -347,7 +381,7 @@ func (fs *filesystem) fetchDeviceFile(fileData []byte, devices *[]api.Device) er
func resourceNeedsContainerID(sandboxSpecific bool, resource sandboxResource) bool {

    switch resource {
    case lockFileType, networkFileType, hypervisorFileType, agentFileType:
    case lockFileType, networkFileType, hypervisorFileType, agentFileType, devicesFileType:
        // sandbox-specific resources
        return false
    default:

@@ -370,7 +404,7 @@ func resourceDir(sandboxSpecific bool, sandboxID, containerID string, resource s
    case configFileType:
        path = configStoragePath
        break
    case stateFileType, networkFileType, processFileType, lockFileType, mountsFileType, devicesFileType, hypervisorFileType, agentFileType:
    case stateFileType, networkFileType, processFileType, lockFileType, mountsFileType, devicesFileType, devicesIDFileType, hypervisorFileType, agentFileType:
        path = runStoragePath
        break
    default:

@@ -421,6 +455,9 @@ func (fs *filesystem) resourceURI(sandboxSpecific bool, sandboxID, containerID s
    case devicesFileType:
        filename = devicesFile
        break
    case devicesIDFileType:
        filename = devicesFile
        break
    default:
        return "", "", errInvalidResource
    }
@@ -466,6 +503,7 @@ func (fs *filesystem) commonResourceChecks(sandboxSpecific bool, sandboxID, cont
    case processFileType:
    case mountsFileType:
    case devicesFileType:
    case devicesIDFileType:
    default:
        return errInvalidResource
    }

@@ -552,6 +590,19 @@ func (fs *filesystem) storeDeviceResource(sandboxSpecific bool, sandboxID, conta
    return fs.storeDeviceFile(devicesFile, file)
}

func (fs *filesystem) storeDevicesIDResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error {
    if resource != devicesIDFileType {
        return errInvalidResource
    }

    devicesFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, resource)
    if err != nil {
        return err
    }

    return fs.storeDeviceIDFile(devicesFile, file)
}

func (fs *filesystem) storeResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, data interface{}) error {
    if err := fs.commonResourceChecks(sandboxSpecific, sandboxID, containerID, resource); err != nil {
        return err

@@ -575,6 +626,8 @@ func (fs *filesystem) storeResource(sandboxSpecific bool, sandboxID, containerID

    case []api.Device:
        return fs.storeDeviceResource(sandboxSpecific, sandboxID, containerID, resource, file)
    case []ContainerDevice:
        return fs.storeDevicesIDResource(sandboxSpecific, sandboxID, containerID, resource, file)

    default:
        return fmt.Errorf("Invalid resource data type")
@@ -628,6 +681,18 @@ func (fs *filesystem) fetchSandboxNetwork(sandboxID string) (NetworkNamespace, e
    return networkNS, nil
}

func (fs *filesystem) fetchSandboxDevices(sandboxID string) ([]api.Device, error) {
    var devices []api.Device
    if err := fs.fetchResource(true, sandboxID, "", devicesFileType, &devices); err != nil {
        return []api.Device{}, err
    }
    return devices, nil
}

func (fs *filesystem) storeSandboxDevices(sandboxID string, devices []api.Device) error {
    return fs.storeSandboxResource(sandboxID, devicesFileType, devices)
}

func (fs *filesystem) fetchHypervisorState(sandboxID string, state interface{}) error {
    return fs.fetchResource(true, sandboxID, "", hypervisorFileType, state)
}

@@ -734,11 +799,11 @@ func (fs *filesystem) fetchContainerMounts(sandboxID, containerID string) ([]Mou
    return mounts, nil
}

func (fs *filesystem) fetchContainerDevices(sandboxID, containerID string) ([]api.Device, error) {
    var devices []api.Device
func (fs *filesystem) fetchContainerDevices(sandboxID, containerID string) ([]ContainerDevice, error) {
    var devices []ContainerDevice

    if err := fs.fetchResource(false, sandboxID, containerID, devicesFileType, &devices); err != nil {
        return []api.Device{}, err
    if err := fs.fetchResource(false, sandboxID, containerID, devicesIDFileType, &devices); err != nil {
        return nil, err
    }

    return devices, nil

@@ -748,8 +813,8 @@ func (fs *filesystem) storeContainerMounts(sandboxID, containerID string, mounts
    return fs.storeContainerResource(sandboxID, containerID, mountsFileType, mounts)
}

func (fs *filesystem) storeContainerDevices(sandboxID, containerID string, devices []api.Device) error {
    return fs.storeContainerResource(sandboxID, containerID, devicesFileType, devices)
func (fs *filesystem) storeContainerDevices(sandboxID, containerID string, devices []ContainerDevice) error {
    return fs.storeContainerResource(sandboxID, containerID, devicesIDFileType, devices)
}

func (fs *filesystem) deleteContainerResources(sandboxID, containerID string, resources []sandboxResource) error {
@@ -19,7 +19,6 @@ import (

    proxyClient "github.com/clearcontainers/proxy/client"
    "github.com/kata-containers/runtime/virtcontainers/device/config"
    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
    "github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
    ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
    "github.com/kata-containers/runtime/virtcontainers/utils"

@@ -235,20 +234,19 @@ func fsMapFromMounts(mounts []Mount) []*hyperstart.FsmapDescriptor {
func fsMapFromDevices(c *Container) ([]*hyperstart.FsmapDescriptor, error) {
    var fsmap []*hyperstart.FsmapDescriptor
    for _, dev := range c.devices {
        device := c.sandbox.devManager.GetDeviceByID(dev.DeviceID())
        device := c.sandbox.devManager.GetDeviceByID(dev.ID)
        if device == nil {
            return nil, fmt.Errorf("can't find device: %#v", dev)
        }
        blockDev := device.(*drivers.BlockDevice)

        d, ok := blockDev.GetDeviceDrive().(*config.BlockDrive)
        d, ok := device.GetDeviceDrive().(*config.BlockDrive)
        if !ok || d == nil {
            return nil, fmt.Errorf("can't retrieve block device information")
        }

        fsmapDesc := &hyperstart.FsmapDescriptor{
            Source:       d.VirtPath,
            Path:         blockDev.DeviceInfo.ContainerPath,
            Path:         dev.ContainerPath,
            AbsolutePath: true,
            DockerVolume: false,
            SCSIAddr:     d.SCSIAddr,

@@ -464,8 +462,8 @@ func (h *hyper) stopSandbox(sandbox *Sandbox) error {
// container.
func (h *hyper) handleBlockVolumes(c *Container) {
    for _, m := range c.mounts {
        if m.BlockDevice != nil {
            c.devices = append(c.devices, m.BlockDevice)
        if len(m.BlockDeviceID) > 0 {
            c.devices = append(c.devices, ContainerDevice{ID: m.BlockDeviceID})
        }
    }
}
@@ -19,8 +19,7 @@ import (

    kataclient "github.com/kata-containers/agent/protocols/client"
    "github.com/kata-containers/agent/protocols/grpc"
    "github.com/kata-containers/runtime/virtcontainers/device/api"
    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
    "github.com/kata-containers/runtime/virtcontainers/device/config"
    vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
    ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
    "github.com/kata-containers/runtime/virtcontainers/pkg/uuid"

@@ -722,23 +721,34 @@ func (k *kataAgent) handleShm(grpcSpec *grpc.Spec, sandbox *Sandbox) {
    }
}

func (k *kataAgent) appendDevices(deviceList []*grpc.Device, devices []api.Device) []*grpc.Device {
    for _, device := range devices {
        d, ok := device.(*drivers.BlockDevice)
        if !ok {
func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*grpc.Device {
    for _, dev := range c.devices {
        device := c.sandbox.devManager.GetDeviceByID(dev.ID)
        if device == nil {
            k.Logger().WithField("device", dev.ID).Error("failed to find device by id")
            return nil
        }

        if device.DeviceType() != config.DeviceBlock {
            continue
        }

        d, ok := device.GetDeviceDrive().(*config.BlockDrive)
        if !ok || d == nil {
            k.Logger().WithField("device", device).Error("malformed block drive")
            continue
        }

        kataDevice := &grpc.Device{
            ContainerPath: d.DeviceInfo.ContainerPath,
            ContainerPath: dev.ContainerPath,
        }

        if d.BlockDrive.SCSIAddr == "" {
        if d.SCSIAddr == "" {
            kataDevice.Type = kataBlkDevType
            kataDevice.Id = d.BlockDrive.PCIAddr
            kataDevice.Id = d.PCIAddr
        } else {
            kataDevice.Type = kataSCSIDevType
            kataDevice.Id = d.BlockDrive.SCSIAddr
            kataDevice.Id = d.SCSIAddr
        }

        deviceList = append(deviceList, kataDevice)

@@ -867,7 +877,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
    }

    // Append container devices for block devices passed with --device.
    ctrDevices = k.appendDevices(ctrDevices, c.devices)
    ctrDevices = k.appendDevices(ctrDevices, c)

    // Handle all the volumes that are block device files.
    // Note this call modifies the list of container devices to make sure
@@ -956,27 +966,41 @@ func (k *kataAgent) handleBlockVolumes(c *Container) []*grpc.Storage {
    var volumeStorages []*grpc.Storage

    for _, m := range c.mounts {
        b := m.BlockDevice
        id := m.BlockDeviceID

        if b == nil {
        if len(id) == 0 {
            continue
        }

        // Add the block device to the list of container devices, to make sure the
        // device is detached with detachDevices() for a container.
        c.devices = append(c.devices, b)
        c.devices = append(c.devices, ContainerDevice{ID: id})
        if err := c.storeDevices(); err != nil {
            k.Logger().WithField("device", id).WithError(err).Error("store device failed")
            return nil
        }

        vol := &grpc.Storage{}

        device := c.sandbox.devManager.GetDeviceByID(id)
        if device == nil {
            k.Logger().WithField("device", id).Error("failed to find device by id")
            return nil
        }
        blockDrive, ok := device.GetDeviceDrive().(*config.BlockDrive)
        if !ok || blockDrive == nil {
            k.Logger().Error("malformed block drive")
            continue
        }
        if c.sandbox.config.HypervisorConfig.BlockDeviceDriver == VirtioBlock {
            vol.Driver = kataBlkDevType
            vol.Source = b.BlockDrive.VirtPath
            vol.Source = blockDrive.VirtPath
        } else {
            vol.Driver = kataSCSIDevType
            vol.Source = b.BlockDrive.SCSIAddr
            vol.Source = blockDrive.SCSIAddr
        }

        vol.MountPoint = b.DeviceInfo.ContainerPath
        vol.MountPoint = m.Destination
        vol.Fstype = "bind"
        vol.Options = []string{"bind"}
@@ -14,8 +14,6 @@ import (
    "path/filepath"
    "strings"
    "syscall"

    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
)

// DefaultShmSize is the default shm size to be used in case host

@@ -284,10 +282,10 @@ type Mount struct {
    // ReadOnly specifies if the mount should be read only or not
    ReadOnly bool

    // BlockDevice represents block device that is attached to the
    // BlockDeviceID represents block device that is attached to the
    // VM in case this mount is a block device file or a directory
    // backed by a block device.
    BlockDevice *drivers.BlockDevice
    BlockDeviceID string
}

func bindUnmountContainerRootfs(sharedDir, sandboxID, cID string) error {
@@ -10,7 +10,7 @@ import (
    "os"

    govmmQemu "github.com/intel/govmm/qemu"
    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
    deviceConfig "github.com/kata-containers/runtime/virtcontainers/device/config"
    "github.com/kata-containers/runtime/virtcontainers/utils"
    "github.com/sirupsen/logrus"
)

@@ -134,7 +134,7 @@ func (q *qemuPPC64le) appendImage(devices []govmmQemu.Device, path string) ([]go

    id := utils.MakeNameID("image", hex.EncodeToString(randBytes), maxDevIDSize)

    drive := drivers.Drive{
    drive := deviceConfig.BlockDrive{
        File:   path,
        Format: "raw",
        ID:     id,
@@ -713,6 +713,12 @@ func createSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, erro
        s.networkNS = networkNS
    }

    devices, err := s.storage.fetchSandboxDevices(s.id)
    if err != nil {
        s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("fetch sandbox device failed")
    }
    s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices)

    // We first try to fetch the sandbox state from storage.
    // If it exists, this means this is a re-creation, i.e.
    // we don't need to talk to the guest's agent, but only

@@ -758,7 +764,6 @@ func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error)
        storage:    &filesystem{},
        network:    network,
        config:     &sandboxConfig,
        devManager: deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil),
        volumes:    sandboxConfig.Volumes,
        containers: map[string]*Container{},
        runPath:    filepath.Join(runStoragePath, sandboxConfig.ID),

@@ -808,6 +813,10 @@ func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error)
    return s, nil
}

func (s *Sandbox) storeSandboxDevices() error {
    return s.storage.storeSandboxDevices(s.id, s.devManager.GetAllDevices())
}

// storeSandbox stores a sandbox config.
func (s *Sandbox) storeSandbox() error {
    err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config))